From ed5b727e89db85a05c88abe1e63e52474fe63c67 Mon Sep 17 00:00:00 2001
From: Todd Hill <110035210+tkhill-AWS@users.noreply.github.com>
Date: Fri, 19 Jul 2024 11:40:16 -0400
Subject: [PATCH 01/98] Javav2: Examples that show how to work with S3 event
notifications (#6658)
Add an example that demonstrates S3EventNotification class usage, and modify the example that shows how to configure an S3 bucket to send notifications to EventBridge.
---
.doc_gen/metadata/s3_metadata.yaml | 29 +-
javav2/example_code/eventbridge/README.md | 13 +
javav2/example_code/s3/README.md | 27 +-
javav2/example_code/s3/pom.xml | 33 +-
.../s3/ProcessS3EventNotification.java | 273 ++++++++++
...tBucketS3EventNotificationEventBridge.java | 359 ++++++++++++++
.../s3/SetBucketEventBridgeNotification.java | 82 ---
.../s3/src/main/resources/direct-target.yaml | 469 ++++++++++++++++++
.../s3/src/main/resources/log4j2.xml | 2 +
.../s3/src/main/resources/queue-topic.yaml | 302 +++++++++++
.../s3/ProcessS3EventNotificationTest.java | 47 ++
...ketS3EventNotificationEventBridgeTest.java | 107 ++++
javav2/example_code/sqs/README.md | 13 +
.../javav2_s3_event_notification/README.md | 26 +
.../cdk/javav2_s3_event_notification/cdk.json | 68 +++
.../cdk/javav2_s3_event_notification/pom.xml | 60 +++
.../java/com/myorg/DirectTargetStack.java | 38 ++
.../main/java/com/myorg/EventBridgeStack.java | 65 +++
.../myorg/Javav2S3EventNotificationApp.java | 21 +
.../main/java/com/myorg/QueueTopicStack.java | 51 ++
20 files changed, 1999 insertions(+), 86 deletions(-)
create mode 100644 javav2/example_code/s3/src/main/java/com/example/s3/ProcessS3EventNotification.java
create mode 100644 javav2/example_code/s3/src/main/java/com/example/s3/PutBucketS3EventNotificationEventBridge.java
delete mode 100644 javav2/example_code/s3/src/main/java/com/example/s3/SetBucketEventBridgeNotification.java
create mode 100644 javav2/example_code/s3/src/main/resources/direct-target.yaml
create mode 100644 javav2/example_code/s3/src/main/resources/queue-topic.yaml
create mode 100644 javav2/example_code/s3/src/test/java/com/example/s3/ProcessS3EventNotificationTest.java
create mode 100644 javav2/example_code/s3/src/test/java/com/example/s3/PutBucketS3EventNotificationEventBridgeTest.java
create mode 100644 resources/cdk/javav2_s3_event_notification/README.md
create mode 100644 resources/cdk/javav2_s3_event_notification/cdk.json
create mode 100644 resources/cdk/javav2_s3_event_notification/pom.xml
create mode 100644 resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/DirectTargetStack.java
create mode 100644 resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/EventBridgeStack.java
create mode 100644 resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/Javav2S3EventNotificationApp.java
create mode 100644 resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/QueueTopicStack.java
diff --git a/.doc_gen/metadata/s3_metadata.yaml b/.doc_gen/metadata/s3_metadata.yaml
index c04765701b4..0c4e4d7722a 100644
--- a/.doc_gen/metadata/s3_metadata.yaml
+++ b/.doc_gen/metadata/s3_metadata.yaml
@@ -2346,6 +2346,14 @@ s3_PutBucketNotificationConfiguration:
- description:
snippet_tags:
- S3.dotnetv3.EnableNotificationsExample
+ services:
+ s3: {PutBucketNotificationConfiguration}
+s3_Scenario_PutBucketNotificationConfiguration:
+ title: Send S3 event notifications to &EVlong; using an &AWS; SDK
+ title_abbrev: Send event notifications to &EV;
+ synopsis: enable a bucket to send S3 event notifications to &EV; and route notifications to an &SNS; topic and &SQS; queue.
+ category: Scenarios
+ languages:
Java:
versions:
- sdk_version: 2
@@ -2354,9 +2362,10 @@ s3_PutBucketNotificationConfiguration:
excerpts:
- description:
snippet_tags:
- - s3.java2.s3_enable_notifications.main
+ - s3.java2.s3_enable_notifications_to_eventbridge
services:
s3: {PutBucketNotificationConfiguration}
+ eventbridge: {PutRule, PutTargets}
s3_PutBucketAccelerateConfiguration:
languages:
.NET:
@@ -3302,3 +3311,21 @@ s3_Scenario_AbortMultipartUpload:
- s3.java2.abort_upload_using_lifecycle_config
services:
s3: {ListMultipartUploads, AbortMultipartUpload, PutBucketLifecycleConfiguration}
+s3_Scenario_ProcessS3EventNotification:
+ title: Receive and process &S3; event notifications by using an &AWS; SDK
+ title_abbrev: Process S3 event notifications
+ synopsis: work with S3 event notifications in an object-oriented way.
+ category: Scenarios
+ languages:
+ Java:
+ versions:
+ - sdk_version: 2
+ github: javav2/example_code/s3
+ sdkguide:
+ excerpts:
+ - description: This example shows how to process S3 event notifications by using &SQS;.
+ snippet_tags:
+ - s3.java2.process_s3_event_notifications
+ services:
+ s3: {PutBucketNotificationConfiguration}
+ sqs: {ReceiveMessage, GetQueueAttributes, DeleteMessageBatch}
diff --git a/javav2/example_code/eventbridge/README.md b/javav2/example_code/eventbridge/README.md
index 123321cc248..e1b6a380a97 100644
--- a/javav2/example_code/eventbridge/README.md
+++ b/javav2/example_code/eventbridge/README.md
@@ -56,6 +56,7 @@ Code examples that show you how to accomplish a specific task by calling multipl
functions within the same service.
- [Get started with rules and targets](src/main/java/com/example/eventbridge/EventbridgeMVP.java)
+- [Send event notifications to EventBridge](../s3/src/main/java/com/example/s3/PutBucketS3EventNotificationEventBridge.java)
@@ -91,6 +92,18 @@ This example shows you how to do the following:
+#### Send event notifications to EventBridge
+
+This example shows you how to enable a bucket to send S3 event notifications to EventBridge and route notifications to an Amazon SNS topic and Amazon SQS queue.
+
+
+
+
+
+
+
+
+
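+The EventBridge side of this scenario comes down to two calls: `PutRule` to match "Object Created" events from the bucket, and `PutTargets` to fan the events out. The following minimal sketch is illustrative only and is not generated from the example's snippet tags; the rule name, bucket name, and ARNs are placeholders, and the linked example runs the same calls against resources created by an AWS CloudFormation stack.
+
+```java
+import software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient;
+import software.amazon.awssdk.services.eventbridge.model.Target;
+
+public class EnableS3EventBridgeRuleSketch {
+    public static void main(String[] args) {
+        // Placeholder names and ARNs; substitute your own resources.
+        String ruleName = "s3-object-create-rule";
+        String bucketName = "amzn-s3-demo-bucket";
+        String topicArn = "arn:aws:sns:us-east-1:111122223333:example-topic";
+        String queueArn = "arn:aws:sqs:us-east-1:111122223333:example-queue";
+
+        try (EventBridgeAsyncClient eventBridge = EventBridgeAsyncClient.create()) {
+            // Create a rule on the default event bus that matches "Object Created" events from the bucket.
+            eventBridge.putRule(r -> r
+                    .name(ruleName)
+                    .eventPattern("""
+                            {"source": ["aws.s3"],
+                             "detail-type": ["Object Created"],
+                             "detail": {"bucket": {"name": ["%s"]}}}
+                            """.formatted(bucketName)))
+                .join();
+
+            // Route matching events to the existing SNS topic and SQS queue.
+            eventBridge.putTargets(t -> t
+                    .rule(ruleName)
+                    .targets(
+                            Target.builder().id("Topic").arn(topicArn).build(),
+                            Target.builder().id("Queue").arn(queueArn).build()))
+                .join();
+        }
+    }
+}
+```
+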
### Tests
⚠ Running tests might result in charges to your AWS account.
diff --git a/javav2/example_code/s3/README.md b/javav2/example_code/s3/README.md
index bcc097b4e24..5aabc564946 100644
--- a/javav2/example_code/s3/README.md
+++ b/javav2/example_code/s3/README.md
@@ -57,7 +57,6 @@ Code excerpts that show you how to call individual service functions.
- [PutBucketAcl](src/main/java/com/example/s3/SetAcl.java#L6)
- [PutBucketCors](src/main/java/com/example/s3/S3Cors.java#L6)
- [PutBucketLifecycleConfiguration](src/main/java/com/example/s3/LifecycleConfiguration.java#L6)
-- [PutBucketNotificationConfiguration](src/main/java/com/example/s3/SetBucketEventBridgeNotification.java#L6)
- [PutBucketPolicy](src/main/java/com/example/s3/SetBucketPolicy.java#L6)
- [PutBucketWebsite](src/main/java/com/example/s3/SetWebsiteConfiguration.java#L6)
- [PutObject](src/main/java/com/example/s3/PutObject.java#L6)
@@ -79,6 +78,8 @@ functions within the same service.
- [Lock Amazon S3 objects](src/main/java/com/example/s3/lockscenario/S3ObjectLockWorkflow.java)
- [Parse URIs](src/main/java/com/example/s3/ParseUri.java)
- [Perform a multipart upload](src/main/java/com/example/s3/PerformMultiPartUpload.java)
+- [Process S3 event notifications](src/main/java/com/example/s3/ProcessS3EventNotification.java)
+- [Send event notifications to EventBridge](src/main/java/com/example/s3/PutBucketS3EventNotificationEventBridge.java)
- [Track uploads and downloads](src/main/java/com/example/s3/transfermanager/UploadFile.java)
- [Upload directory to a bucket](src/main/java/com/example/s3/transfermanager/UploadADirectory.java)
- [Upload or download large files](src/main/java/com/example/s3/transfermanager/DownloadToDirectory.java)
@@ -194,6 +195,30 @@ This example shows you how to perform a multipart upload to an Amazon S3 object.
+#### Process S3 event notifications
+
+This example shows you how to work with S3 event notifications in an object-oriented way.
+
+
+
+
+
+
+
+
+
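+At the core of the linked example, each Amazon SQS message body is deserialized with `S3EventNotification.fromJson` so the notification records can be read as typed objects. The following minimal sketch is illustrative only and is not generated from the example's snippet tags; the queue URL is a placeholder, and the full example provisions the queue and bucket with AWS CloudFormation and uses the asynchronous client.
+
+```java
+import software.amazon.awssdk.eventnotifications.s3.model.S3EventNotification;
+import software.amazon.awssdk.services.sqs.SqsClient;
+
+public class ReadS3EventNotificationsSketch {
+    public static void main(String[] args) {
+        // Placeholder queue URL; substitute the queue that receives your bucket's notifications.
+        String queueUrl = "https://sqs.us-east-1.amazonaws.com/111122223333/example-queue";
+
+        try (SqsClient sqs = SqsClient.create()) {
+            sqs.receiveMessage(r -> r.queueUrl(queueUrl).maxNumberOfMessages(10))
+                    .messages()
+                    .forEach(message -> {
+                        // Deserialize the JSON body into an object-oriented representation.
+                        S3EventNotification notification = S3EventNotification.fromJson(message.body());
+                        if (notification.getRecords() != null) {
+                            notification.getRecords().forEach(record ->
+                                    System.out.printf("%s on key %s%n",
+                                            record.getEventName(),
+                                            record.getS3().getObject().getKey()));
+                        }
+                    });
+        }
+    }
+}
+```
+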
+#### Send event notifications to EventBridge
+
+This example shows you how to enable a bucket to send S3 event notifications to EventBridge and route notifications to an Amazon SNS topic and Amazon SQS queue.
+
+
+
+
+
+
+
+
+
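+On the Amazon S3 side, enabling EventBridge delivery is a single `PutBucketNotificationConfiguration` call with an empty `EventBridgeConfiguration`. The following minimal sketch is illustrative only and is not generated from the example's snippet tags; the bucket name is a placeholder, and the linked example uses the asynchronous client and also creates the EventBridge rule and targets.
+
+```java
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.EventBridgeConfiguration;
+
+public class EnableEventBridgeNotificationsSketch {
+    public static void main(String[] args) {
+        // Placeholder bucket name; substitute an existing bucket you own.
+        String bucketName = "amzn-s3-demo-bucket";
+
+        try (S3Client s3 = S3Client.create()) {
+            // An empty EventBridgeConfiguration turns on delivery of all S3 event notifications
+            // for this bucket to the default event bus in Amazon EventBridge.
+            s3.putBucketNotificationConfiguration(b -> b
+                    .bucket(bucketName)
+                    .notificationConfiguration(n -> n
+                            .eventBridgeConfiguration(EventBridgeConfiguration.builder().build())));
+            System.out.println("EventBridge notifications enabled for bucket " + bucketName);
+        }
+    }
+}
+```
+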
#### Track uploads and downloads
This example shows you how to track an Amazon S3 object upload or download.
diff --git a/javav2/example_code/s3/pom.xml b/javav2/example_code/s3/pom.xml
index 2284c95f640..ddeb5f5b1ca 100644
--- a/javav2/example_code/s3/pom.xml
+++ b/javav2/example_code/s3/pom.xml
@@ -73,7 +73,7 @@
             <dependency>
                 <groupId>software.amazon.awssdk</groupId>
                 <artifactId>bom</artifactId>
-                <version>2.25.57</version>
+                <version>2.26.14</version>
                 <type>pom</type>
                 <scope>import</scope>
@@ -87,6 +87,11 @@
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-s3</artifactId>
+            <version>1.12.756</version>
+        </dependency>
         <dependency>
             <groupId>org.junit.jupiter</groupId>
             <artifactId>junit-jupiter-api</artifactId>
@@ -112,7 +117,7 @@
         <dependency>
             <groupId>software.amazon.awssdk.crt</groupId>
             <artifactId>aws-crt</artifactId>
-            <version>0.29.14</version>
+            <version>0.29.25</version>
         </dependency>
         <dependency>
             <groupId>org.junit.platform</groupId>
@@ -124,6 +129,10 @@
         <dependency>
             <groupId>software.amazon.awssdk</groupId>
             <artifactId>s3</artifactId>
         </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>cloudformation</artifactId>
+        </dependency>
         <dependency>
             <groupId>software.amazon.awssdk</groupId>
             <artifactId>apache-client</artifactId>
@@ -136,6 +145,10 @@
         <dependency>
             <groupId>software.amazon.awssdk</groupId>
             <artifactId>ssooidc</artifactId>
         </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>sns</artifactId>
+        </dependency>
         <dependency>
             <groupId>software.amazon.awssdk</groupId>
             <artifactId>s3-transfer-manager</artifactId>
@@ -164,6 +177,22 @@
         <dependency>
             <groupId>software.amazon.awssdk</groupId>
             <artifactId>sts</artifactId>
         </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>sqs</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3-event-notifications</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>eventbridge</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>iam-policy-builder</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.apache.logging.log4j</groupId>
             <artifactId>log4j-core</artifactId>
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/ProcessS3EventNotification.java b/javav2/example_code/s3/src/main/java/com/example/s3/ProcessS3EventNotification.java
new file mode 100644
index 00000000000..425eb228b9c
--- /dev/null
+++ b/javav2/example_code/s3/src/main/java/com/example/s3/ProcessS3EventNotification.java
@@ -0,0 +1,273 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.example.s3;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.eventnotifications.s3.model.S3EventNotification;
+import software.amazon.awssdk.services.cloudformation.CloudFormationAsyncClient;
+import software.amazon.awssdk.services.cloudformation.model.Capability;
+import software.amazon.awssdk.services.cloudformation.model.CloudFormationException;
+import software.amazon.awssdk.services.cloudformation.waiters.CloudFormationAsyncWaiter;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.model.Bucket;
+import software.amazon.awssdk.services.s3.model.Event;
+import software.amazon.awssdk.services.s3.model.ListBucketsResponse;
+import software.amazon.awssdk.services.sqs.SqsAsyncClient;
+import software.amazon.awssdk.services.sqs.model.DeleteMessageBatchRequest;
+import software.amazon.awssdk.services.sqs.model.DeleteMessageBatchRequestEntry;
+import software.amazon.awssdk.services.sqs.model.ListQueuesResponse;
+import software.amazon.awssdk.services.sqs.model.QueueAttributeName;
+import software.amazon.awssdk.services.sqs.model.ReceiveMessageResponse;
+import software.amazon.awssdk.transfer.s3.S3TransferManager;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.HashSet;
+import java.util.Optional;
+import java.util.concurrent.ExecutionException;
+
+public class ProcessS3EventNotification {
+ static final CloudFormationAsyncClient cfClient = CloudFormationAsyncClient.create();
+ static final SqsAsyncClient sqsClient = SqsAsyncClient.create();
+ static final S3AsyncClient s3Client = S3AsyncClient.create();
+ static final S3TransferManager transferManager = S3TransferManager.create();
+ static final String STACK_NAME = "direct-target";
+ private static final Logger logger = LoggerFactory.getLogger(ProcessS3EventNotification.class);
+
+ public static void main(String[] args) {
+ deployCloudFormationStack();
+ String queueUrl = getQueueUrl();
+ String queueArn = getQueueArn(queueUrl);
+ String bucketName = getBucketName();
+ processS3Events(bucketName, queueUrl, queueArn);
+ destroyCloudFormationStack();
+ }
+
+// snippet-start:[s3.java2.process_s3_event_notifications]
+ /**
+ * This method receives S3 event notifications by using an SqsAsyncClient.
+ * After the client receives the messages, the method deserializes each JSON payload and logs it. It uses
+ * the S3EventNotification class (part of the S3 event notification API for Java) to deserialize
+ * the JSON payload and access the messages in an object-oriented way.
+ *
+ * @param bucketName The name of the bucket that sends the S3 event notifications.
+ * @param queueUrl The URL of the Amazon SQS queue that receives the S3 event notifications.
+ * @param queueArn The ARN of the Amazon SQS queue that receives the S3 event notifications.
+ * @see S3EventNotification API.
+ *
+ * To use S3 event notification serialization/deserialization to objects, add the following
+ * dependency to your Maven pom.xml file:
+ *
+ * <dependency>
+ *     <groupId>software.amazon.awssdk</groupId>
+ *     <artifactId>s3-event-notifications</artifactId>
+ * </dependency>
+ *
+ * The S3 event notification API became available with version 2.25.11 of the Java SDK.
+ *
+ * This example shows the use of the API with Amazon SQS, but it can also be used to process S3 event notifications
+ * received through Amazon SNS or AWS Lambda.
+ *
+ * Note: The S3EventNotification class does not work with messages routed through Amazon EventBridge.
+ */
+ static void processS3Events(String bucketName, String queueUrl, String queueArn) {
+ try {
+ // Configure the bucket to send Object Created and Object Tagging notifications to an existing SQS queue.
+ s3Client.putBucketNotificationConfiguration(b -> b
+ .notificationConfiguration(ncb -> ncb
+ .queueConfigurations(qcb -> qcb
+ .events(Event.S3_OBJECT_CREATED, Event.S3_OBJECT_TAGGING)
+ .queueArn(queueArn)))
+ .bucket(bucketName)
+ ).join();
+
+ triggerS3EventNotifications(bucketName);
+ // Wait for event notifications to propagate.
+ Thread.sleep(Duration.ofSeconds(5).toMillis());
+
+ boolean didReceiveMessages = true;
+ while (didReceiveMessages) {
+ // Display the number of messages that are available in the queue.
+ sqsClient.getQueueAttributes(b -> b
+ .queueUrl(queueUrl)
+ .attributeNames(QueueAttributeName.APPROXIMATE_NUMBER_OF_MESSAGES)
+ ).thenAccept(attributeResponse ->
+ logger.info("Approximate number of messages in the queue: {}",
+ attributeResponse.attributes().get(QueueAttributeName.APPROXIMATE_NUMBER_OF_MESSAGES)))
+ .join();
+
+ // Receive the messages.
+ ReceiveMessageResponse response = sqsClient.receiveMessage(b -> b
+ .queueUrl(queueUrl)
+ ).get();
+ logger.info("Count of received messages: {}", response.messages().size());
+ didReceiveMessages = !response.messages().isEmpty();
+
+ // Create a collection to hold the received messages for deletion
+ // after we log the messages.
+ HashSet<DeleteMessageBatchRequestEntry> messagesToDelete = new HashSet<>();
+ // Process each message.
+ response.messages().forEach(message -> {
+ logger.info("Message id: {}", message.messageId());
+ // Deserialize JSON message body to a S3EventNotification object
+ // to access messages in an object-oriented way.
+ S3EventNotification event = S3EventNotification.fromJson(message.body());
+
+ // Log the S3 event notification record details.
+ if (event.getRecords() != null) {
+ event.getRecords().forEach(record -> {
+ String eventName = record.getEventName();
+ String key = record.getS3().getObject().getKey();
+ logger.info(record.toString());
+ logger.info("Event name is {} and key is {}", eventName, key);
+ });
+ }
+ // Add logged messages to collection for batch deletion.
+ messagesToDelete.add(DeleteMessageBatchRequestEntry.builder()
+ .id(message.messageId())
+ .receiptHandle(message.receiptHandle())
+ .build());
+ });
+ // Delete messages.
+ if (!messagesToDelete.isEmpty()) {
+ sqsClient.deleteMessageBatch(DeleteMessageBatchRequest.builder()
+ .queueUrl(queueUrl)
+ .entries(messagesToDelete)
+ .build()
+ ).join();
+ }
+ } // End of while block.
+ } catch (InterruptedException | ExecutionException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ // snippet-end:[s3.java2.process_s3_event_notifications]
+ static void triggerS3EventNotifications(String bucketName) {
+ Path uploadDir;
+ try {
+ uploadDir = Paths.get(
+ ProcessS3EventNotification.class.getClassLoader().getResource("uploadDirectory").toURI());
+ } catch (URISyntaxException e) {
+ throw new RuntimeException(e);
+ }
+
+ transferManager.uploadDirectory(b -> b
+ .bucket(bucketName)
+ .source(uploadDir)
+ .build()).completionFuture()
+ .whenComplete((completedUpload, t) -> {
+ if (t != null) {
+ logger.error("Failed to upload directory", t);
+ return;
+ }
+ completedUpload.failedTransfers().forEach(failedUpload ->
+ logger.error("Object {} failed to upload with exception {}",
+ failedUpload.request().putObjectRequest().key(),
+ failedUpload.exception().getMessage())
+ );
+ }).join();
+ try {
+ Thread.sleep(Duration.ofSeconds(1).toMillis());
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+
+ try (S3AsyncClient s3Client = S3AsyncClient.create()) {
+ s3Client.listObjects(b -> b.bucket(bucketName))
+ .thenAccept(listObjectsResponse ->
+ listObjectsResponse.contents().forEach(s3Object -> {
+ logger.info("Object key is " + s3Object.key());
+ s3Client.putObjectTagging(potr -> potr
+ .bucket(bucketName)
+ .key(s3Object.key())
+ .tagging(tb ->
+ tb.tagSet(tsb -> tsb.key("akey").value("avalue"))
+ )
+ ).join();
+ })).join();
+ } // End of try-with-resources block.
+ }
+
+ static String getQueueUrl() {
+ ListQueuesResponse response = sqsClient.listQueues().join();
+ Optional<String> queueUrl = response.queueUrls().stream()
+ .filter(url -> url.contains(STACK_NAME))
+ .findFirst();
+ return queueUrl.orElse(null);
+ }
+
+ static String getQueueArn(String queueUrl){
+ return sqsClient.getQueueAttributes(b -> b
+ .queueUrl(queueUrl).attributeNames(QueueAttributeName.QUEUE_ARN)).join()
+ .attributes().get(QueueAttributeName.QUEUE_ARN);
+ }
+
+ static String getBucketName() {
+ ListBucketsResponse listBucketsResponse = s3Client.listBuckets().join();
+ for (Bucket bucket : listBucketsResponse.buckets()) {
+ if (bucket.name().contains(STACK_NAME)) {
+ return bucket.name();
+ }
+ }
+ return null;
+ }
+
+ static void deployCloudFormationStack() {
+ try {
+ URL fileUrl = ProcessS3EventNotification.class.getClassLoader().getResource(STACK_NAME + ".yaml");
+ String templateBody;
+ try {
+ templateBody = Files.readString(Paths.get(fileUrl.toURI()));
+
+ } catch (IOException | URISyntaxException e) {
+ throw new RuntimeException(e);
+ }
+
+ cfClient.createStack(b -> b.stackName(STACK_NAME)
+ .templateBody(templateBody)
+ .capabilities(Capability.CAPABILITY_IAM))
+ .whenComplete((csr, t) -> {
+ if (csr != null) {
+ logger.info("Stack creation requested, ARN is " + csr.stackId());
+ try (CloudFormationAsyncWaiter waiter = cfClient.waiter()) {
+ waiter.waitUntilStackCreateComplete(request -> request.stackName(STACK_NAME))
+ .whenComplete((dsr, th) -> {
+ dsr.matched().response().orElseThrow(() -> new RuntimeException("Failed to deploy"));
+ }).join();
+ }
+ logger.info("Stack created successfully");
+ } else {
+ logger.error("Error creating stack: " + t.getMessage(), t);
+ throw new RuntimeException(t.getCause().getMessage(), t);
+ }
+ }).join();
+ } catch (CloudFormationException ex) {
+ throw new RuntimeException("Failed to deploy CloudFormation stack", ex);
+ }
+ }
+
+ static void destroyCloudFormationStack() {
+ String stackName = STACK_NAME;
+ cfClient.deleteStack(b -> b.stackName(stackName))
+ .whenComplete((dsr, t) -> {
+ if (dsr != null) {
+ logger.info("Delete stack requested ....");
+ try (CloudFormationAsyncWaiter waiter = cfClient.waiter()) {
+ waiter.waitUntilStackDeleteComplete(request -> request.stackName(stackName))
+ .whenComplete((waiterResponse, throwable) ->
+ logger.info("Stack deleted successfully."))
+ .join();
+ }
+ } else {
+ logger.error("Error deleting stack: " + t.getMessage(), t);
+ throw new RuntimeException(t.getCause().getMessage(), t);
+ }
+ }).join();
+ }
+}
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/PutBucketS3EventNotificationEventBridge.java b/javav2/example_code/s3/src/main/java/com/example/s3/PutBucketS3EventNotificationEventBridge.java
new file mode 100644
index 00000000000..aa31cf598ca
--- /dev/null
+++ b/javav2/example_code/s3/src/main/java/com/example/s3/PutBucketS3EventNotificationEventBridge.java
@@ -0,0 +1,359 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.example.s3;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.policybuilder.iam.IamConditionOperator;
+import software.amazon.awssdk.policybuilder.iam.IamPolicy;
+import software.amazon.awssdk.policybuilder.iam.IamPolicyWriter;
+import software.amazon.awssdk.policybuilder.iam.IamPrincipalType;
+import software.amazon.awssdk.services.cloudformation.CloudFormationAsyncClient;
+import software.amazon.awssdk.services.cloudformation.model.Capability;
+import software.amazon.awssdk.services.cloudformation.model.CloudFormationException;
+import software.amazon.awssdk.services.cloudformation.waiters.CloudFormationAsyncWaiter;
+import software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient;
+import software.amazon.awssdk.services.eventbridge.model.EventBus;
+import software.amazon.awssdk.services.eventbridge.model.ListEventBusesResponse;
+import software.amazon.awssdk.services.eventbridge.model.PutRuleRequest;
+import software.amazon.awssdk.services.eventbridge.model.PutRuleResponse;
+import software.amazon.awssdk.services.eventbridge.model.Target;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.model.Bucket;
+import software.amazon.awssdk.services.s3.model.S3Exception;
+import software.amazon.awssdk.services.sns.SnsAsyncClient;
+import software.amazon.awssdk.services.sns.model.ListTopicsResponse;
+import software.amazon.awssdk.services.sns.model.Topic;
+import software.amazon.awssdk.services.sqs.SqsAsyncClient;
+import software.amazon.awssdk.services.sqs.model.ListQueuesResponse;
+import software.amazon.awssdk.services.sqs.model.QueueAttributeName;
+import software.amazon.awssdk.utils.builder.SdkBuilder;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+public class PutBucketS3EventNotificationEventBridge {
+ static final CloudFormationAsyncClient cfClient = CloudFormationAsyncClient.create();
+ static final SqsAsyncClient sqsClient = SqsAsyncClient.create();
+ static final SnsAsyncClient snsClient = SnsAsyncClient.create();
+ static final EventBridgeAsyncClient eventBridgeClient = EventBridgeAsyncClient.create();
+ static final String STACK_NAME = "queue-topic";
+ static final String RULE_NAME = "s3-object-create-rule";
+ static final S3AsyncClient s3Client = S3AsyncClient.create();
+ private static final Logger logger = LoggerFactory.getLogger(PutBucketS3EventNotificationEventBridge.class);
+
+ public static void main(String[] args) {
+ deployCloudFormationStack();
+ String bucketName = getBucketName();
+ String topicArn = getTopicArn();
+ String directToQueueUrl = getQueueUrl(false);
+ String directToQueueArn = getQueueArn(directToQueueUrl);
+ String subscriberQueueUrl = getQueueUrl(true);
+ String subscriberQueueArn = getQueueArn(subscriberQueueUrl);
+ String ruleArn = setBucketNotificationToEventBridge(bucketName, topicArn, directToQueueArn);
+ addPermissions(directToQueueArn, directToQueueUrl,
+ subscriberQueueArn, subscriberQueueUrl, topicArn, ruleArn);
+ deleteRule();
+ destroyCloudFormationStack();
+ }
+// snippet-start:[s3.java2.s3_enable_notifications_to_eventbridge]
+ /** This method configures a bucket to send events to Amazon EventBridge and creates a rule
+ * to route the S3 "Object Created" events to a topic and a queue.
+ *
+ * @param bucketName Name of an existing bucket
+ * @param topicArn ARN of an existing topic to receive S3 event notifications
+ * @param queueArn ARN of an existing queue to receive S3 event notifications
+ * @return The ARN of the EventBridge rule that routes the events
+ *
+ * An AWS CloudFormation stack sets up the bucket, queue, and topic before the method runs.
+ */
+ public static String setBucketNotificationToEventBridge(String bucketName, String topicArn, String queueArn) {
+ try {
+ // Enable bucket to emit S3 Event notifications to EventBridge.
+ s3Client.putBucketNotificationConfiguration(b -> b
+ .bucket(bucketName)
+ .notificationConfiguration(b1 -> b1
+ .eventBridgeConfiguration(
+ SdkBuilder::build)
+ ).build()).join();
+
+ // Create an EventBridge rule to route Object Created notifications.
+ PutRuleRequest putRuleRequest = PutRuleRequest.builder()
+ .name(RULE_NAME)
+ .eventPattern("""
+ {
+ "source": ["aws.s3"],
+ "detail-type": ["Object Created"],
+ "detail": {
+ "bucket": {
+ "name": ["%s"]
+ }
+ }
+ }
+ """.formatted(bucketName))
+ .build();
+
+ // Add the rule to the default event bus.
+ PutRuleResponse putRuleResponse = eventBridgeClient.putRule(putRuleRequest)
+ .whenComplete((r, t) -> {
+ if (t != null) {
+ logger.error("Error creating event bus rule: " + t.getMessage(), t);
+ throw new RuntimeException(t.getCause().getMessage(), t);
+ }
+ logger.info("Event bus rule creation request sent successfully. ARN is: {}", r.ruleArn());
+ }).join();
+
+ // Add the existing SNS topic and SQS queue as targets to the rule.
+ eventBridgeClient.putTargets(b -> b
+ .eventBusName("default")
+ .rule(RULE_NAME)
+ .targets(List.of (
+ Target.builder()
+ .arn(queueArn)
+ .id("Queue")
+ .build(),
+ Target.builder()
+ .arn(topicArn)
+ .id("Topic")
+ .build())
+ )
+ ).join();
+ return putRuleResponse.ruleArn();
+ } catch (S3Exception e) {
+ System.err.println(e.awsErrorDetails().errorMessage());
+ System.exit(1);
+ }
+ return null;
+ }
+ // snippet-end:[s3.java2.s3_enable_notifications_to_eventbridge]
+
+ /** After we create the EventBridge rule, we add the necessary permissions to the resources
+ * that receive messages from the rule.
+ *
+ * @param directQueueArn ARN of the queue that receives notifications from the S3 bucket directly
+ * @param directQueueUrl URL of the queue that receives notifications from the S3 bucket directly
+ * @param subscriberQueueArn ARN of the queue that receives notifications through the subscription to the SNS topic
+ * @param subscriberQueueUrl URL of the queue that receives notifications through the subscription to the SNS topic
+ * @param topicArn ARN of the topic that receives notifications from the S3 bucket
+ * @param ruleArn ARN of the EventBridge rule
+ */
+ static void addPermissions(String directQueueArn, String directQueueUrl, String subscriberQueueArn,
+ String subscriberQueueUrl, String topicArn, String ruleArn){
+ addPermissionToDirectQueue(sqsClient, directQueueArn, directQueueUrl, ruleArn);
+ addPermissionToSubscriberQueue(sqsClient, subscriberQueueArn, subscriberQueueUrl, topicArn);
+ addPermissionToTopic(snsClient, topicArn, ruleArn);
+ }
+
+ static void addPermissionToDirectQueue(SqsAsyncClient sqsClient, String queueArn, String queueUrl, String roleArn){
+ /*
+ We use the Java SDK's IAM Policy Builder API to generate a policy.
+ This requires the following Maven dependency:
+
+ <dependency>
+     <groupId>software.amazon.awssdk</groupId>
+     <artifactId>iam-policy-builder</artifactId>
+ </dependency>
+ */
+ String policyString = IamPolicy.builder()
+ .version("2012-10-17")
+ .id(queueArn)
+ .addStatement(b -> b
+ .sid("AllowEventsToQueue")
+ .effect("Allow")
+ .addPrincipal(pb -> pb
+ .type(IamPrincipalType.SERVICE)
+ .id("events.amazonaws.com"))
+ .addAction("sqs:SendMessage")
+ .addResource(queueArn)
+ .addCondition(cb -> cb
+ .operator(IamConditionOperator.ARN_EQUALS)
+ .key("aws:SourceArn")
+ .value(roleArn))
+ .build())
+ .build().toJson(
+ IamPolicyWriter.builder().prettyPrint(true).build());
+
+ sqsClient.setQueueAttributes(b -> b
+ .queueUrl(queueUrl)
+ .attributes(Map.of (QueueAttributeName.POLICY, policyString)))
+ .join();
+ }
+
+ static void addPermissionToSubscriberQueue(SqsAsyncClient sqsClient, String queueArn, String queueUrl, String topicArn){
+ String policyString = IamPolicy.builder()
+ .version("2012-10-17")
+ .id(queueArn)
+ .addStatement(b -> b
+ .sid("AllowMessageToSubscriberQueue")
+ .effect("Allow")
+ .addPrincipal(pb -> pb.type(IamPrincipalType.SERVICE)
+ .id("sns.amazonaws.com"))
+ .addAction("sqs:SendMessage")
+ .addResource(queueArn)
+ .addCondition(cb -> cb
+ .operator(IamConditionOperator.ARN_EQUALS)
+ .key("aws:SourceArn")
+ .value(topicArn))
+ .build())
+ .build().toJson(
+ IamPolicyWriter.builder().prettyPrint(true).build()
+ );
+
+ sqsClient.setQueueAttributes(b -> b
+ .queueUrl(queueUrl)
+ .attributes(Map.of (QueueAttributeName.POLICY, policyString)))
+ .join();
+ }
+
+ static void addPermissionToTopic(SnsAsyncClient snsClient, String topicArn, String roleArn){
+ snsClient.getTopicAttributes(b -> b
+ .topicArn(topicArn).build())
+ .thenApply(ar -> {
+ String policy = ar.attributes().get("Policy");
+ IamPolicy iamPolicy = IamPolicy.fromJson(policy);
+ return iamPolicy.copy(b -> b.addStatement(sb -> sb
+ .sid("AllowEventsToTopic")
+ .effect("Allow")
+ .addPrincipal(pb -> pb
+ .type(IamPrincipalType.SERVICE)
+ .id("events.amazonaws.com"))
+ .addAction("sns:Publish")
+ .addResource(topicArn)
+ .addCondition(cb -> cb
+ .operator(IamConditionOperator.ARN_EQUALS)
+ .key("aws:SourceArn")
+ .value(roleArn))
+ .build())).toJson(IamPolicyWriter.builder().prettyPrint(true).build());
+ }).thenAccept(policy -> snsClient.setTopicAttributes(b -> b
+ .attributeName("Policy")
+ .attributeValue(policy)
+ .topicArn(topicArn)))
+ .join();
+ }
+
+ static String getBucketName() {
+ return s3Client.listBuckets().handle((r, t) -> {
+ for (Bucket bucket : r.buckets()) {
+ if (bucket.name().startsWith(STACK_NAME.substring(0, 10))) {
+ return bucket.name();
+ }
+ }
+ return null;
+ }).join();
+ }
+
+ static String getQueueUrl(boolean isSubscriberQueue){
+ ListQueuesResponse response = sqsClient.listQueues().join();
+ String queueUrl;
+ if (!isSubscriberQueue) {
+ queueUrl = response.queueUrls().stream()
+ .filter(url -> url.contains(STACK_NAME) && !url.contains("Subscriber"))
+ .findFirst()
+ .orElse(null);
+ } else {
+ queueUrl = response.queueUrls().stream()
+ .filter(url -> url.contains(STACK_NAME) && url.contains("Subscriber"))
+ .findFirst()
+ .orElse(null);
+ }
+ if (queueUrl == null) {
+ throw new RuntimeException("Queue URL not found");
+ }
+ return queueUrl;
+ }
+
+ static String getQueueArn(String queueUrl) {
+ return sqsClient.getQueueAttributes(b -> b
+ .queueUrl(queueUrl).attributeNames(QueueAttributeName.QUEUE_ARN)).join()
+ .attributes().get(QueueAttributeName.QUEUE_ARN);
+ }
+
+ static String getTopicArn() {
+ ListTopicsResponse response = snsClient.listTopics().join();
+ Optional<String> topicArn = response.topics().stream()
+ .map(Topic::topicArn)
+ .filter(s -> s.contains(STACK_NAME))
+ .findFirst();
+ return topicArn.orElse(null);
+ }
+
+ static String getEventBusName() {
+ ListEventBusesResponse eventBusesResponse = eventBridgeClient.listEventBuses(SdkBuilder::build).join();
+ final Optional<String> busName = eventBusesResponse.eventBuses().stream()
+ .map(EventBus::name)
+ .filter(s -> s.startsWith(STACK_NAME.substring(0, 4)))
+ .findFirst();
+ return busName.orElse(null);
+
+ }
+
+ static void deployCloudFormationStack() {
+ try {
+ URL fileUrl = PutBucketS3EventNotificationEventBridge.class.getClassLoader().getResource(STACK_NAME + ".yaml");
+ String templateBody;
+ try {
+ templateBody = Files.readString(Paths.get(fileUrl.toURI()));
+
+ } catch (IOException | URISyntaxException e) {
+ throw new RuntimeException(e);
+ }
+
+ cfClient.createStack(b -> b.stackName(STACK_NAME)
+ .templateBody(templateBody)
+ .capabilities(Capability.CAPABILITY_IAM))
+ .whenComplete((csr, t) -> {
+ if (csr != null) {
+ logger.info("Stack creation requested, ARN is " + csr.stackId());
+ try (CloudFormationAsyncWaiter waiter = cfClient.waiter()) {
+ waiter.waitUntilStackCreateComplete(request -> request.stackName(STACK_NAME))
+ .whenComplete((dsr, th) -> {
+ dsr.matched().response().orElseThrow(() -> new RuntimeException("Failed to deploy"));
+ }).join();
+ }
+ logger.info("Stack created successfully");
+ } else {
+ logger.error("Error creating stack: " + t.getMessage(), t);
+ throw new RuntimeException(t.getCause().getMessage(), t);
+ }
+ }).join();
+ } catch (CloudFormationException ex) {
+ throw new RuntimeException("Failed to deploy CloudFormation stack", ex);
+ }
+ }
+
+ static void destroyCloudFormationStack() {
+ String stackName = STACK_NAME;
+ cfClient.deleteStack(b -> b.stackName(stackName))
+ .whenComplete((dsr, t) -> {
+ if (dsr != null) {
+ logger.info("Delete stack requested ....");
+ try (CloudFormationAsyncWaiter waiter = cfClient.waiter()) {
+ waiter.waitUntilStackDeleteComplete(request -> request.stackName(stackName))
+ .whenComplete((waiterResponse, throwable) ->
+ logger.info("Stack deleted successfully."))
+ .join();
+ }
+ } else {
+ logger.error("Error deleting stack: " + t.getMessage(), t);
+ throw new RuntimeException(t.getCause().getMessage(), t);
+ }
+ }).join();
+ }
+
+ static void deleteRule() {
+
+ eventBridgeClient.removeTargets(b -> b
+ .rule(RULE_NAME)
+ .ids("Queue", "Topic")
+ .build()).join();
+
+ eventBridgeClient.deleteRule(b -> b
+ .name(RULE_NAME)
+ .build()).join();
+ }
+}
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/SetBucketEventBridgeNotification.java b/javav2/example_code/s3/src/main/java/com/example/s3/SetBucketEventBridgeNotification.java
deleted file mode 100644
index 8973461696a..00000000000
--- a/javav2/example_code/s3/src/main/java/com/example/s3/SetBucketEventBridgeNotification.java
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package com.example.s3;
-
-// snippet-start:[s3.java2.s3_enable_notifications.main]
-import software.amazon.awssdk.regions.Region;
-import software.amazon.awssdk.services.s3.S3Client;
-import software.amazon.awssdk.services.s3.model.Event;
-import software.amazon.awssdk.services.s3.model.NotificationConfiguration;
-import software.amazon.awssdk.services.s3.model.PutBucketNotificationConfigurationRequest;
-import software.amazon.awssdk.services.s3.model.S3Exception;
-import software.amazon.awssdk.services.s3.model.TopicConfiguration;
-import java.util.ArrayList;
-import java.util.List;
-
-public class SetBucketEventBridgeNotification {
- public static void main(String[] args) {
- final String usage = """
-
- Usage:
-            <bucketName> <topicArn> <id>\s
-
- Where:
- bucketName - The Amazon S3 bucket.\s
- topicArn - The Simple Notification Service topic ARN.\s
- id - An id value used for the topic configuration. This value is displayed in the AWS Management Console.\s
- """;
-
- if (args.length != 3) {
- System.out.println(usage);
- System.exit(1);
- }
-
- String bucketName = args[0];
- String topicArn = args[1];
- String id = args[2];
- Region region = Region.US_EAST_1;
- S3Client s3Client = S3Client.builder()
- .region(region)
- .build();
-
- setBucketNotification(s3Client, bucketName, topicArn, id);
- s3Client.close();
- }
-
- public static void setBucketNotification(S3Client s3Client, String bucketName, String topicArn, String id) {
- try {
- List<Event> events = new ArrayList<>();
- events.add(Event.S3_OBJECT_CREATED_PUT);
-
- TopicConfiguration config = TopicConfiguration.builder()
- .topicArn(topicArn)
- .events(events)
- .id(id)
- .build();
-
- List<TopicConfiguration> topics = new ArrayList<>();
- topics.add(config);
-
- NotificationConfiguration configuration = NotificationConfiguration.builder()
- .topicConfigurations(topics)
- .build();
-
- PutBucketNotificationConfigurationRequest configurationRequest = PutBucketNotificationConfigurationRequest
- .builder()
- .bucket(bucketName)
- .notificationConfiguration(configuration)
- .skipDestinationValidation(true)
- .build();
-
- // Set the bucket notification configuration.
- s3Client.putBucketNotificationConfiguration(configurationRequest);
- System.out.println("Added bucket " + bucketName + " with EventBridge events enabled.");
-
- } catch (S3Exception e) {
- System.err.println(e.awsErrorDetails().errorMessage());
- System.exit(1);
- }
- }
-}
-// snippet-end:[s3.java2.s3_enable_notifications.main]
diff --git a/javav2/example_code/s3/src/main/resources/direct-target.yaml b/javav2/example_code/s3/src/main/resources/direct-target.yaml
new file mode 100644
index 00000000000..df9bed6b961
--- /dev/null
+++ b/javav2/example_code/s3/src/main/resources/direct-target.yaml
@@ -0,0 +1,469 @@
+Resources:
+ s3EventNotificationBucketB63AEF24:
+ Type: AWS::S3::Bucket
+ Properties:
+ Tags:
+ - Key: aws-cdk:auto-delete-objects
+ Value: "true"
+ UpdateReplacePolicy: Delete
+ DeletionPolicy: Delete
+ Metadata:
+ aws:cdk:path: direct-target/s3EventNotificationBucket/Resource
+ s3EventNotificationBucketPolicyEEB8E6AC:
+ Type: AWS::S3::BucketPolicy
+ Properties:
+ Bucket:
+ Ref: s3EventNotificationBucketB63AEF24
+ PolicyDocument:
+ Statement:
+ - Action:
+ - s3:DeleteObject*
+ - s3:GetBucket*
+ - s3:List*
+ - s3:PutBucketPolicy
+ Effect: Allow
+ Principal:
+ AWS:
+ Fn::GetAtt:
+ - CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092
+ - Arn
+ Resource:
+ - Fn::GetAtt:
+ - s3EventNotificationBucketB63AEF24
+ - Arn
+ - Fn::Join:
+ - ""
+ - - Fn::GetAtt:
+ - s3EventNotificationBucketB63AEF24
+ - Arn
+ - /*
+ Version: "2012-10-17"
+ Metadata:
+ aws:cdk:path: direct-target/s3EventNotificationBucket/Policy/Resource
+ s3EventNotificationBucketAutoDeleteObjectsCustomResource751D0266:
+ Type: Custom::S3AutoDeleteObjects
+ Properties:
+ ServiceToken:
+ Fn::GetAtt:
+ - CustomS3AutoDeleteObjectsCustomResourceProviderHandler9D90184F
+ - Arn
+ BucketName:
+ Ref: s3EventNotificationBucketB63AEF24
+ DependsOn:
+ - s3EventNotificationBucketPolicyEEB8E6AC
+ UpdateReplacePolicy: Delete
+ DeletionPolicy: Delete
+ Metadata:
+ aws:cdk:path: direct-target/s3EventNotificationBucket/AutoDeleteObjectsCustomResource/Default
+ s3EventNotificationBucketNotifications87DCF05E:
+ Type: Custom::S3BucketNotifications
+ Properties:
+ ServiceToken:
+ Fn::GetAtt:
+ - BucketNotificationsHandler050a0587b7544547bf325f094a3db8347ECC3691
+ - Arn
+ BucketName:
+ Ref: s3EventNotificationBucketB63AEF24
+ NotificationConfiguration:
+ QueueConfigurations:
+ - Events:
+ - s3:ObjectRemoved:Delete
+ QueueArn:
+ Fn::GetAtt:
+ - 3EventNotificationQueue8DC270B4
+ - Arn
+ Managed: true
+ DependsOn:
+ - 3EventNotificationQueuePolicy8BC64376
+ - 3EventNotificationQueue8DC270B4
+ - s3EventNotificationBucketPolicyEEB8E6AC
+ Metadata:
+ aws:cdk:path: direct-target/s3EventNotificationBucket/Notifications/Resource
+ CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092:
+ Type: AWS::IAM::Role
+ Properties:
+ AssumeRolePolicyDocument:
+ Version: "2012-10-17"
+ Statement:
+ - Action: sts:AssumeRole
+ Effect: Allow
+ Principal:
+ Service: lambda.amazonaws.com
+ ManagedPolicyArns:
+ - Fn::Sub: arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
+ Metadata:
+ aws:cdk:path: direct-target/Custom::S3AutoDeleteObjectsCustomResourceProvider/Role
+ CustomS3AutoDeleteObjectsCustomResourceProviderHandler9D90184F:
+ Type: AWS::Lambda::Function
+ Properties:
+ Code:
+ S3Bucket:
+ Fn::Sub: cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}
+ S3Key: faa95a81ae7d7373f3e1f242268f904eb748d8d0fdd306e8a6fe515a1905a7d6.zip
+ Timeout: 900
+ MemorySize: 128
+ Handler: index.handler
+ Role:
+ Fn::GetAtt:
+ - CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092
+ - Arn
+ Runtime:
+ Fn::FindInMap:
+ - LatestNodeRuntimeMap
+ - Ref: AWS::Region
+ - value
+ Description:
+ Fn::Join:
+ - ""
+ - - "Lambda function for auto-deleting objects in "
+ - Ref: s3EventNotificationBucketB63AEF24
+ - " S3 bucket."
+ DependsOn:
+ - CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092
+ Metadata:
+ aws:cdk:path: direct-target/Custom::S3AutoDeleteObjectsCustomResourceProvider/Handler
+ aws:asset:path: asset.faa95a81ae7d7373f3e1f242268f904eb748d8d0fdd306e8a6fe515a1905a7d6
+ aws:asset:property: Code
+ 3EventNotificationQueue8DC270B4:
+ Type: AWS::SQS::Queue
+ Properties:
+ ReceiveMessageWaitTimeSeconds: 5
+ VisibilityTimeout: 10
+ UpdateReplacePolicy: Delete
+ DeletionPolicy: Delete
+ Metadata:
+ aws:cdk:path: direct-target/3EventNotificationQueue/Resource
+ 3EventNotificationQueuePolicy8BC64376:
+ Type: AWS::SQS::QueuePolicy
+ Properties:
+ PolicyDocument:
+ Statement:
+ - Action:
+ - sqs:GetQueueAttributes
+ - sqs:GetQueueUrl
+ - sqs:SendMessage
+ Condition:
+ ArnLike:
+ aws:SourceArn:
+ Fn::GetAtt:
+ - s3EventNotificationBucketB63AEF24
+ - Arn
+ Effect: Allow
+ Principal:
+ Service: s3.amazonaws.com
+ Resource:
+ Fn::GetAtt:
+ - 3EventNotificationQueue8DC270B4
+ - Arn
+ Version: "2012-10-17"
+ Queues:
+ - Ref: 3EventNotificationQueue8DC270B4
+ Metadata:
+ aws:cdk:path: direct-target/3EventNotificationQueue/Policy/Resource
+ BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC:
+ Type: AWS::IAM::Role
+ Properties:
+ AssumeRolePolicyDocument:
+ Statement:
+ - Action: sts:AssumeRole
+ Effect: Allow
+ Principal:
+ Service: lambda.amazonaws.com
+ Version: "2012-10-17"
+ ManagedPolicyArns:
+ - Fn::Join:
+ - ""
+ - - "arn:"
+ - Ref: AWS::Partition
+ - :iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
+ Metadata:
+ aws:cdk:path: direct-target/BucketNotificationsHandler050a0587b7544547bf325f094a3db834/Role/Resource
+ BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleDefaultPolicy2CF63D36:
+ Type: AWS::IAM::Policy
+ Properties:
+ PolicyDocument:
+ Statement:
+ - Action: s3:PutBucketNotification
+ Effect: Allow
+ Resource: "*"
+ Version: "2012-10-17"
+ PolicyName: BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleDefaultPolicy2CF63D36
+ Roles:
+ - Ref: BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC
+ Metadata:
+ aws:cdk:path: direct-target/BucketNotificationsHandler050a0587b7544547bf325f094a3db834/Role/DefaultPolicy/Resource
+ BucketNotificationsHandler050a0587b7544547bf325f094a3db8347ECC3691:
+ Type: AWS::Lambda::Function
+ Properties:
+ Description: AWS CloudFormation handler for "Custom::S3BucketNotifications" resources (@aws-cdk/aws-s3)
+ Code:
+ ZipFile: |
+ import boto3 # type: ignore
+ import json
+ import logging
+ import urllib.request
+
+ s3 = boto3.client("s3")
+
+ EVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'
+ CONFIGURATION_TYPES = ["TopicConfigurations", "QueueConfigurations", "LambdaFunctionConfigurations"]
+
+ def handler(event: dict, context):
+ response_status = "SUCCESS"
+ error_message = ""
+ try:
+ props = event["ResourceProperties"]
+ notification_configuration = props["NotificationConfiguration"]
+ managed = props.get('Managed', 'true').lower() == 'true'
+ stack_id = event['StackId']
+ old = event.get("OldResourceProperties", {}).get("NotificationConfiguration", {})
+ if managed:
+ config = handle_managed(event["RequestType"], notification_configuration)
+ else:
+ config = handle_unmanaged(props["BucketName"], stack_id, event["RequestType"], notification_configuration, old)
+ s3.put_bucket_notification_configuration(Bucket=props["BucketName"], NotificationConfiguration=config)
+ except Exception as e:
+ logging.exception("Failed to put bucket notification configuration")
+ response_status = "FAILED"
+ error_message = f"Error: {str(e)}. "
+ finally:
+ submit_response(event, context, response_status, error_message)
+
+ def handle_managed(request_type, notification_configuration):
+ if request_type == 'Delete':
+ return {}
+ return notification_configuration
+
+ def handle_unmanaged(bucket, stack_id, request_type, notification_configuration, old):
+ def with_id(n):
+ n['Id'] = f"{stack_id}-{hash(json.dumps(n, sort_keys=True))}"
+ return n
+
+ external_notifications = {}
+ existing_notifications = s3.get_bucket_notification_configuration(Bucket=bucket)
+ for t in CONFIGURATION_TYPES:
+ if request_type == 'Update':
+ ids = [with_id(n) for n in old.get(t, [])]
+ old_incoming_ids = [n['Id'] for n in ids]
+ external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'] in old_incoming_ids]
+ elif request_type == 'Delete':
+ external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f"{stack_id}-")]
+ elif request_type == 'Create':
+ external_notifications[t] = [n for n in existing_notifications.get(t, [])]
+ if EVENTBRIDGE_CONFIGURATION in existing_notifications:
+ external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]
+
+ if request_type == 'Delete':
+ return external_notifications
+
+ notifications = {}
+ for t in CONFIGURATION_TYPES:
+ external = external_notifications.get(t, [])
+ incoming = [with_id(n) for n in notification_configuration.get(t, [])]
+ notifications[t] = external + incoming
+
+ if EVENTBRIDGE_CONFIGURATION in notification_configuration:
+ notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]
+ elif EVENTBRIDGE_CONFIGURATION in external_notifications:
+ notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]
+
+ return notifications
+
+ def submit_response(event: dict, context, response_status: str, error_message: str):
+ response_body = json.dumps(
+ {
+ "Status": response_status,
+ "Reason": f"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}",
+ "PhysicalResourceId": event.get("PhysicalResourceId") or event["LogicalResourceId"],
+ "StackId": event["StackId"],
+ "RequestId": event["RequestId"],
+ "LogicalResourceId": event["LogicalResourceId"],
+ "NoEcho": False,
+ }
+ ).encode("utf-8")
+ headers = {"content-type": "", "content-length": str(len(response_body))}
+ try:
+ req = urllib.request.Request(url=event["ResponseURL"], headers=headers, data=response_body, method="PUT")
+ with urllib.request.urlopen(req) as response:
+ print(response.read().decode("utf-8"))
+ print("Status code: " + response.reason)
+ except Exception as e:
+ print("send(..) failed executing request.urlopen(..): " + str(e))
+ Handler: index.handler
+ Role:
+ Fn::GetAtt:
+ - BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC
+ - Arn
+ Runtime: python3.11
+ Timeout: 300
+ DependsOn:
+ - BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleDefaultPolicy2CF63D36
+ - BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC
+ Metadata:
+ aws:cdk:path: direct-target/BucketNotificationsHandler050a0587b7544547bf325f094a3db834/Resource
+ CDKMetadata:
+ Type: AWS::CDK::Metadata
+ Properties:
+ Analytics: v2:deflate64:H4sIAAAAAAAA/12PzQ6CMAzHn4X7qIImnoWbiQniA5g5JqnAhuumMQvvrjISiJf+P/rroSkk2x2kEX9RLKombvEK/my5aNi3unjagM+caKRl+U1NLkihWxTvuQ55YPQg8CcnnfztghnnfLCIA0PegS91O+KjzuDEDCx3ZHVXStLOiEAu/JH3Par6jyqMfmIlTcZJsj2RtN/H6h+3vM21qtCiVgM78CdfJTtYQ5JEd0KMjVMWOwll0A+4dEYELgEAAA==
+ Metadata:
+ aws:cdk:path: direct-target/CDKMetadata/Default
+ Condition: CDKMetadataAvailable
+Mappings:
+ LatestNodeRuntimeMap:
+ af-south-1:
+ value: nodejs20.x
+ ap-east-1:
+ value: nodejs20.x
+ ap-northeast-1:
+ value: nodejs20.x
+ ap-northeast-2:
+ value: nodejs20.x
+ ap-northeast-3:
+ value: nodejs20.x
+ ap-south-1:
+ value: nodejs20.x
+ ap-south-2:
+ value: nodejs20.x
+ ap-southeast-1:
+ value: nodejs20.x
+ ap-southeast-2:
+ value: nodejs20.x
+ ap-southeast-3:
+ value: nodejs20.x
+ ap-southeast-4:
+ value: nodejs20.x
+ ca-central-1:
+ value: nodejs20.x
+ cn-north-1:
+ value: nodejs18.x
+ cn-northwest-1:
+ value: nodejs18.x
+ eu-central-1:
+ value: nodejs20.x
+ eu-central-2:
+ value: nodejs20.x
+ eu-north-1:
+ value: nodejs20.x
+ eu-south-1:
+ value: nodejs20.x
+ eu-south-2:
+ value: nodejs20.x
+ eu-west-1:
+ value: nodejs20.x
+ eu-west-2:
+ value: nodejs20.x
+ eu-west-3:
+ value: nodejs20.x
+ il-central-1:
+ value: nodejs20.x
+ me-central-1:
+ value: nodejs20.x
+ me-south-1:
+ value: nodejs20.x
+ sa-east-1:
+ value: nodejs20.x
+ us-east-1:
+ value: nodejs20.x
+ us-east-2:
+ value: nodejs20.x
+ us-west-1:
+ value: nodejs20.x
+ us-west-2:
+ value: nodejs20.x
+Conditions:
+ CDKMetadataAvailable:
+ Fn::Or:
+ - Fn::Or:
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - af-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-east-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-northeast-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-northeast-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-southeast-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-southeast-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ca-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - cn-north-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - cn-northwest-1
+ - Fn::Or:
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-north-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-west-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-west-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-west-3
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - il-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - me-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - me-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - sa-east-1
+ - Fn::Or:
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-east-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-east-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-west-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-west-2
+Parameters:
+ BootstrapVersion:
+ Type: AWS::SSM::Parameter::Value<String>
+ Default: /cdk-bootstrap/hnb659fds/version
+ Description: Version of the CDK Bootstrap resources in this environment, automatically retrieved from SSM Parameter Store. [cdk:skip]
+Rules:
+ CheckBootstrapVersion:
+ Assertions:
+ - Assert:
+ Fn::Not:
+ - Fn::Contains:
+ - - "1"
+ - "2"
+ - "3"
+ - "4"
+ - "5"
+ - Ref: BootstrapVersion
+ AssertDescription: CDK bootstrap stack version 6 required. Please run 'cdk bootstrap' with a recent version of the CDK CLI.
+
diff --git a/javav2/example_code/s3/src/main/resources/log4j2.xml b/javav2/example_code/s3/src/main/resources/log4j2.xml
index 4c0eb20893a..e6ac524bde5 100644
--- a/javav2/example_code/s3/src/main/resources/log4j2.xml
+++ b/javav2/example_code/s3/src/main/resources/log4j2.xml
@@ -19,6 +19,8 @@
+
+
diff --git a/javav2/example_code/s3/src/main/resources/queue-topic.yaml b/javav2/example_code/s3/src/main/resources/queue-topic.yaml
new file mode 100644
index 00000000000..fc5404dd7c3
--- /dev/null
+++ b/javav2/example_code/s3/src/main/resources/queue-topic.yaml
@@ -0,0 +1,302 @@
+Resources:
+ s3EventBucket56513D52:
+ Type: AWS::S3::Bucket
+ Properties:
+ Tags:
+ - Key: aws-cdk:auto-delete-objects
+ Value: "true"
+ UpdateReplacePolicy: Delete
+ DeletionPolicy: Delete
+ Metadata:
+ aws:cdk:path: queue-topic/s3EventBucket/Resource
+ s3EventBucketPolicy9710F554:
+ Type: AWS::S3::BucketPolicy
+ Properties:
+ Bucket:
+ Ref: s3EventBucket56513D52
+ PolicyDocument:
+ Statement:
+ - Action:
+ - s3:DeleteObject*
+ - s3:GetBucket*
+ - s3:List*
+ - s3:PutBucketPolicy
+ Effect: Allow
+ Principal:
+ AWS:
+ Fn::GetAtt:
+ - CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092
+ - Arn
+ Resource:
+ - Fn::GetAtt:
+ - s3EventBucket56513D52
+ - Arn
+ - Fn::Join:
+ - ""
+ - - Fn::GetAtt:
+ - s3EventBucket56513D52
+ - Arn
+ - /*
+ Version: "2012-10-17"
+ Metadata:
+ aws:cdk:path: queue-topic/s3EventBucket/Policy/Resource
+ s3EventBucketAutoDeleteObjectsCustomResource87E19A8C:
+ Type: Custom::S3AutoDeleteObjects
+ Properties:
+ ServiceToken:
+ Fn::GetAtt:
+ - CustomS3AutoDeleteObjectsCustomResourceProviderHandler9D90184F
+ - Arn
+ BucketName:
+ Ref: s3EventBucket56513D52
+ DependsOn:
+ - s3EventBucketPolicy9710F554
+ UpdateReplacePolicy: Delete
+ DeletionPolicy: Delete
+ Metadata:
+ aws:cdk:path: queue-topic/s3EventBucket/AutoDeleteObjectsCustomResource/Default
+ CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092:
+ Type: AWS::IAM::Role
+ Properties:
+ AssumeRolePolicyDocument:
+ Version: "2012-10-17"
+ Statement:
+ - Action: sts:AssumeRole
+ Effect: Allow
+ Principal:
+ Service: lambda.amazonaws.com
+ ManagedPolicyArns:
+ - Fn::Sub: arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
+ Metadata:
+ aws:cdk:path: queue-topic/Custom::S3AutoDeleteObjectsCustomResourceProvider/Role
+ CustomS3AutoDeleteObjectsCustomResourceProviderHandler9D90184F:
+ Type: AWS::Lambda::Function
+ Properties:
+ Code:
+ S3Bucket:
+ Fn::Sub: cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}
+ S3Key: faa95a81ae7d7373f3e1f242268f904eb748d8d0fdd306e8a6fe515a1905a7d6.zip
+ Timeout: 900
+ MemorySize: 128
+ Handler: index.handler
+ Role:
+ Fn::GetAtt:
+ - CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092
+ - Arn
+ Runtime:
+ Fn::FindInMap:
+ - LatestNodeRuntimeMap
+ - Ref: AWS::Region
+ - value
+ Description:
+ Fn::Join:
+ - ""
+ - - "Lambda function for auto-deleting objects in "
+ - Ref: s3EventBucket56513D52
+ - " S3 bucket."
+ DependsOn:
+ - CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092
+ Metadata:
+ aws:cdk:path: queue-topic/Custom::S3AutoDeleteObjectsCustomResourceProvider/Handler
+ aws:asset:path: asset.faa95a81ae7d7373f3e1f242268f904eb748d8d0fdd306e8a6fe515a1905a7d6
+ aws:asset:property: Code
+ 3EventQueueD059F0AE:
+ Type: AWS::SQS::Queue
+ Properties:
+ ReceiveMessageWaitTimeSeconds: 5
+ VisibilityTimeout: 10
+ UpdateReplacePolicy: Delete
+ DeletionPolicy: Delete
+ Metadata:
+ aws:cdk:path: queue-topic/3EventQueue/Resource
+ Subscriber1956AC64:
+ Type: AWS::SQS::Queue
+ Properties:
+ ReceiveMessageWaitTimeSeconds: 5
+ VisibilityTimeout: 10
+ UpdateReplacePolicy: Delete
+ DeletionPolicy: Delete
+ Metadata:
+ aws:cdk:path: queue-topic/Subscriber/Resource
+ 3EventTopicBC6D4CD4:
+ Type: AWS::SNS::Topic
+ Metadata:
+ aws:cdk:path: queue-topic/3EventTopic/Resource
+ 3EventSubscription83516A75:
+ Type: AWS::SNS::Subscription
+ Properties:
+ Endpoint:
+ Fn::GetAtt:
+ - Subscriber1956AC64
+ - Arn
+ Protocol: sqs
+ TopicArn:
+ Ref: 3EventTopicBC6D4CD4
+ Metadata:
+ aws:cdk:path: queue-topic/3EventSubscription/Resource
+ CDKMetadata:
+ Type: AWS::CDK::Metadata
+ Properties:
+ Analytics: v2:deflate64:H4sIAAAAAAAA/12PwQ6CMBBEv4V7qYImnIWbiQmCd1NKISvQYrfFGMK/W8BE4mlm3k4m2ZAGx4iGHnuhz8vGb6GgY24Yb4hD9xEPdIwtb4QhSSW/bpVUtcDfP7zmieAT6Xi1wor5thgHpYM31QOf4WpyWyDX0BtQcqbbPE0ksWhUlwlUVvNla+svrO9B1n+tVKsBSqFjhoKcEIVxv9RLr5KJkiUs4+TMBrYLIrqnQeA9EMDXVhroBM1W/QCZoZgsGAEAAA==
+ Metadata:
+ aws:cdk:path: queue-topic/CDKMetadata/Default
+ Condition: CDKMetadataAvailable
+Mappings:
+ LatestNodeRuntimeMap:
+ af-south-1:
+ value: nodejs20.x
+ ap-east-1:
+ value: nodejs20.x
+ ap-northeast-1:
+ value: nodejs20.x
+ ap-northeast-2:
+ value: nodejs20.x
+ ap-northeast-3:
+ value: nodejs20.x
+ ap-south-1:
+ value: nodejs20.x
+ ap-south-2:
+ value: nodejs20.x
+ ap-southeast-1:
+ value: nodejs20.x
+ ap-southeast-2:
+ value: nodejs20.x
+ ap-southeast-3:
+ value: nodejs20.x
+ ap-southeast-4:
+ value: nodejs20.x
+ ca-central-1:
+ value: nodejs20.x
+ cn-north-1:
+ value: nodejs18.x
+ cn-northwest-1:
+ value: nodejs18.x
+ eu-central-1:
+ value: nodejs20.x
+ eu-central-2:
+ value: nodejs20.x
+ eu-north-1:
+ value: nodejs20.x
+ eu-south-1:
+ value: nodejs20.x
+ eu-south-2:
+ value: nodejs20.x
+ eu-west-1:
+ value: nodejs20.x
+ eu-west-2:
+ value: nodejs20.x
+ eu-west-3:
+ value: nodejs20.x
+ il-central-1:
+ value: nodejs20.x
+ me-central-1:
+ value: nodejs20.x
+ me-south-1:
+ value: nodejs20.x
+ sa-east-1:
+ value: nodejs20.x
+ us-east-1:
+ value: nodejs20.x
+ us-east-2:
+ value: nodejs20.x
+ us-west-1:
+ value: nodejs20.x
+ us-west-2:
+ value: nodejs20.x
+Conditions:
+ CDKMetadataAvailable:
+ Fn::Or:
+ - Fn::Or:
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - af-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-east-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-northeast-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-northeast-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-southeast-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-southeast-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ca-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - cn-north-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - cn-northwest-1
+ - Fn::Or:
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-north-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-west-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-west-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-west-3
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - il-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - me-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - me-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - sa-east-1
+ - Fn::Or:
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-east-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-east-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-west-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-west-2
+Parameters:
+ BootstrapVersion:
+ Type: AWS::SSM::Parameter::Value<String>
+ Default: /cdk-bootstrap/hnb659fds/version
+ Description: Version of the CDK Bootstrap resources in this environment, automatically retrieved from SSM Parameter Store. [cdk:skip]
+Rules:
+ CheckBootstrapVersion:
+ Assertions:
+ - Assert:
+ Fn::Not:
+ - Fn::Contains:
+ - - "1"
+ - "2"
+ - "3"
+ - "4"
+ - "5"
+ - Ref: BootstrapVersion
+ AssertDescription: CDK bootstrap stack version 6 required. Please run 'cdk bootstrap' with a recent version of the CDK CLI.
+
diff --git a/javav2/example_code/s3/src/test/java/com/example/s3/ProcessS3EventNotificationTest.java b/javav2/example_code/s3/src/test/java/com/example/s3/ProcessS3EventNotificationTest.java
new file mode 100644
index 00000000000..9d91d1a8701
--- /dev/null
+++ b/javav2/example_code/s3/src/test/java/com/example/s3/ProcessS3EventNotificationTest.java
@@ -0,0 +1,47 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.example.s3;
+
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+import software.amazon.awssdk.services.sqs.SqsAsyncClient;
+import software.amazon.awssdk.services.sqs.model.QueueAttributeName;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+class ProcessS3EventNotificationTest {
+ static SqsAsyncClient sqsClient = ProcessS3EventNotification.sqsClient;
+
+ @BeforeAll
+ static void setUp() {
+ ProcessS3EventNotification.deployCloudFormationStack();
+ }
+
+ @AfterAll
+ static void tearDown() {
+ ProcessS3EventNotification.destroyCloudFormationStack();
+ }
+
+ @Test
+ @Tag("IntegrationTest")
+ void processS3EventsReadsProcessesAndDeletes() {
+ String queueUrl = ProcessS3EventNotification.getQueueUrl();
+ String queueArn = ProcessS3EventNotification.getQueueArn(queueUrl);
+ String bucketName = ProcessS3EventNotification.getBucketName();
+
+ ProcessS3EventNotification.processS3Events(bucketName, queueUrl, queueArn);
+
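+        // The example consumes and deletes each notification message it handles,
+        // so the queue is expected to be empty once processing completes.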
+ sqsClient.receiveMessage(r -> r
+ .queueUrl(queueUrl)
+ .maxNumberOfMessages(1)
+ ).thenAccept(receiveMessageResponse ->
+ assertEquals(0, receiveMessageResponse.messages().size())
+ ).join();
+ }
+}
\ No newline at end of file
diff --git a/javav2/example_code/s3/src/test/java/com/example/s3/PutBucketS3EventNotificationEventBridgeTest.java b/javav2/example_code/s3/src/test/java/com/example/s3/PutBucketS3EventNotificationEventBridgeTest.java
new file mode 100644
index 00000000000..6f74e43b838
--- /dev/null
+++ b/javav2/example_code/s3/src/test/java/com/example/s3/PutBucketS3EventNotificationEventBridgeTest.java
@@ -0,0 +1,107 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.example.s3;
+
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.sqs.SqsAsyncClient;
+import software.amazon.awssdk.services.sqs.model.ReceiveMessageResponse;
+import software.amazon.awssdk.transfer.s3.S3TransferManager;
+
+import java.net.URISyntaxException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+class PutBucketS3EventNotificationEventBridgeTest {
+ static String bucketName;
+ static String topicArn;
+ static String directToQueueUrl;
+ static String directToQueueArn;
+ static String subscriberQueueUrl;
+ static String subscriberQueueArn;
+ static SqsAsyncClient sqsClient = PutBucketS3EventNotificationEventBridge.sqsClient;
+ private static final Logger logger = LoggerFactory.getLogger(PutBucketS3EventNotificationEventBridgeTest.class);
+ static S3TransferManager transferManager = S3TransferManager.create();
+
+ @BeforeAll
+ static void setUp() {
+ PutBucketS3EventNotificationEventBridge.deployCloudFormationStack();
+ bucketName = PutBucketS3EventNotificationEventBridge.getBucketName();
+ topicArn = PutBucketS3EventNotificationEventBridge.getTopicArn();
+ directToQueueUrl = PutBucketS3EventNotificationEventBridge.getQueueUrl(false);
+ directToQueueArn = PutBucketS3EventNotificationEventBridge.getQueueArn(directToQueueUrl);
+ subscriberQueueUrl = PutBucketS3EventNotificationEventBridge.getQueueUrl(true);
+ subscriberQueueArn = PutBucketS3EventNotificationEventBridge.getQueueArn(subscriberQueueUrl);
+ }
+
+ @AfterAll
+ static void tearDown() {
+ PutBucketS3EventNotificationEventBridge.deleteRule();
+ PutBucketS3EventNotificationEventBridge.destroyCloudFormationStack();
+ }
+
+ @Test
+ @Tag("IntegrationTest")
+ void setBucketNotificationToEventBridge() {
+
+ String ruleArn = PutBucketS3EventNotificationEventBridge.setBucketNotificationToEventBridge(bucketName, topicArn, directToQueueArn);
+ PutBucketS3EventNotificationEventBridge.addPermissions(directToQueueArn, directToQueueUrl,
+ subscriberQueueArn, subscriberQueueUrl, topicArn, ruleArn);
+ triggerS3EventMessages();
+ try {
+ Thread.sleep(Duration.ofSeconds(30).toMillis()); // Wait for messages to route through.
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+
+        List<String> urls = getQueueUrls();
+ urls.forEach(url -> {
+ ReceiveMessageResponse response = sqsClient.receiveMessage(b -> b
+ .queueUrl(url)).join();
+ logger.info("Messages received at queue {}: {}", url, response);
+ assertTrue(response.hasMessages());
+ }
+ );
+ }
+
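+    /**
+     * Uploads the files in the test resource directory 'uploadDirectory' to the bucket,
+     * causing S3 to emit Object Created events for each uploaded object.
+     */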
+ static void triggerS3EventMessages() {
+ Path uploadDir;
+ try {
+ uploadDir = Paths.get(
+ PutBucketS3EventNotificationEventBridge.class.getClassLoader().getResource("uploadDirectory").toURI());
+ } catch (URISyntaxException e) {
+ throw new RuntimeException(e);
+ }
+
+ transferManager.uploadDirectory(b -> b
+ .bucket(bucketName)
+ .source(uploadDir)
+ .build()).completionFuture()
+ .whenComplete((completedUpload, t) -> {
+ if (t != null) {
+ logger.error("Failed to upload directory", t);
+ return;
+ }
+ completedUpload.failedTransfers().forEach(failedUpload ->
+ logger.error("Object {} failed to upload with exception {}",
+ failedUpload.request().putObjectRequest().key(),
+ failedUpload.exception().getMessage())
+ );
+ }).join();
+ }
+
+    static List<String> getQueueUrls() {
+ return sqsClient.listQueues()
+ .thenApply(r -> new ArrayList<>(r.queueUrls())).join();
+ }
+}
\ No newline at end of file
diff --git a/javav2/example_code/sqs/README.md b/javav2/example_code/sqs/README.md
index 06635698724..d8d7935f0d7 100644
--- a/javav2/example_code/sqs/README.md
+++ b/javav2/example_code/sqs/README.md
@@ -53,6 +53,7 @@ Code examples that show you how to accomplish a specific task by calling multipl
functions within the same service.
- [Create and publish to a FIFO topic](../sns/src/main/java/com/example/sns/PriceUpdateExample.java)
+- [Process S3 event notifications](../s3/src/main/java/com/example/s3/ProcessS3EventNotification.java)
- [Publish messages to queues](../../usecases/topics_and_queues/src/main/java/com/example/sns/SNSWorkflow.java)
@@ -85,6 +86,18 @@ This example shows you how to create and publish to a FIFO Amazon SNS topic.
+#### Process S3 event notifications
+
+This example shows you how to work with S3 event notifications in an object-oriented way.
+
+
+
+
+
+
+
+
+
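+A minimal sketch of the core idea, assuming the AWS SDK for Java 2.x S3 event notifications module is on the classpath and `messageBody` is a placeholder for the JSON body of a received SQS message:
+
+```java
+import software.amazon.awssdk.eventnotifications.s3.model.S3EventNotification;
+
+// Parse the notification JSON into a typed object instead of handling raw strings.
+S3EventNotification notification = S3EventNotification.fromJson(messageBody);
+notification.getRecords().forEach(record ->
+        System.out.println(record.getEventName() + " -> " + record.getS3().getObject().getKey()));
+```
+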
#### Publish messages to queues
This example shows you how to do the following:
diff --git a/resources/cdk/javav2_s3_event_notification/README.md b/resources/cdk/javav2_s3_event_notification/README.md
new file mode 100644
index 00000000000..80bcd8c7f76
--- /dev/null
+++ b/resources/cdk/javav2_s3_event_notification/README.md
@@ -0,0 +1,26 @@
+# Java CDK app for stacks used in Java v2 examples
+
+This directory contains a Java CDK application that produces stacks for code examples.
+
+The following Java v2 code examples use stacks produced by the application in this directory:
+
+* [PutBucketS3EventNotificationEventBridge.java](../../javav2/example_code/s3/src/main/java/com/example/s3/PutBucketS3EventNotificationEventBridge.java)
+ * uses stack named `queue-topic`
+* [ProcessS3EventNotificationTest.java](../../javav2/example_code/s3/src/test/java/com/example/s3/ProcessS3EventNotificationTest.java)
+ * uses stack named `direct-target`
+
+
+The `cdk.json` file tells the CDK Toolkit how to execute your app.
+
+It is a [Maven](https://maven.apache.org/)-based project, so you can open it with any Maven-compatible Java IDE to build and run tests.
+
+## Useful commands
+
+ * `mvn package` compile and run tests
+ * `cdk ls` list all stacks in the app
+ * `cdk synth` emits the synthesized CloudFormation template
+ * `cdk deploy` deploy this stack to your default AWS account/region
+ * `cdk diff` compare deployed stack with current state
+ * `cdk docs` open CDK documentation
+
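+For example, running `cdk deploy queue-topic` deploys only the stack used by the `PutBucketS3EventNotificationEventBridge` example (assuming your environment has already been bootstrapped with `cdk bootstrap`).
+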
+Enjoy!
diff --git a/resources/cdk/javav2_s3_event_notification/cdk.json b/resources/cdk/javav2_s3_event_notification/cdk.json
new file mode 100644
index 00000000000..723b2f18b0c
--- /dev/null
+++ b/resources/cdk/javav2_s3_event_notification/cdk.json
@@ -0,0 +1,68 @@
+{
+ "app": "mvn -e -q compile exec:java",
+ "watch": {
+ "include": [
+ "**"
+ ],
+ "exclude": [
+ "README.md",
+ "cdk*.json",
+ "target",
+ "pom.xml",
+ "src/test"
+ ]
+ },
+ "context": {
+ "@aws-cdk/aws-lambda:recognizeLayerVersion": true,
+ "@aws-cdk/core:checkSecretUsage": true,
+ "@aws-cdk/core:target-partitions": [
+ "aws",
+ "aws-cn"
+ ],
+ "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true,
+ "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true,
+ "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true,
+ "@aws-cdk/aws-iam:minimizePolicies": true,
+ "@aws-cdk/core:validateSnapshotRemovalPolicy": true,
+ "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true,
+ "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true,
+ "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true,
+ "@aws-cdk/aws-apigateway:disableCloudWatchRole": true,
+ "@aws-cdk/core:enablePartitionLiterals": true,
+ "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true,
+ "@aws-cdk/aws-iam:standardizedServicePrincipals": true,
+ "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true,
+ "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true,
+ "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true,
+ "@aws-cdk/aws-route53-patters:useCertificate": true,
+ "@aws-cdk/customresources:installLatestAwsSdkDefault": false,
+ "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true,
+ "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true,
+ "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true,
+ "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true,
+ "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true,
+ "@aws-cdk/aws-redshift:columnId": true,
+ "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true,
+ "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true,
+ "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true,
+ "@aws-cdk/aws-kms:aliasNameRef": true,
+ "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true,
+ "@aws-cdk/core:includePrefixInUniqueNameGeneration": true,
+ "@aws-cdk/aws-efs:denyAnonymousAccess": true,
+ "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true,
+ "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true,
+ "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true,
+ "@aws-cdk/aws-rds:auroraClusterChangeScopeOfInstanceParameterGroupWithEachParameters": true,
+ "@aws-cdk/aws-appsync:useArnForSourceApiAssociationIdentifier": true,
+ "@aws-cdk/aws-rds:preventRenderingDeprecatedCredentials": true,
+ "@aws-cdk/aws-codepipeline-actions:useNewDefaultBranchForCodeCommitSource": true,
+ "@aws-cdk/aws-cloudwatch-actions:changeLambdaPermissionLogicalIdForLambdaAction": true,
+ "@aws-cdk/aws-codepipeline:crossAccountKeysDefaultValueToFalse": true,
+ "@aws-cdk/aws-codepipeline:defaultPipelineTypeToV2": true,
+ "@aws-cdk/aws-kms:reduceCrossAccountRegionPolicyScope": true,
+ "@aws-cdk/aws-eks:nodegroupNameAttribute": true,
+ "@aws-cdk/aws-ec2:ebsDefaultGp3Volume": true,
+ "@aws-cdk/aws-ecs:removeDefaultDeploymentAlarm": true,
+ "@aws-cdk/custom-resources:logApiResponseDataPropertyTrueDefault": false
+ }
+}
diff --git a/resources/cdk/javav2_s3_event_notification/pom.xml b/resources/cdk/javav2_s3_event_notification/pom.xml
new file mode 100644
index 00000000000..eaba8e55162
--- /dev/null
+++ b/resources/cdk/javav2_s3_event_notification/pom.xml
@@ -0,0 +1,60 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>com.myorg</groupId>
+    <artifactId>javav2_s3_event_notification</artifactId>
+    <version>0.1</version>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+        <cdk.version>2.147.2</cdk.version>
+        <constructs.version>[10.0.0,11.0.0)</constructs.version>
+        <junit.version>5.7.1</junit.version>
+    </properties>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.11.0</version>
+                <configuration>
+                    <release>17</release>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>exec-maven-plugin</artifactId>
+                <version>3.1.0</version>
+                <configuration>
+                    <mainClass>com.myorg.Javav2S3EventNotificationApp</mainClass>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+    <dependencies>
+        <dependency>
+            <groupId>software.amazon.awscdk</groupId>
+            <artifactId>aws-cdk-lib</artifactId>
+            <version>${cdk.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>software.constructs</groupId>
+            <artifactId>constructs</artifactId>
+            <version>${constructs.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.junit.jupiter</groupId>
+            <artifactId>junit-jupiter</artifactId>
+            <version>${junit.version}</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/DirectTargetStack.java b/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/DirectTargetStack.java
new file mode 100644
index 00000000000..f0b6c61cd11
--- /dev/null
+++ b/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/DirectTargetStack.java
@@ -0,0 +1,38 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.myorg;
+
+import software.amazon.awscdk.Duration;
+import software.amazon.awscdk.RemovalPolicy;
+import software.amazon.awscdk.Stack;
+import software.amazon.awscdk.StackProps;
+import software.amazon.awscdk.services.s3.Bucket;
+import software.amazon.awscdk.services.s3.EventType;
+import software.amazon.awscdk.services.s3.notifications.SnsDestination;
+import software.amazon.awscdk.services.s3.notifications.SqsDestination;
+import software.amazon.awscdk.services.sns.Topic;
+import software.amazon.awscdk.services.sqs.Queue;
+import software.constructs.Construct;
+
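+/**
+ * Deployed as the 'direct-target' stack. Creates an S3 bucket that sends
+ * OBJECT_REMOVED_DELETE event notifications directly to an SQS queue.
+ */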
+public class DirectTargetStack extends Stack {
+ public DirectTargetStack(final Construct scope, final String id) {
+ this(scope, id, null);
+ }
+
+ public DirectTargetStack(final Construct scope, final String id, final StackProps props) {
+ super(scope, id, props);
+
+ final Bucket bucket = Bucket.Builder.create(this, "s3EventNotificationBucket")
+ .removalPolicy(RemovalPolicy.DESTROY)
+ .autoDeleteObjects(true)
+ .build();
+
+ final Queue queue = Queue.Builder.create(this, "3EventNotificationQueue")
+ .visibilityTimeout(Duration.seconds(10))
+ .receiveMessageWaitTime(Duration.seconds(5))
+ .build();
+
+ bucket.addEventNotification(EventType.OBJECT_REMOVED_DELETE, new SqsDestination(queue));
+ }
+}
diff --git a/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/EventBridgeStack.java b/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/EventBridgeStack.java
new file mode 100644
index 00000000000..f1ae6df5afb
--- /dev/null
+++ b/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/EventBridgeStack.java
@@ -0,0 +1,65 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.myorg;
+
+import software.amazon.awscdk.Duration;
+import software.amazon.awscdk.RemovalPolicy;
+import software.amazon.awscdk.Stack;
+import software.amazon.awscdk.StackProps;
+import software.amazon.awscdk.services.events.EventBus;
+import software.amazon.awscdk.services.events.EventPattern;
+import software.amazon.awscdk.services.events.Rule;
+import software.amazon.awscdk.services.events.targets.SnsTopic;
+import software.amazon.awscdk.services.events.targets.SqsQueue;
+import software.amazon.awscdk.services.s3.Bucket;
+import software.amazon.awscdk.services.sns.Topic;
+import software.amazon.awscdk.services.sqs.Queue;
+import software.constructs.Construct;
+
+import java.util.List;
+import java.util.Map;
+
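+/**
+ * Deployed as the 'event-bridge' stack. Creates an S3 bucket with EventBridge
+ * notifications enabled, plus a custom event bus and a rule that routes
+ * "Object Created" events for the bucket to an SQS queue and an SNS topic.
+ */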
+public class EventBridgeStack extends Stack {
+ public EventBridgeStack(final Construct scope, final String id) {
+ this(scope, id, null);
+ }
+
+ public EventBridgeStack(final Construct scope, final String id, final StackProps props) {
+ super(scope, id, props);
+
+ final Bucket bucket = Bucket.Builder.create(this, "s3EventNotificationBucket")
+ .removalPolicy(RemovalPolicy.DESTROY)
+ .autoDeleteObjects(true)
+ .eventBridgeEnabled(true)
+ .build();
+
+ final Queue queue = Queue.Builder.create(this, "3EventNotificationQueue")
+ .visibilityTimeout(Duration.seconds(10))
+ .receiveMessageWaitTime(Duration.seconds(5))
+ .build();
+
+ final Topic topic = Topic.Builder.create(this, "3EventNotificationTopic")
+ .build();
+
+ EventBus eventBus = EventBus.Builder.create(this, "3EventBus")
+ .eventBusName("3EventBus")
+ .build();
+
+ EventPattern objectCreatedPattern = EventPattern.builder()
+ .source(List.of("aws.s3"))
+ .detailType(List.of("Object Created"))
+ .detail(Map.of("bucket", List.of(bucket.getBucketName())))
+ .build();
+
+ Rule.Builder.create(this, "3EventBusRule")
+ .description("3EventBusRule")
+ .eventBus(eventBus)
+ .eventPattern(objectCreatedPattern)
+ .targets(List.of(
+ SqsQueue.Builder.create(queue).build(),
+ SnsTopic.Builder.create(topic).build())
+ )
+ .build();
+ }
+}
diff --git a/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/Javav2S3EventNotificationApp.java b/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/Javav2S3EventNotificationApp.java
new file mode 100644
index 00000000000..3e5ab1a73f1
--- /dev/null
+++ b/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/Javav2S3EventNotificationApp.java
@@ -0,0 +1,21 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.myorg;
+
+import software.amazon.awscdk.App;
+import software.amazon.awscdk.StackProps;
+
+public class Javav2S3EventNotificationApp {
+ public static void main(final String[] args) {
+ App app = new App();
+
+ new DirectTargetStack(app, "direct-target", StackProps.builder()
+ .build());
+ new EventBridgeStack(app, "event-bridge", StackProps.builder()
+ .build());
+ new QueueTopicStack(app, "queue-topic", StackProps.builder()
+ .build());
+ app.synth();
+ }
+}
\ No newline at end of file
diff --git a/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/QueueTopicStack.java b/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/QueueTopicStack.java
new file mode 100644
index 00000000000..f97100a4f9c
--- /dev/null
+++ b/resources/cdk/javav2_s3_event_notification/src/main/java/com/myorg/QueueTopicStack.java
@@ -0,0 +1,51 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.myorg;
+
+import software.amazon.awscdk.Duration;
+import software.amazon.awscdk.RemovalPolicy;
+import software.amazon.awscdk.Stack;
+import software.amazon.awscdk.StackProps;
+import software.amazon.awscdk.services.s3.Bucket;
+import software.amazon.awscdk.services.sns.Subscription;
+import software.amazon.awscdk.services.sns.SubscriptionProtocol;
+import software.amazon.awscdk.services.sns.Topic;
+import software.amazon.awscdk.services.sqs.Queue;
+import software.constructs.Construct;
+
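+/**
+ * Deployed as the 'queue-topic' stack. Creates an S3 bucket, two SQS queues,
+ * and an SNS topic with an SQS subscription that forwards topic messages to
+ * one of the queues.
+ */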
+public class QueueTopicStack extends Stack {
+ public QueueTopicStack(final Construct scope, final String id) {
+ this(scope, id, null);
+ }
+
+ public QueueTopicStack(final Construct scope, final String id, final StackProps props) {
+ super(scope, id, props);
+
+ Bucket.Builder.create(this, "s3EventBucket")
+ .removalPolicy(RemovalPolicy.DESTROY)
+ .autoDeleteObjects(true)
+ .build();
+
+ Queue.Builder.create(this, "3EventQueue")
+ .visibilityTimeout(Duration.seconds(10))
+ .receiveMessageWaitTime(Duration.seconds(5))
+ .removalPolicy(RemovalPolicy.DESTROY)
+ .build();
+
+ Queue subscriberQueue = Queue.Builder.create(this, "Subscriber")
+ .visibilityTimeout(Duration.seconds(10))
+ .receiveMessageWaitTime(Duration.seconds(5))
+ .removalPolicy(RemovalPolicy.DESTROY)
+ .build();
+
+ Topic notificationTopic = Topic.Builder.create(this, "3EventTopic")
+ .build();
+
+ Subscription.Builder.create(this, "3EventSubscription")
+ .topic(notificationTopic)
+ .endpoint(subscriberQueue.getQueueArn())
+ .protocol(SubscriptionProtocol.SQS)
+ .build();
+ }
+}
From 41b8972849f6bfcba8efb719c07758edefe7033f Mon Sep 17 00:00:00 2001
From: Corey Pyle
Date: Fri, 19 Jul 2024 11:52:00 -0400
Subject: [PATCH 02/98] Use latest -tool release. (#6672)
---
.github/workflows/validate-doc-metadata.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/validate-doc-metadata.yml b/.github/workflows/validate-doc-metadata.yml
index 5e63ab4aa7f..3bb805320f3 100644
--- a/.github/workflows/validate-doc-metadata.yml
+++ b/.github/workflows/validate-doc-metadata.yml
@@ -16,7 +16,7 @@ jobs:
- name: checkout repo content
uses: actions/checkout@v4
- name: validate metadata
- uses: awsdocs/aws-doc-sdk-examples-tools@2024-07-11-A
+ uses: awsdocs/aws-doc-sdk-examples-tools@2024-07-19-A
with:
doc_gen_only: "False"
strict_titles: "True"
From 75c3daadf750406156fc87fa30ee499a206b4a36 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Jul 2024 11:54:16 -0400
Subject: [PATCH 03/98] Bump rexml from 3.2.8 to 3.3.2 in /ruby (#6673)
Bumps [rexml](https://github.com/ruby/rexml) from 3.2.8 to 3.3.2.
- [Release notes](https://github.com/ruby/rexml/releases)
- [Changelog](https://github.com/ruby/rexml/blob/master/NEWS.md)
- [Commits](https://github.com/ruby/rexml/compare/v3.2.8...v3.3.2)
---
updated-dependencies:
- dependency-name: rexml
dependency-type: indirect
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
ruby/Gemfile.lock | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/ruby/Gemfile.lock b/ruby/Gemfile.lock
index 677f2649859..7b50371f144 100644
--- a/ruby/Gemfile.lock
+++ b/ruby/Gemfile.lock
@@ -1443,8 +1443,8 @@ GEM
rainbow (3.1.1)
rake (13.0.6)
regexp_parser (2.8.0)
- rexml (3.2.8)
- strscan (>= 3.0.9)
+ rexml (3.3.2)
+ strscan
rspec (3.12.0)
rspec-core (~> 3.12.0)
rspec-expectations (~> 3.12.0)
From d7a2154f778daf22eaf5e2c16e4f0a0e97db09b7 Mon Sep 17 00:00:00 2001
From: Corey Pyle
Date: Mon, 22 Jul 2024 15:02:36 -0400
Subject: [PATCH 04/98] Use latest -tools commit. (#6674)
---
.github/workflows/validate-doc-metadata.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/validate-doc-metadata.yml b/.github/workflows/validate-doc-metadata.yml
index 3bb805320f3..218d1623a80 100644
--- a/.github/workflows/validate-doc-metadata.yml
+++ b/.github/workflows/validate-doc-metadata.yml
@@ -16,7 +16,7 @@ jobs:
- name: checkout repo content
uses: actions/checkout@v4
- name: validate metadata
- uses: awsdocs/aws-doc-sdk-examples-tools@2024-07-19-A
+ uses: awsdocs/aws-doc-sdk-examples-tools@2024-07-22-A
with:
doc_gen_only: "False"
strict_titles: "True"
From 099974baa3503e3c2d7a60c2807b2b3c552b6d93 Mon Sep 17 00:00:00 2001
From: Dennis Traub <604796+DennisTraub@users.noreply.github.com>
Date: Tue, 23 Jul 2024 09:14:31 -0700
Subject: [PATCH 05/98] Updates to the Bedrock Studio bootstrap script (#6669)
* Bump boto to 1.34.144
* Adapt bootstrapper script to new version.
* Add confirmation prompts for deletion
* Add permissions for prompt flows
---------
Co-authored-by: Dennis Traub
---
python/example_code/bedrock/requirements.txt | 4 +-
.../scenarios/bedrock_studio_bootstrapper.py | 840 ++++++++++--------
2 files changed, 465 insertions(+), 379 deletions(-)
diff --git a/python/example_code/bedrock/requirements.txt b/python/example_code/bedrock/requirements.txt
index 21e87d9e568..d7940eedcc1 100644
--- a/python/example_code/bedrock/requirements.txt
+++ b/python/example_code/bedrock/requirements.txt
@@ -1,5 +1,5 @@
-boto3==1.34.109
-botocore==1.34.109
+boto3==1.34.144
+botocore==1.34.144
colorama==0.4.6
iniconfig==2.0.0
jmespath==1.0.1
diff --git a/python/example_code/bedrock/scenarios/bedrock_studio_bootstrapper.py b/python/example_code/bedrock/scenarios/bedrock_studio_bootstrapper.py
index 8530a6336d0..bcc5dda678a 100644
--- a/python/example_code/bedrock/scenarios/bedrock_studio_bootstrapper.py
+++ b/python/example_code/bedrock/scenarios/bedrock_studio_bootstrapper.py
@@ -309,7 +309,18 @@ def _create_service_role(self):
self._get_service_role_policy(),
)
+ def _create_permission_boundary(self):
+ logger.info("=" * 80)
+ logger.info("Step 3: Create Permission Boundary Policy.")
+ logger.info("-" * 80)
+
+ self._create_policy(
+ self._permission_boundary_policy_name, self._get_permission_boundary()
+ )
+
def _create_role(self, role_name, trust_policy, role_policy):
+ inline_policy_name = "InlinePolicy"
+
logger.info(f"Creating role: '{role_name}'...")
try:
response = self._iam_client.create_role(
@@ -319,37 +330,80 @@ def _create_role(self, role_name, trust_policy, role_policy):
logger.info(f"Role created: {role_arn}")
except self._iam_client.exceptions.EntityAlreadyExistsException:
logger.warning(f"Role with name '{role_name}' already exists.")
+ try:
+ self._iam_client.get_role_policy(
+ RoleName=role_name,
+ PolicyName=inline_policy_name,
+ )
+ confirm_input = input(
+ f"Proceed to replace the existing inline policy named '{inline_policy_name}'?"
+ " (yes/no, default: yes) "
+ ).lower()
+                if not (confirm_input in ["", "y", "yes"]):
+ logger.warning(f"Not updating existing '{role_name}' role.")
+ return
+ except self._iam_client.exceptions.NoSuchEntityException:
+ pass
logger.info(f"Attaching inline policy to '{role_name}'...")
- try:
- self._iam_client.put_role_policy(
- RoleName=role_name,
- PolicyName="InlinePolicy",
- PolicyDocument=role_policy,
- )
- logger.info(f"Inline policy successfully attached.")
- except self._iam_client.exceptions.EntityAlreadyExistsException:
- logger.warning("Inline policy already exists.")
-
- def _create_permission_boundary(self):
- logger.info("=" * 80)
- logger.info("Step 3: Create Permission Boundary.")
- logger.info("-" * 80)
-
- logger.info(
- f"Creating permission boundary: '{self._permission_boundary_policy_name}'..."
+ self._iam_client.put_role_policy(
+ RoleName=role_name,
+ PolicyName=inline_policy_name,
+ PolicyDocument=role_policy,
)
+ logger.info(f"Successfully attached inline policy to '{role_name}'.")
+ def _create_policy(self, policy_name, policy_document):
+ policy_arn = f"arn:aws:iam::{self._account_id}:policy/{self._permission_boundary_policy_name}"
+
+ logger.info(f"Creating policy: '{policy_name}'...")
try:
self._iam_client.create_policy(
- PolicyName=self._permission_boundary_policy_name,
- PolicyDocument=self._get_permission_boundary(),
+ PolicyName=policy_name,
+ PolicyDocument=policy_document,
)
- logger.info(f"Permission boundary policy created.")
+ logger.info(f"Policy created: {policy_arn}")
except self._iam_client.exceptions.EntityAlreadyExistsException:
- logger.warning(
- f"Policy with name '{self._permission_boundary_policy_name}' already exists."
+ logger.info(f"Policy with name '{policy_name}' already exists.")
+
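+            # IAM allows at most five versions of a managed policy, so an old
+            # non-default version must be deleted before a new one can be created.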
+ policy_versions = self._iam_client.list_policy_versions(
+ PolicyArn=policy_arn
+ )["Versions"]
+ if len(policy_versions) >= 5:
+ logger.warning(
+ f"Cannot create more than 5 versions of '{policy_name}' policy."
+ )
+ sorted_policy_versions = sorted(
+ policy_versions, key=lambda x: x["CreateDate"]
+ )
+ oldest_non_default_version_id = next(
+ filter(lambda x: not x["IsDefaultVersion"], sorted_policy_versions)
+ )["VersionId"]
+ confirm_input = input(
+ f"Proceed to delete the oldest non-default version '{oldest_non_default_version_id}'?"
+ " (yes/no, default: yes) "
+ ).lower()
+                if confirm_input in ["", "y", "yes"]:
+ logger.info(
+ f"Deleting '{oldest_non_default_version_id}' version of '{policy_name}' policy..."
+ )
+ self._iam_client.delete_policy_version(
+ PolicyArn=policy_arn,
+ VersionId=oldest_non_default_version_id,
+ )
+ else:
+ logger.warning(f"Not updating existing '{policy_name}' policy.")
+ return
+
+ logger.info(
+ f"Creating new default version of existing '{policy_name}' policy..."
+ )
+ self._iam_client.create_policy_version(
+ PolicyArn=policy_arn,
+ PolicyDocument=policy_document,
+ SetAsDefault=True,
)
+ logger.info(f"Successfully updated '{policy_name}' policy.")
def _create_kms_key(self):
logger.info("=" * 80)
@@ -444,8 +498,8 @@ def _get_provisioning_role_trust_policy(self):
"Statement": [
{
"Effect": "Allow",
- "Principal": {"Service": ["datazone.amazonaws.com"]},
- "Action": ["sts:AssumeRole"],
+ "Principal": {"Service": "datazone.amazonaws.com"},
+ "Action": "sts:AssumeRole",
"Condition": {
"StringEquals": {"aws:SourceAccount": self._account_id}
},
@@ -455,27 +509,21 @@ def _get_provisioning_role_trust_policy(self):
)
def _get_provisioning_role_policy(self):
- account_id = self._account_id
- region = self._region
return json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
- "Sid": "AmazonDataZonePermissionsToCreateEnvironmentRole",
+ "Sid": "CreateStacks",
"Effect": "Allow",
"Action": [
- "iam:CreateRole",
- "iam:GetRolePolicy",
- "iam:DetachRolePolicy",
- "iam:AttachRolePolicy",
- "iam:UpdateAssumeRolePolicy",
+ "cloudformation:CreateStack",
+ "cloudformation:TagResource",
],
- "Resource": "arn:aws:iam::*:role/DataZoneBedrockProjectRole*",
+ "Resource": "arn:aws:cloudformation:*:*:stack/DataZone*",
"Condition": {
- "StringEquals": {
- "iam:PermissionsBoundary": f"arn:aws:iam::{account_id}:policy/{self._permission_boundary_policy_name}",
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"],
+ "ForAnyValue:StringEquals": {
+ "aws:TagKeys": "AmazonDataZoneEnvironment"
},
"Null": {
"aws:ResourceTag/AmazonDataZoneEnvironment": "false"
@@ -483,456 +531,415 @@ def _get_provisioning_role_policy(self):
},
},
{
- "Sid": "AmazonDataZonePermissionsToServiceRole",
+ "Sid": "ManageStacks",
"Effect": "Allow",
"Action": [
- "iam:CreateRole",
- "iam:GetRolePolicy",
- "iam:DetachRolePolicy",
- "iam:AttachRolePolicy",
- "iam:UpdateAssumeRolePolicy",
+ "cloudformation:DescribeStacks",
+ "cloudformation:DescribeStackEvents",
+ "cloudformation:UpdateStack",
+ "cloudformation:DeleteStack",
],
- "Resource": [
- "arn:aws:iam::*:role/BedrockStudio*",
- "arn:aws:iam::*:role/AmazonBedrockExecution*",
+ "Resource": "arn:aws:cloudformation:*:*:stack/DataZone*",
+ },
+ {
+ "Sid": "DenyOtherActionsNotViaCloudFormation",
+ "Effect": "Deny",
+ "NotAction": [
+ "cloudformation:DescribeStacks",
+ "cloudformation:DescribeStackEvents",
+ "cloudformation:CreateStack",
+ "cloudformation:UpdateStack",
+ "cloudformation:DeleteStack",
+ "cloudformation:TagResource",
],
+ "Resource": "*",
"Condition": {
- "StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- },
- "Null": {
- "aws:ResourceTag/AmazonDataZoneEnvironment": "false"
- },
+ "StringNotEqualsIfExists": {
+ "aws:CalledViaFirst": "cloudformation.amazonaws.com"
+ }
},
},
{
- "Sid": "IamPassRolePermissionsForBedrock",
+ "Sid": "ListResources",
"Effect": "Allow",
- "Action": ["iam:PassRole"],
- "Resource": "arn:aws:iam::*:role/AmazonBedrockExecution*",
- "Condition": {
- "StringEquals": {
- "iam:PassedToService": ["bedrock.amazonaws.com"],
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"],
- }
- },
+ "Action": [
+ "iam:ListRoles",
+ "s3:ListAllMyBuckets",
+ "aoss:ListCollections",
+ "aoss:BatchGetCollection",
+ "aoss:ListAccessPolicies",
+ "aoss:ListSecurityPolicies",
+ "aoss:ListTagsForResource",
+ "bedrock:ListAgents",
+ "bedrock:ListKnowledgeBases",
+ "bedrock:ListGuardrails",
+ "bedrock:ListPrompts",
+ "bedrock:ListFlows",
+ "bedrock:ListTagsForResource",
+ "lambda:ListFunctions",
+ "logs:DescribeLogGroups",
+ "secretsmanager:ListSecrets",
+ ],
+ "Resource": "*",
},
{
- "Sid": "IamPassRolePermissionsForLambda",
+ "Sid": "GetRoles",
"Effect": "Allow",
- "Action": ["iam:PassRole"],
- "Resource": ["arn:aws:iam::*:role/BedrockStudio*"],
+ "Action": "iam:GetRole",
+ "Resource": [
+ "arn:aws:iam::*:role/DataZoneBedrockProject*",
+ "arn:aws:iam::*:role/AmazonBedrockExecution*",
+ "arn:aws:iam::*:role/BedrockStudio*",
+ ],
+ },
+ {
+ "Sid": "CreateRoles",
+ "Effect": "Allow",
+ "Action": [
+ "iam:CreateRole",
+ "iam:PutRolePolicy",
+ "iam:AttachRolePolicy",
+ "iam:DeleteRolePolicy",
+ "iam:DetachRolePolicy",
+ ],
+ "Resource": [
+ "arn:aws:iam::*:role/DataZoneBedrockProject*",
+ "arn:aws:iam::*:role/AmazonBedrockExecution*",
+ "arn:aws:iam::*:role/BedrockStudio*",
+ ],
"Condition": {
"StringEquals": {
- "iam:PassedToService": ["lambda.amazonaws.com"],
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"],
+ "aws:ResourceTag/AmazonBedrockManaged": "true"
}
},
},
{
- "Sid": "AmazonDataZonePermissionsToManageCreatedEnvironmentRole",
+ "Sid": "ManageRoles",
"Effect": "Allow",
"Action": [
+ "iam:UpdateRole",
"iam:DeleteRole",
- "iam:GetRole",
- "iam:DetachRolePolicy",
- "iam:GetPolicy",
- "iam:DeleteRolePolicy",
- "iam:PutRolePolicy",
+ "iam:ListRolePolicies",
+ "iam:GetRolePolicy",
+ "iam:ListAttachedRolePolicies",
],
"Resource": [
- "arn:aws:iam::*:role/DataZoneBedrockProjectRole*",
- "arn:aws:iam::*:role/AmazonBedrock*",
+ "arn:aws:iam::*:role/DataZoneBedrockProject*",
+ "arn:aws:iam::*:role/AmazonBedrockExecution*",
"arn:aws:iam::*:role/BedrockStudio*",
],
"Condition": {
"StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
+ "aws:ResourceTag/AmazonBedrockManaged": "true"
}
},
},
{
- "Sid": "AmazonDataZoneCFStackCreationForEnvironments",
+ "Sid": "PassRoleToBedrockService",
"Effect": "Allow",
- "Action": [
- "cloudformation:CreateStack",
- "cloudformation:UpdateStack",
- "cloudformation:TagResource",
+ "Action": "iam:PassRole",
+ "Resource": [
+ "arn:aws:iam::*:role/AmazonBedrockExecution*",
+ "arn:aws:iam::*:role/BedrockStudio*",
],
- "Resource": ["arn:aws:cloudformation:*:*:stack/DataZone*"],
"Condition": {
- "ForAnyValue:StringLike": {
- "aws:TagKeys": "AmazonDataZoneEnvironment"
- },
- "Null": {
- "aws:ResourceTag/AmazonDataZoneEnvironment": "false"
- },
+ "StringEquals": {
+ "iam:PassedToService": "bedrock.amazonaws.com"
+ }
},
},
{
- "Sid": "AmazonDataZoneCFStackManagementForEnvironments",
+ "Sid": "PassRoleToLambdaService",
"Effect": "Allow",
- "Action": [
- "cloudformation:DeleteStack",
- "cloudformation:DescribeStacks",
- "cloudformation:DescribeStackEvents",
- ],
- "Resource": ["arn:aws:cloudformation:*:*:stack/DataZone*"],
+ "Action": "iam:PassRole",
+ "Resource": "arn:aws:iam::*:role/BedrockStudio*",
+ "Condition": {
+ "StringEquals": {
+ "iam:PassedToService": "lambda.amazonaws.com"
+ }
+ },
},
{
- "Sid": "AmazonDataZoneEnvironmentBedrockGetViaCloudformation",
+ "Sid": "CreateRoleForOpenSearchServerless",
"Effect": "Allow",
- "Action": [
- "bedrock:GetAgent",
- "bedrock:GetAgentActionGroup",
- "bedrock:GetAgentAlias",
- "bedrock:GetAgentKnowledgeBase",
- "bedrock:GetKnowledgeBase",
- "bedrock:GetDataSource",
- "bedrock:GetGuardrail",
- "bedrock:DeleteGuardrail",
- ],
+ "Action": "iam:CreateServiceLinkedRole",
"Resource": "*",
"Condition": {
"StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
+ "iam:AWSServiceName": "observability.aoss.amazonaws.com"
}
},
},
{
- "Sid": "AmazonDataZoneEnvironmentBedrockAgentPermissions",
+ "Sid": "GetDataZoneBlueprintCfnTemplates",
"Effect": "Allow",
- "Action": [
- "bedrock:CreateAgent",
- "bedrock:UpdateAgent",
- "bedrock:DeleteAgent",
- "bedrock:ListAgents",
- "bedrock:CreateAgentActionGroup",
- "bedrock:UpdateAgentActionGroup",
- "bedrock:DeleteAgentActionGroup",
- "bedrock:ListAgentActionGroups",
- "bedrock:CreateAgentAlias",
- "bedrock:UpdateAgentAlias",
- "bedrock:DeleteAgentAlias",
- "bedrock:ListAgentAliases",
- "bedrock:AssociateAgentKnowledgeBase",
- "bedrock:DisassociateAgentKnowledgeBase",
- "bedrock:UpdateAgentKnowledgeBase",
- "bedrock:ListAgentKnowledgeBases",
- "bedrock:PrepareAgent",
- ],
+ "Action": "s3:GetObject",
"Resource": "*",
"Condition": {
- "StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- },
- "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"},
+ "StringNotEquals": {
+ "s3:ResourceAccount": "${aws:PrincipalAccount}"
+ }
},
},
{
- "Sid": "AmazonDataZoneEnvironmentOpenSearch",
+ "Sid": "CreateAndAccessS3Buckets",
"Effect": "Allow",
"Action": [
+ "s3:CreateBucket",
+ "s3:DeleteBucket",
+ "s3:GetBucketPolicy",
+ "s3:PutBucketPolicy",
+ "s3:DeleteBucketPolicy",
+ "s3:PutBucketTagging",
+ "s3:PutBucketCORS",
+ "s3:PutBucketLogging",
+ "s3:PutBucketVersioning",
+ "s3:PutBucketPublicAccessBlock",
+ "s3:PutEncryptionConfiguration",
+ "s3:PutLifecycleConfiguration",
+ "s3:GetObject",
+ "s3:GetObjectVersion",
+ ],
+ "Resource": "arn:aws:s3:::br-studio-*",
+ },
+ {
+ "Sid": "ManageOssAccessPolicies",
+ "Effect": "Allow",
+ "Action": [
+ "aoss:GetAccessPolicy",
"aoss:CreateAccessPolicy",
"aoss:DeleteAccessPolicy",
"aoss:UpdateAccessPolicy",
- "aoss:GetAccessPolicy",
- "aoss:ListAccessPolicies",
+ ],
+ "Resource": "*",
+ "Condition": {
+ "StringLikeIfExists": {
+ "aoss:collection": "br-studio-*",
+ "aoss:index": "br-studio-*",
+ }
+ },
+ },
+ {
+ "Sid": "ManageOssSecurityPolicies",
+ "Effect": "Allow",
+ "Action": [
+ "aoss:GetSecurityPolicy",
"aoss:CreateSecurityPolicy",
"aoss:DeleteSecurityPolicy",
"aoss:UpdateSecurityPolicy",
- "aoss:GetSecurityPolicy",
- "aoss:ListSecurityPolicies",
],
"Resource": "*",
"Condition": {
- "StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- }
+ "StringLikeIfExists": {"aoss:collection": "br-studio-*"}
},
},
{
- "Sid": "AmazonDataZoneEnvironmentOpenSearchPermissions",
+ "Sid": "ManageOssCollections",
"Effect": "Allow",
"Action": [
+ "aoss:CreateCollection",
"aoss:UpdateCollection",
"aoss:DeleteCollection",
- "aoss:BatchGetCollection",
- "aoss:ListCollections",
- "aoss:CreateCollection",
],
"Resource": "*",
"Condition": {
"StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- },
- "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"},
+ "aws:ResourceTag/AmazonBedrockManaged": "true"
+ }
},
},
{
- "Sid": "AmazonDataZoneEnvironmentBedrockKnowledgeBasePermissions",
+ "Sid": "GetBedrockResources",
"Effect": "Allow",
"Action": [
+ "bedrock:GetAgent",
+ "bedrock:GetKnowledgeBase",
+ "bedrock:GetGuardrail",
+ "bedrock:GetPrompt",
+ "bedrock:GetFlow",
+ "bedrock:GetFlowAlias",
+ ],
+ "Resource": "*",
+ },
+ {
+ "Sid": "ManageBedrockResources",
+ "Effect": "Allow",
+ "Action": [
+ "bedrock:CreateAgent",
+ "bedrock:UpdateAgent",
+ "bedrock:PrepareAgent",
+ "bedrock:DeleteAgent",
+ "bedrock:ListAgentAliases",
+ "bedrock:GetAgentAlias",
+ "bedrock:CreateAgentAlias",
+ "bedrock:UpdateAgentAlias",
+ "bedrock:DeleteAgentAlias",
+ "bedrock:ListAgentActionGroups",
+ "bedrock:GetAgentActionGroup",
+ "bedrock:CreateAgentActionGroup",
+ "bedrock:UpdateAgentActionGroup",
+ "bedrock:DeleteAgentActionGroup",
+ "bedrock:ListAgentKnowledgeBases",
+ "bedrock:GetAgentKnowledgeBase",
+ "bedrock:AssociateAgentKnowledgeBase",
+ "bedrock:DisassociateAgentKnowledgeBase",
+ "bedrock:UpdateAgentKnowledgeBase",
"bedrock:CreateKnowledgeBase",
"bedrock:UpdateKnowledgeBase",
"bedrock:DeleteKnowledgeBase",
+ "bedrock:ListDataSources",
+ "bedrock:GetDataSource",
"bedrock:CreateDataSource",
"bedrock:UpdateDataSource",
"bedrock:DeleteDataSource",
- "bedrock:ListKnowledgeBases",
- "bedrock:ListDataSources",
+ "bedrock:CreateGuardrail",
+ "bedrock:UpdateGuardrail",
+ "bedrock:DeleteGuardrail",
+ "bedrock:CreateGuardrailVersion",
+ "bedrock:CreatePrompt",
+ "bedrock:UpdatePrompt",
+ "bedrock:DeletePrompt",
+ "bedrock:CreatePromptVersion",
+ "bedrock:CreateFlow",
+ "bedrock:UpdateFlow",
+ "bedrock:PrepareFlow",
+ "bedrock:DeleteFlow",
+ "bedrock:ListFlowAliases",
+ "bedrock:GetFlowAlias",
+ "bedrock:CreateFlowAlias",
+ "bedrock:UpdateFlowAlias",
+ "bedrock:DeleteFlowAlias",
+ "bedrock:ListFlowVersions",
+ "bedrock:GetFlowVersion",
+ "bedrock:CreateFlowVersion",
+ "bedrock:DeleteFlowVersion",
],
"Resource": "*",
"Condition": {
"StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- },
- "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"},
+ "aws:ResourceTag/AmazonBedrockManaged": "true"
+ }
},
},
{
- "Sid": "AmazonDataZoneEnvironmentBedrockGuardrailPermissions",
+ "Sid": "TagBedrockAgentAliases",
"Effect": "Allow",
- "Action": [
- "bedrock:CreateGuardrail",
- "bedrock:CreateGuardrailVersion",
- "bedrock:DeleteGuardrail",
- "bedrock:ListGuardrails",
- "bedrock:ListTagsForResource",
- "bedrock:TagResource",
- "bedrock:UntagResource",
- "bedrock:UpdateGuardrail",
- ],
- "Resource": "*",
+ "Action": "bedrock:TagResource",
+ "Resource": "arn:aws:bedrock:*:*:agent-alias/*",
"Condition": {
"StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- },
- "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"},
+ "aws:RequestTag/AmazonBedrockManaged": "true"
+ }
},
},
{
- "Sid": "AmazonDataZoneEnvironmentLambdaPermissions",
+ "Sid": "TagBedrockFlowAliases",
"Effect": "Allow",
- "Action": [
- "lambda:AddPermission",
- "lambda:CreateFunction",
- "lambda:ListFunctions",
- "lambda:UpdateFunctionCode",
- "lambda:UpdateFunctionConfiguration",
- "lambda:InvokeFunction",
- "lambda:ListVersionsByFunction",
- "lambda:PublishVersion",
- ],
- "Resource": [
- f"arn:aws:lambda:{region}:{account_id}:function:br-studio*",
- f"arn:aws:lambda:{region}:{account_id}:function:OpensearchIndexLambda*",
- f"arn:aws:lambda:{region}:{account_id}:function:IngestionTriggerLambda*",
- ],
+ "Action": "bedrock:TagResource",
+ "Resource": "arn:aws:bedrock:*:*:flow/*/alias/*",
"Condition": {
- "StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- },
"Null": {
- "aws:ResourceTag/AmazonDataZoneEnvironment": "false"
- },
+ "aws:RequestTag/AmazonDataZoneEnvironment": "false"
+ }
},
},
{
- "Sid": "AmazonDataZoneEnvironmentLambdaManagePermissions",
+ "Sid": "CreateFunctions",
"Effect": "Allow",
"Action": [
"lambda:GetFunction",
+ "lambda:CreateFunction",
+ "lambda:InvokeFunction",
"lambda:DeleteFunction",
+ "lambda:UpdateFunctionCode",
+ "lambda:GetFunctionConfiguration",
+ "lambda:UpdateFunctionConfiguration",
+ "lambda:ListVersionsByFunction",
+ "lambda:PublishVersion",
+ "lambda:GetPolicy",
+ "lambda:AddPermission",
"lambda:RemovePermission",
+ "lambda:ListTags",
],
- "Resource": [
- f"arn:aws:lambda:{region}:{account_id}:function:br-studio*",
- f"arn:aws:lambda:{region}:{account_id}:function:OpensearchIndexLambda*",
- f"arn:aws:lambda:{region}:{account_id}:function:IngestionTriggerLambda*",
- ],
- "Condition": {
- "StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- }
- },
+ "Resource": "arn:aws:lambda:*:*:function:br-studio-*",
},
{
"Sid": "ManageLogGroups",
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
- "logs:PutRetentionPolicy",
"logs:DeleteLogGroup",
+ "logs:PutRetentionPolicy",
+ "logs:DeleteRetentionPolicy",
+ "logs:GetDataProtectionPolicy",
+ "logs:PutDataProtectionPolicy",
+ "logs:DeleteDataProtectionPolicy",
+ "logs:AssociateKmsKey",
+ "logs:DisassociateKmsKey",
+ "logs:ListTagsLogGroup",
+ "logs:ListTagsForResource",
],
- "Resource": [
- "arn:aws:logs:*:*:log-group:/aws/lambda/br-studio-*",
- "arn:aws:logs:*:*:log-group:datazone-*",
- ],
- "Condition": {
- "StringEquals": {
- "aws:CalledViaFirst": "cloudformation.amazonaws.com"
- }
- },
+ "Resource": "arn:aws:logs:*:*:log-group:/aws/lambda/br-studio-*",
},
{
- "Sid": "ListTags",
+ "Sid": "GetRandomPasswordForSecret",
"Effect": "Allow",
- "Action": [
- "bedrock:ListTagsForResource",
- "aoss:ListTagsForResource",
- "lambda:ListTags",
- "iam:ListRoleTags",
- "iam:ListPolicyTags",
- ],
+ "Action": "secretsmanager:GetRandomPassword",
"Resource": "*",
- "Condition": {
- "StringEquals": {
- "aws:CalledViaFirst": "cloudformation.amazonaws.com"
- }
- },
},
{
- "Sid": "AmazonDataZoneEnvironmentTagsCreationPermissions",
+ "Sid": "ManageSecrets",
"Effect": "Allow",
"Action": [
- "iam:TagRole",
- "iam:TagPolicy",
- "iam:UntagRole",
- "iam:UntagPolicy",
- "logs:TagLogGroup",
- "bedrock:TagResource",
- "bedrock:UntagResource",
- "bedrock:ListTagsForResource",
- "aoss:TagResource",
- "aoss:UnTagResource",
- "aoss:ListTagsForResource",
- "lambda:TagResource",
- "lambda:UnTagResource",
- "lambda:ListTags",
+ "secretsmanager:CreateSecret",
+ "secretsmanager:DescribeSecret",
+ "secretsmanager:UpdateSecret",
+ "secretsmanager:DeleteSecret",
+ "secretsmanager:GetResourcePolicy",
+ "secretsmanager:PutResourcePolicy",
+ "secretsmanager:DeleteResourcePolicy",
],
- "Resource": "*",
- "Condition": {
- "ForAnyValue:StringLike": {
- "aws:TagKeys": "AmazonDataZoneEnvironment"
- },
- "Null": {
- "aws:ResourceTag/AmazonDataZoneEnvironment": "false"
- },
- "StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- },
- },
- },
- {
- "Sid": "AmazonDataZoneEnvironmentBedrockTagResource",
- "Effect": "Allow",
- "Action": ["bedrock:TagResource"],
- "Resource": f"arn:aws:bedrock:{region}:{account_id}:agent-alias/*",
- "Condition": {
- "StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- },
- "ForAnyValue:StringLike": {
- "aws:TagKeys": "AmazonDataZoneEnvironment"
- },
- },
+ "Resource": "arn:aws:secretsmanager:*:*:secret:br-studio/*",
},
{
- "Sid": "AmazonDataZoneEnvironmentKMSPermissions",
+ "Sid": "UseCustomerManagedKmsKey",
"Effect": "Allow",
"Action": [
- "kms:GenerateDataKey",
- "kms:Decrypt",
"kms:DescribeKey",
- "kms:CreateGrant",
"kms:Encrypt",
+ "kms:Decrypt",
+ "kms:GenerateDataKey",
+ "kms:CreateGrant",
+ "kms:RetireGrant",
],
"Resource": "*",
"Condition": {
- "StringEquals": {
- "aws:ResourceTag/EnableBedrock": "true",
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"],
- }
- },
- },
- {
- "Sid": "PermissionsToGetAmazonDataZoneEnvironmentBlueprintTemplates",
- "Effect": "Allow",
- "Action": "s3:GetObject",
- "Resource": "*",
- "Condition": {
- "StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- },
- "StringNotEquals": {
- "aws:ResourceAccount": "${aws:PrincipalAccount}"
- },
- },
- },
- {
- "Sid": "PermissionsToManageSecrets",
- "Effect": "Allow",
- "Action": ["secretsmanager:GetRandomPassword"],
- "Resource": "*",
- "Condition": {
- "StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- }
+ "StringEquals": {"aws:ResourceTag/EnableBedrock": "true"}
},
},
{
- "Sid": "PermissionsToStoreSecrets",
+ "Sid": "TagResources",
"Effect": "Allow",
"Action": [
- "secretsmanager:CreateSecret",
+ "iam:TagRole",
+ "iam:UntagRole",
+ "aoss:TagResource",
+ "aoss:UntagResource",
+ "bedrock:TagResource",
+ "bedrock:UntagResource",
+ "lambda:TagResource",
+ "lambda:UntagResource",
+ "logs:TagLogGroup",
+ "logs:UntagLogGroup",
+ "logs:TagResource",
+ "logs:UntagResource",
"secretsmanager:TagResource",
"secretsmanager:UntagResource",
- "secretsmanager:PutResourcePolicy",
- "secretsmanager:DeleteResourcePolicy",
- "secretsmanager:DeleteSecret",
],
"Resource": "*",
"Condition": {
"StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- },
- "Null": {
- "aws:ResourceTag/AmazonDataZoneEnvironment": "false"
- },
- },
- },
- {
- "Sid": "AmazonDataZoneManageProjectBuckets",
- "Effect": "Allow",
- "Action": [
- "s3:CreateBucket",
- "s3:DeleteBucket",
- "s3:PutBucketTagging",
- "s3:PutEncryptionConfiguration",
- "s3:PutBucketVersioning",
- "s3:PutBucketCORS",
- "s3:PutBucketPublicAccessBlock",
- "s3:PutBucketPolicy",
- "s3:PutLifecycleConfiguration",
- "s3:DeleteBucketPolicy",
- ],
- "Resource": "arn:aws:s3:::br-studio-*",
- "Condition": {
- "StringEquals": {
- "aws:CalledViaFirst": ["cloudformation.amazonaws.com"]
- }
- },
- },
- {
- "Sid": "CreateServiceLinkedRoleForOpenSearchServerless",
- "Effect": "Allow",
- "Action": "iam:CreateServiceLinkedRole",
- "Resource": "*",
- "Condition": {
- "StringEquals": {
- "iam:AWSServiceName": "observability.aoss.amazonaws.com",
- "aws:CalledViaFirst": "cloudformation.amazonaws.com",
+ "aws:ResourceTag/AmazonBedrockManaged": "true"
}
},
},
@@ -947,7 +954,7 @@ def _get_service_role_trust_policy(self):
"Statement": [
{
"Effect": "Allow",
- "Principal": {"Service": ["datazone.amazonaws.com"]},
+ "Principal": {"Service": "datazone.amazonaws.com"},
"Action": ["sts:AssumeRole", "sts:TagSession"],
"Condition": {
"StringEquals": {"aws:SourceAccount": self._account_id},
@@ -964,10 +971,20 @@ def _get_service_role_policy(self):
"Version": "2012-10-17",
"Statement": [
{
- "Sid": "DomainExecutionRoleStatement",
+ "Sid": "GetDataZoneDomain",
+ "Effect": "Allow",
+ "Action": "datazone:GetDomain",
+ "Resource": "*",
+ "Condition": {
+ "StringEquals": {
+ "aws:ResourceTag/AmazonBedrockManaged": "true"
+ }
+ },
+ },
+ {
+ "Sid": "ManageDataZoneResources",
"Effect": "Allow",
"Action": [
- "datazone:GetDomain",
"datazone:ListProjects",
"datazone:GetProject",
"datazone:CreateProject",
@@ -983,17 +1000,13 @@ def _get_service_role_policy(self):
"datazone:DeleteEnvironment",
"datazone:ListEnvironmentBlueprints",
"datazone:GetEnvironmentBlueprint",
- "datazone:CreateEnvironmentBlueprint",
- "datazone:UpdateEnvironmentBlueprint",
- "datazone:DeleteEnvironmentBlueprint",
"datazone:ListEnvironmentBlueprintConfigurations",
- "datazone:ListEnvironmentBlueprintConfigurationSummaries",
+ "datazone:GetEnvironmentBlueprintConfiguration",
"datazone:ListEnvironmentProfiles",
"datazone:GetEnvironmentProfile",
"datazone:CreateEnvironmentProfile",
"datazone:UpdateEnvironmentProfile",
"datazone:DeleteEnvironmentProfile",
- "datazone:UpdateEnvironmentDeploymentStatus",
"datazone:GetEnvironmentCredentials",
"datazone:ListGroupsForUser",
"datazone:SearchUserProfiles",
@@ -1004,21 +1017,23 @@ def _get_service_role_policy(self):
"Resource": "*",
},
{
- "Sid": "RAMResourceShareStatement",
+ "Sid": "GetResourceShareAssociations",
"Effect": "Allow",
"Action": "ram:GetResourceShareAssociations",
"Resource": "*",
},
{
+ "Sid": "InvokeBedrockModels",
"Effect": "Allow",
"Action": [
+ "bedrock:GetFoundationModelAvailability",
"bedrock:InvokeModel",
"bedrock:InvokeModelWithResponseStream",
- "bedrock:GetFoundationModelAvailability",
],
"Resource": "*",
},
{
+ "Sid": "UseCustomerManagedKmsKey",
"Effect": "Allow",
"Action": [
"kms:DescribeKey",
@@ -1026,6 +1041,9 @@ def _get_service_role_policy(self):
"kms:Decrypt",
],
"Resource": "*",
+ "Condition": {
+ "StringEquals": {"aws:ResourceTag/EnableBedrock": "true"}
+ },
},
],
}
@@ -1193,25 +1211,37 @@ def _get_permission_boundary(self):
"Version": "2012-10-17",
"Statement": [
{
- "Sid": "BedrockEnvironmentRoleKMSDecryptPermissions",
+ "Sid": "AccessS3Buckets",
"Effect": "Allow",
- "Action": ["kms:Decrypt", "kms:GenerateDataKey"],
- "Resource": "*",
+ "Action": [
+ "s3:ListBucket",
+ "s3:ListBucketVersions",
+ "s3:GetObject",
+ "s3:PutObject",
+ "s3:DeleteObject",
+ "s3:GetObjectVersion",
+ "s3:DeleteObjectVersion",
+ ],
+ "Resource": "arn:aws:s3:::br-studio-${aws:PrincipalAccount}-*",
"Condition": {
- "StringEquals": {"aws:ResourceTag/EnableBedrock": "true"}
+ "StringEquals": {
+ "s3:ResourceAccount": "${aws:PrincipalAccount}"
+ }
},
},
{
- "Sid": "BedrockRuntimeAgentPermissions",
+ "Sid": "AccessOssCollections",
"Effect": "Allow",
- "Action": ["bedrock:InvokeAgent"],
+ "Action": "aoss:APIAccessAll",
"Resource": "*",
"Condition": {
- "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"}
+ "StringEquals": {
+ "aws:ResourceAccount": "${aws:PrincipalAccount}"
+ }
},
},
{
- "Sid": "BedrockRuntimeModelsAndJobsRole",
+ "Sid": "InvokeBedrockModels",
"Effect": "Allow",
"Action": [
"bedrock:InvokeModel",
@@ -1221,50 +1251,106 @@ def _get_permission_boundary(self):
"Resource": "*",
},
{
- "Sid": "BedrockApplyGuardrails",
+ "Sid": "AccessBedrockResources",
"Effect": "Allow",
- "Action": ["bedrock:ApplyGuardrail"],
+ "Action": [
+ "bedrock:InvokeAgent",
+ "bedrock:Retrieve",
+ "bedrock:StartIngestionJob",
+ "bedrock:GetIngestionJob",
+ "bedrock:ListIngestionJobs",
+ "bedrock:ApplyGuardrail",
+ "bedrock:ListPrompts",
+ "bedrock:GetPrompt",
+ "bedrock:CreatePrompt",
+ "bedrock:DeletePrompt",
+ "bedrock:CreatePromptVersion",
+ "bedrock:InvokeFlow",
+ "bedrock:ListTagsForResource",
+ "bedrock:TagResource",
+ "bedrock:UntagResource",
+ ],
"Resource": "*",
"Condition": {
- "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"}
+ "StringEquals": {
+ "aws:ResourceAccount": "${aws:PrincipalAccount}",
+ "aws:ResourceTag/AmazonBedrockManaged": "true",
+ },
+ "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"},
},
},
{
- "Sid": "BedrockRuntimePermissions",
+ "Sid": "InvokeBedrockFlows",
+ "Effect": "Allow",
+ "Action": "bedrock:InvokeFlow",
+ "Resource": "arn:aws:bedrock:*:*:flow/*/alias/*",
+ "Condition": {
+ "StringEquals": {
+ "aws:ResourceAccount": "${aws:PrincipalAccount}",
+ },
+ "Null": {
+ "aws:ResourceTag/AmazonDataZoneProject": "false",
+ },
+ },
+ },
+ {
+ "Sid": "WriteLogs",
"Effect": "Allow",
"Action": [
- "bedrock:Retrieve",
- "bedrock:StartIngestionJob",
- "bedrock:GetIngestionJob",
- "bedrock:ListIngestionJobs",
+ "logs:CreateLogGroup",
+ "logs:CreateLogStream",
+ "logs:PutLogEvents",
],
"Resource": "*",
"Condition": {
- "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"}
+ "StringEquals": {
+ "aws:ResourceAccount": "${aws:PrincipalAccount}",
+ "aws:ResourceTag/AmazonBedrockManaged": "true",
+ },
+ "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"},
},
},
{
- "Sid": "BedrockFunctionsPermissions",
- "Action": ["secretsmanager:PutSecretValue"],
- "Resource": "arn:aws:secretsmanager:*:*:secret:br-studio/*",
+ "Sid": "InvokeLambdaFunctions",
"Effect": "Allow",
+ "Action": "lambda:InvokeFunction",
+ "Resource": "arn:aws:lambda:*:*:function:br-studio-*",
"Condition": {
- "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"}
+ "StringEquals": {
+ "aws:ResourceAccount": "${aws:PrincipalAccount}",
+ "aws:ResourceTag/AmazonBedrockManaged": "true",
+ },
+ "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"},
},
},
{
- "Sid": "BedrockS3ObjectsHandlingPermissions",
+ "Sid": "AccessSecretsManagerSecrets",
+ "Effect": "Allow",
"Action": [
- "s3:GetObject",
- "s3:PutObject",
- "s3:GetObjectVersion",
- "s3:ListBucketVersions",
- "s3:DeleteObject",
- "s3:DeleteObjectVersion",
- "s3:ListBucket",
+ "secretsmanager:DescribeSecret",
+ "secretsmanager:GetSecretValue",
+ "secretsmanager:PutSecretValue",
],
- "Resource": [f"arn:aws:s3:::br-studio-{self._account_id}-*"],
+ "Resource": "arn:aws:secretsmanager:*:*:secret:br-studio/*",
+ "Condition": {
+ "StringEquals": {
+ "aws:ResourceAccount": "${aws:PrincipalAccount}",
+ "aws:ResourceTag/AmazonBedrockManaged": "true",
+ },
+ "Null": {"aws:ResourceTag/AmazonDataZoneProject": "false"},
+ },
+ },
+ {
+ "Sid": "UseCustomerManagedKmsKey",
"Effect": "Allow",
+ "Action": ["kms:Decrypt", "kms:GenerateDataKey"],
+ "Resource": "*",
+ "Condition": {
+ "StringEquals": {
+ "aws:ResourceAccount": "${aws:PrincipalAccount}",
+ "aws:ResourceTag/EnableBedrock": "true",
+ }
+ },
},
],
}
From 601bf3d20fae4910b510379df0c88ea1737536e8 Mon Sep 17 00:00:00 2001
From: Laren-AWS <57545972+Laren-AWS@users.noreply.github.com>
Date: Tue, 23 Jul 2024 12:10:44 -0700
Subject: [PATCH 06/98] SOS: Removed curated examples (#6678)
* Remove curated examples because it's obsolete.
* Update tools release to remove curated example checks.
---
.../zonbook/example_curated_template.xml | 30 -------------------
.../zonbook/library_by_service_chapter.xml | 28 -----------------
.../zonbook/service_chapter_template.xml | 28 -----------------
.../zonbook/utility/curated_examples.xml | 21 -------------
.github/workflows/validate-doc-metadata.yml | 2 +-
5 files changed, 1 insertion(+), 108 deletions(-)
delete mode 100644 .doc_gen/templates/zonbook/example_curated_template.xml
delete mode 100644 .doc_gen/templates/zonbook/utility/curated_examples.xml
diff --git a/.doc_gen/templates/zonbook/example_curated_template.xml b/.doc_gen/templates/zonbook/example_curated_template.xml
deleted file mode 100644
index 6f4b3f3db73..00000000000
--- a/.doc_gen/templates/zonbook/example_curated_template.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-{{- template "prologue"}}
-{{- $include_docs := "file://AWSShared/code-samples/docs/"}}
-{{- if isSnapshot}}
- {{- $include_docs = ""}}
-{{- end}}
-
- {{- if .BlockContent}}
-
- {{- else}}
- {{.Description}}
- {{- if .DescriptionList}}
-
- {{- range $desc := .DescriptionList}}
- {{$desc}}
- {{- end}}
-
- {{- end}}
- {{- end}}
-
- For complete source code and instructions on how to set up and run, see
- {{.UrlText}} in
- {{.SourceName}}.
-
-
- Services used in this example
- {{- range $svc_ent := $.Services}}
- {{$svc_ent.Short}}
- {{- end}}
-
-
\ No newline at end of file
diff --git a/.doc_gen/templates/zonbook/library_by_service_chapter.xml b/.doc_gen/templates/zonbook/library_by_service_chapter.xml
index 14af8d7999c..78337ee75d4 100644
--- a/.doc_gen/templates/zonbook/library_by_service_chapter.xml
+++ b/.doc_gen/templates/zonbook/library_by_service_chapter.xml
@@ -104,34 +104,6 @@
{{- end}}
{{- end}}
{{- end}}
- {{- if $service.CuratedExampleSets}}
-
-
- Curated code examples for {{$service.ServiceEntity.Short}}
- Curated examples
-
- The following curated code examples show a variety of ways to use {{$service.ServiceEntity.Short}}.
-
-
- The following curated code examples show a variety of ways to use {{$service.ServiceEntity.Short}}.
- Example sources
- {{- range $curated_set := $service.CuratedExampleSets}}
-
-
- {{$curated_set.Source.Name}}
- {{$curated_set.Source.Name}}
-
- The following code examples show how to use {{$service.ServiceEntity.Short}} with {{$curated_set.Source.Name}}.
-
-
- {{$curated_set.Source.Description}}
- The following curated code examples from {{$curated_set.Source.Name}}
- show how to use {{$service.ServiceEntity.Short}}.
- {{- template "curated_examples" $curated_set}}
-
- {{- end}}
-
- {{- end}}
{{- end}}
diff --git a/.doc_gen/templates/zonbook/service_chapter_template.xml b/.doc_gen/templates/zonbook/service_chapter_template.xml
index ed98f23aacd..0da03cbc429 100644
--- a/.doc_gen/templates/zonbook/service_chapter_template.xml
+++ b/.doc_gen/templates/zonbook/service_chapter_template.xml
@@ -81,34 +81,6 @@
{{- end}}
{{- end}}
{{- end}}
- {{- if .CuratedExampleSets}}
-
-
- Curated code examples for {{$.ServiceEntity.Short}}
- Curated examples
-
- The following curated code examples show a variety of ways to use {{$.ServiceEntity.Short}}.
-
-
- The following curated code examples show a variety of ways to use {{$.ServiceEntity.Short}}.
- Example sources
- {{- range $curated_set := .CuratedExampleSets}}
-
-
- {{$curated_set.Source.Name}}
- {{$curated_set.Source.Name}}
-
- The following code examples show how to use {{$.ServiceEntity.Short}} with {{$curated_set.Source.Name}}.
-
-
- {{$curated_set.Source.Description}}
- The following curated code examples from {{$curated_set.Source.Name}}
- show how to use {{$.ServiceEntity.Short}}.
- {{- template "curated_examples" $curated_set}}
-
- {{- end}}
-
- {{- end}}
{{- if .Bundle}}
{{- else}}
diff --git a/.doc_gen/templates/zonbook/utility/curated_examples.xml b/.doc_gen/templates/zonbook/utility/curated_examples.xml
deleted file mode 100644
index 838d2ad1165..00000000000
--- a/.doc_gen/templates/zonbook/utility/curated_examples.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-{{- define "curated_examples"}}
-{{- $curated_set := .}}
-{{- $include_docs := "file://AWSShared/code-samples/docs/"}}
-{{- if isSnapshot}}
- {{- $include_docs = ""}}
-{{- end}}
-Examples
-{{- range $curated_set.Examples}}
-
-
- {{.Title}}
- {{.TitleAbbrev}}
-
- {{.Title}}.
-
-
-
-
-
-{{- end}}
-{{- end}}
\ No newline at end of file
diff --git a/.github/workflows/validate-doc-metadata.yml b/.github/workflows/validate-doc-metadata.yml
index 218d1623a80..22ac2290b2e 100644
--- a/.github/workflows/validate-doc-metadata.yml
+++ b/.github/workflows/validate-doc-metadata.yml
@@ -16,7 +16,7 @@ jobs:
- name: checkout repo content
uses: actions/checkout@v4
- name: validate metadata
- uses: awsdocs/aws-doc-sdk-examples-tools@2024-07-22-A
+ uses: awsdocs/aws-doc-sdk-examples-tools@2024-07-22-B
with:
doc_gen_only: "False"
strict_titles: "True"
From 1de3fdf3435ea3521f0e85c20cf6c8416bb4a7da Mon Sep 17 00:00:00 2001
From: Steven Meyer <108885656+meyertst-aws@users.noreply.github.com>
Date: Wed, 24 Jul 2024 10:52:59 -0400
Subject: [PATCH 07/98] C++:S3:Object Integrity Workflow (#6599)
---
.doc_gen/metadata/s3_metadata.yaml | 65 +
.doc_gen/validation.yaml | 1 +
cpp/example_code/s3/CMakeLists.txt | 80 +-
cpp/example_code/s3/README.md | 20 +-
cpp/example_code/s3/copy_object.cpp | 4 +-
cpp/example_code/s3/create_bucket.cpp | 4 +-
cpp/example_code/s3/delete_bucket.cpp | 4 +-
cpp/example_code/s3/delete_bucket_policy.cpp | 4 +-
cpp/example_code/s3/delete_object.cpp | 4 +-
cpp/example_code/s3/delete_objects.cpp | 4 +-
cpp/example_code/s3/delete_website_config.cpp | 4 +-
cpp/example_code/s3/get_bucket_acl.cpp | 4 +-
cpp/example_code/s3/get_bucket_policy.cpp | 4 +-
cpp/example_code/s3/get_object.cpp | 4 +-
cpp/example_code/s3/get_put_bucket_acl.cpp | 4 +-
cpp/example_code/s3/get_put_object_acl.cpp | 4 +-
cpp/example_code/s3/get_website_config.cpp | 4 +-
cpp/example_code/s3/list_buckets.cpp | 4 +-
.../s3/list_buckets_disabling_dns_cache.cpp | 4 +-
cpp/example_code/s3/list_objects.cpp | 10 +-
.../list_objects_with_aws_global_region.cpp | 6 +-
cpp/example_code/s3/presigned_get_object.cpp | 4 +-
cpp/example_code/s3/presigned_put_object.cpp | 4 +-
cpp/example_code/s3/put_bucket_acl.cpp | 4 +-
cpp/example_code/s3/put_bucket_policy.cpp | 4 +-
cpp/example_code/s3/put_object.cpp | 4 +-
cpp/example_code/s3/put_object_async.cpp | 4 +-
cpp/example_code/s3/put_object_buffer.cpp | 4 +-
cpp/example_code/s3/put_website_config.cpp | 4 +-
cpp/example_code/s3/s3_demo_for_cloud9.cpp | 4 +-
cpp/example_code/s3/s3_examples.h | 4 +
.../s3/s3_getting_started_scenario.cpp | 4 +-
.../CMakeLists.txt | 66 +
.../s3/s3_object_integrity_workflow/README.md | 46 +
.../s3/s3_object_integrity_workflow/main.cpp | 45 +
.../s3_object_integrity_workflow.cpp | 1718 +++++++++++++++++
cpp/example_code/s3/tests/CMakeLists.txt | 25 +-
cpp/example_code/s3/tests/S3_GTests.cpp | 33 +-
cpp/example_code/s3/tests/S3_GTests.h | 8 +
.../s3/tests/gtest_list_objects.cpp | 3 +-
.../gtest_s3_object_integrity_workflow.cpp | 222 +++
workflows/s3_object_integrity/README.md | 30 +
.../s3_object_integrity/SPECIFICATION.md | 164 ++
43 files changed, 2533 insertions(+), 113 deletions(-)
create mode 100644 cpp/example_code/s3/s3_object_integrity_workflow/CMakeLists.txt
create mode 100644 cpp/example_code/s3/s3_object_integrity_workflow/README.md
create mode 100644 cpp/example_code/s3/s3_object_integrity_workflow/main.cpp
create mode 100644 cpp/example_code/s3/s3_object_integrity_workflow/s3_object_integrity_workflow.cpp
create mode 100644 cpp/example_code/s3/tests/gtest_s3_object_integrity_workflow.cpp
create mode 100644 workflows/s3_object_integrity/README.md
create mode 100644 workflows/s3_object_integrity/SPECIFICATION.md
diff --git a/.doc_gen/metadata/s3_metadata.yaml b/.doc_gen/metadata/s3_metadata.yaml
index 0c4e4d7722a..e4403cce6d8 100644
--- a/.doc_gen/metadata/s3_metadata.yaml
+++ b/.doc_gen/metadata/s3_metadata.yaml
@@ -1184,6 +1184,18 @@ s3_AbortMultipartUploads:
- S3.dotnetv3.AbortMPUExample
services:
s3: {AbortMultipartUploads}
+s3_AbortMultipartUpload:
+ languages:
+ C++:
+ versions:
+ - sdk_version: 1
+ github: cpp/example_code/s3
+ excerpts:
+ - description:
+ snippet_tags:
+ - cpp.example_code.s3.AbortMultipartUpload
+ services:
+ s3: {AbortMultipartUpload}
s3_CompleteMultipartUpload:
languages:
Rust:
@@ -1194,6 +1206,14 @@ s3_CompleteMultipartUpload:
- description:
snippet_tags:
- rust.example_code.s3.complete_multipart_upload
+ C++:
+ versions:
+ - sdk_version: 1
+ github: cpp/example_code/s3
+ excerpts:
+ - description:
+ snippet_tags:
+ - cpp.example_code.s3.CompleteMultipartUpload
services:
s3: {CompleteMultipartUpload}
s3_CreateMultipartUpload:
@@ -1206,6 +1226,14 @@ s3_CreateMultipartUpload:
- description:
snippet_tags:
- rust.example_code.s3.create_multipart_upload
+ C++:
+ versions:
+ - sdk_version: 1
+ github: cpp/example_code/s3
+ excerpts:
+ - description:
+ snippet_tags:
+ - cpp.example_code.s3.CreateMultipartUpload
services:
s3: {CreateMultipartUpload}
s3_ListMultipartUploads:
@@ -2333,6 +2361,14 @@ s3_UploadPart:
snippet_tags:
- rust.example_code.s3.upload_part
- rust.example_code.s3.upload_part.CompletedMultipartUpload
+ C++:
+ versions:
+ - sdk_version: 1
+ github: cpp/example_code/s3
+ excerpts:
+ - description:
+ snippet_tags:
+ - cpp.example_code.s3.UploadPart
services:
s3: {UploadPart}
s3_PutBucketNotificationConfiguration:
@@ -2379,6 +2415,18 @@ s3_PutBucketAccelerateConfiguration:
- S3.dotnetv3.TransferAccelerationExample
services:
s3: {PutBucketAccelerateConfiguration}
+s3_GetObjectAttributes:
+ languages:
+ C++:
+ versions:
+ - sdk_version: 1
+ github: cpp/example_code/s3
+ excerpts:
+ - description:
+ snippet_tags:
+ - cpp.example_code.s3.GetObjectAttributes
+ services:
+ s3: {GetObjectAttributes}
s3_DownloadBucketToDirectory:
title: Download all objects in an &S3long; (&S3;) bucket to a local directory
title_abbrev: Download objects to a local directory
@@ -3243,6 +3291,23 @@ s3_Scenario_ObjectLock:
- javascriptv3/example_code/s3/scenarios/object-locking/clean.steps.js
services:
s3: {PutObjectLockConfiguration, PutObjectRetention, GetObjectRetention, PutObjectLegalHold, GetObjectLegalHold, GetObjectLockConfiguration}
+s3_Scenario_ObjectIntegrity:
+ title: Work with &S3; object integrity features using an &AWS; SDK
+ title_abbrev: Work with &S3; object integrity
+ synopsis: work with S3 object integrity features.
+ category: Scenarios
+ languages:
+ C++:
+ versions:
+ - sdk_version: 1
+ github: cpp/example_code/s3/s3_object_integrity_workflow
+ sdkguide:
+ excerpts:
+ - description: Run an interactive scenario demonstrating &S3; object integrity features.
+ snippet_tags:
+ - cpp.example_code.s3.Scenario_ObjectIntegrity
+ services:
+ s3: {AbortMultipartUpload, CreateMultipartUpload, DeleteObject, GetObjectAttributes, PutObject, UploadPart, CompleteMultipartUpload}
s3_SelectObjectContent:
languages:
Java:
diff --git a/.doc_gen/validation.yaml b/.doc_gen/validation.yaml
index a93b249c05c..c368e7914a1 100644
--- a/.doc_gen/validation.yaml
+++ b/.doc_gen/validation.yaml
@@ -204,6 +204,7 @@ allow_list:
- "v2/service/cognitoidentityprovider/types"
- "abortIncompleteMultipartUploadsOlderThan"
- "com/pinterest/ktlint/releases/download/1"
+ - "aws/s3/model/AbortMultipartUploadRequest"
- "src/main/kotlin/com/example/ecr/HelloECR"
sample_files:
- "README.md"
diff --git a/cpp/example_code/s3/CMakeLists.txt b/cpp/example_code/s3/CMakeLists.txt
index 0a2fb09ba2f..fb3e9027d03 100644
--- a/cpp/example_code/s3/CMakeLists.txt
+++ b/cpp/example_code/s3/CMakeLists.txt
@@ -5,7 +5,7 @@
cmake_minimum_required(VERSION 3.13)
set(SERVICE_NAME s3)
-set(SERVICE_COMPONENTS iam s3 sts)
+set(SERVICE_COMPONENTS iam s3 sts transfer)
# Set this project's name.
project("${SERVICE_NAME}-examples")
@@ -17,62 +17,62 @@ set(CMAKE_CXX_STANDARD 11)
set(WINDOWS_BUILD ${MSVC})
# Set the location of where Windows can find the installed libraries of the SDK.
-if(WINDOWS_BUILD)
+if (WINDOWS_BUILD)
string(REPLACE ";" "/aws-cpp-sdk-all;" SYSTEM_MODULE_PATH "${CMAKE_SYSTEM_PREFIX_PATH}/aws-cpp-sdk-all")
list(APPEND CMAKE_PREFIX_PATH ${SYSTEM_MODULE_PATH})
-endif()
+endif ()
# CURL package is optional to test pre-signed url code.
find_package(CURL)
-if (NOT CURL_FOUND)
- unset(CURL_INCLUDE_DIRS)
- unset(CURL_LIBRARIES)
+if (NOT CURL_FOUND)
+ unset(CURL_INCLUDE_DIRS)
+ unset(CURL_LIBRARIES)
unset(CURL_LIBRARY)
-endif()
+endif ()
# Find the AWS SDK for C++ package.
find_package(AWSSDK REQUIRED COMPONENTS ${SERVICE_COMPONENTS})
-if(WINDOWS_BUILD AND AWSSDK_INSTALL_AS_SHARED_LIBS)
- # Copy relevant AWS SDK for C++ libraries into the current binary directory for running and debugging.
+if (WINDOWS_BUILD AND AWSSDK_INSTALL_AS_SHARED_LIBS)
+ # Copy relevant AWS SDK for C++ libraries into the current binary directory for running and debugging.
- # set(BIN_SUB_DIR "/Debug") # If you are building from the command line, you may need to uncomment this
- # and set the proper subdirectory to the executables' location.
+ # set(BIN_SUB_DIR "/Debug") # If you are building from the command line, you may need to uncomment this
+ # and set the proper subdirectory to the executables' location.
- AWSSDK_CPY_DYN_LIBS(SERVICE_COMPONENTS "" ${CMAKE_CURRENT_BINARY_DIR}${BIN_SUB_DIR})
- endif()
+ AWSSDK_CPY_DYN_LIBS(SERVICE_COMPONENTS "" ${CMAKE_CURRENT_BINARY_DIR}${BIN_SUB_DIR})
+endif ()
# Add the code example-specific header files.
file(GLOB AWSDOC_S3_HEADERS
- "include/awsdoc/s3/*.h"
+ "include/awsdoc/s3/*.h"
)
# AWSDOC_S3_SOURCE can be defined in the command line to limit the files in a build. For example,
# you can limit files to one action.
-if(NOT DEFINED AWSDOC_S3_SOURCE)
+if (NOT DEFINED AWSDOC_S3_SOURCE)
file(GLOB AWSDOC_S3_SOURCE
- "*.cpp"
+ "*.cpp"
)
-endif()
+endif ()
# Handle special case of list_buckets_disabling_dns_cache.cpp on Windows.
-if(WINDOWS_BUILD)
+if (WINDOWS_BUILD)
list(FIND AWSSDK_CLIENT_LIBS "curl" CONTAINS_CURL)
if (CONTAINS_CURL EQUAL -1)
- # Remove list_buckets_disabling_dns_cache.cpp when not using curl library for http.
+ # Remove list_buckets_disabling_dns_cache.cpp when not using curl library for http.
list(FILTER AWSDOC_S3_SOURCE EXCLUDE REGEX "/list_buckets_disabling_dns_cache.cpp$") # Not supported in windows without curl, see file for details
- else()
+ else ()
if (NOT CURL_FOUND) # find_package did not set these.
set(CURL_INCLUDE_DIRS "c:/curl/include") # Update this with correct curl install location.
set(CURL_LIBRARIES "c:/curl/lib") # Update this with correct curl install location.
- endif()
- endif()
-endif()
+ endif ()
+ endif ()
+endif ()
-foreach(file ${AWSDOC_S3_SOURCE})
+foreach (file ${AWSDOC_S3_SOURCE})
get_filename_component(EXAMPLE ${file} NAME_WE)
# Build the code example executables.
@@ -80,30 +80,24 @@ foreach(file ${AWSDOC_S3_SOURCE})
add_executable(${EXAMPLE_EXE} ${AWSDOC_S3_HEADERS} ${file})
- target_include_directories(${EXAMPLE_EXE} PUBLIC
- $
- $
- )
-
-
- target_include_directories(${EXAMPLE_EXE} SYSTEM PUBLIC
- ${CURL_INCLUDE_DIRS})
-
- target_link_libraries(${EXAMPLE_EXE}
- ${AWSSDK_LINK_LIBRARIES}
- ${AWSSDK_PLATFORM_DEPS}
+ target_include_directories(${EXAMPLE_EXE} SYSTEM PUBLIC
+ ${CURL_INCLUDE_DIRS})
+
+ target_link_libraries(${EXAMPLE_EXE}
+ ${AWSSDK_LINK_LIBRARIES}
+ ${AWSSDK_PLATFORM_DEPS}
${CURL_LIBRARIES})
- if(CURL_FOUND)
+ if (CURL_FOUND)
target_compile_definitions(${EXAMPLE_EXE}
- PUBLIC
- HAS_CURL=1)
- endif()
+ PUBLIC
+ HAS_CURL=1)
+ endif ()
- endforeach()
+endforeach ()
-if(BUILD_TESTS)
+if (BUILD_TESTS)
add_subdirectory(tests)
-endif()
+endif ()
diff --git a/cpp/example_code/s3/README.md b/cpp/example_code/s3/README.md
index 3f0b7d13952..aa784db1057 100644
--- a/cpp/example_code/s3/README.md
+++ b/cpp/example_code/s3/README.md
@@ -46,8 +46,11 @@ Next, for information on code example structures and how to build and run the ex
Code excerpts that show you how to call individual service functions.
+- [AbortMultipartUpload](s3_object_integrity_workflow/s3_object_integrity_workflow.cpp#L1097)
+- [CompleteMultipartUpload](s3_object_integrity_workflow/s3_object_integrity_workflow.cpp#L1129)
- [CopyObject](copy_object.cpp#L32)
- [CreateBucket](create_bucket.cpp#L30)
+- [CreateMultipartUpload](s3_object_integrity_workflow/s3_object_integrity_workflow.cpp#L1006)
- [DeleteBucket](delete_bucket.cpp#L30)
- [DeleteBucketPolicy](delete_bucket_policy.cpp#L30)
- [DeleteBucketWebsite](delete_website_config.cpp#L30)
@@ -58,13 +61,15 @@ Code excerpts that show you how to call individual service functions.
- [GetBucketWebsite](get_website_config.cpp#L29)
- [GetObject](get_object.cpp#L33)
- [GetObjectAcl](get_put_object_acl.cpp#L43)
+- [GetObjectAttributes](s3_object_integrity_workflow/s3_object_integrity_workflow.cpp#L707)
- [ListBuckets](list_buckets.cpp#L29)
-- [ListObjectsV2](list_objects.cpp#L31)
+- [ListObjectsV2](list_objects.cpp#L32)
- [PutBucketAcl](put_bucket_acl.cpp#L47)
- [PutBucketPolicy](put_bucket_policy.cpp#L37)
- [PutBucketWebsite](put_website_config.cpp#L33)
- [PutObject](put_object.cpp#L33)
- [PutObjectAcl](get_put_object_acl.cpp#L165)
+- [UploadPart](s3_object_integrity_workflow/s3_object_integrity_workflow.cpp#L1040)
### Scenarios
@@ -73,6 +78,7 @@ functions within the same service.
- [Create a presigned URL](presigned_get_object.cpp)
- [Get started with buckets and objects](s3_getting_started_scenario.cpp)
+- [Work with Amazon S3 object integrity](s3_object_integrity_workflow/s3_object_integrity_workflow.cpp)
### Cross-service examples
@@ -139,6 +145,18 @@ This example shows you how to do the following:
+#### Work with Amazon S3 object integrity
+
+This example shows you how to work with S3 object integrity features.
+
+
+
+
+
+
+
+
+
### Tests
⚠ Running tests might result in charges to your AWS account.
diff --git a/cpp/example_code/s3/copy_object.cpp b/cpp/example_code/s3/copy_object.cpp
index 3415f2fdeb3..144ad18b538 100644
--- a/cpp/example_code/s3/copy_object.cpp
+++ b/cpp/example_code/s3/copy_object.cpp
@@ -66,7 +66,7 @@ bool AwsDoc::S3::copyObject(const Aws::String &objectKey, const Aws::String &fro
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[]) {
Aws::SDKOptions options;
@@ -100,5 +100,5 @@ int main(int argc, char* argv[]) {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/create_bucket.cpp b/cpp/example_code/s3/create_bucket.cpp
index 8fd038a4e9a..23be332bb2e 100644
--- a/cpp/example_code/s3/create_bucket.cpp
+++ b/cpp/example_code/s3/create_bucket.cpp
@@ -64,7 +64,7 @@ bool AwsDoc::S3::createBucket(const Aws::String &bucketName,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main() {
Aws::SDKOptions options;
@@ -88,5 +88,5 @@ int main() {
ShutdownAPI(options);
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/delete_bucket.cpp b/cpp/example_code/s3/delete_bucket.cpp
index dd94f7f7486..c8ba0fd6b53 100644
--- a/cpp/example_code/s3/delete_bucket.cpp
+++ b/cpp/example_code/s3/delete_bucket.cpp
@@ -61,7 +61,7 @@ bool AwsDoc::S3::deleteBucket(const Aws::String &bucketName,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[]) {
Aws::SDKOptions options;
@@ -90,5 +90,5 @@ int main(int argc, char* argv[]) {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/delete_bucket_policy.cpp b/cpp/example_code/s3/delete_bucket_policy.cpp
index b317c48a0af..75bb08c8970 100644
--- a/cpp/example_code/s3/delete_bucket_policy.cpp
+++ b/cpp/example_code/s3/delete_bucket_policy.cpp
@@ -60,7 +60,7 @@ bool AwsDoc::S3::deleteBucketPolicy(const Aws::String &bucketName,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[]) {
Aws::SDKOptions options;
@@ -89,4 +89,4 @@ int main(int argc, char* argv[]) {
return 0;
}
-#endif // TESTING_BUILD
\ No newline at end of file
+#endif // EXCLUDE_MAIN_FUNCTION
\ No newline at end of file
diff --git a/cpp/example_code/s3/delete_object.cpp b/cpp/example_code/s3/delete_object.cpp
index 1c708715511..2d617a0f086 100644
--- a/cpp/example_code/s3/delete_object.cpp
+++ b/cpp/example_code/s3/delete_object.cpp
@@ -63,7 +63,7 @@ bool AwsDoc::S3::deleteObject(const Aws::String &objectKey,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[]) {
Aws::SDKOptions options;
@@ -94,4 +94,4 @@ int main(int argc, char* argv[]) {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/delete_objects.cpp b/cpp/example_code/s3/delete_objects.cpp
index 782d0ab46e6..e6d16a15ce5 100644
--- a/cpp/example_code/s3/delete_objects.cpp
+++ b/cpp/example_code/s3/delete_objects.cpp
@@ -78,7 +78,7 @@ bool AwsDoc::S3::deleteObjects(const std::vector &objectKeys,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char **argv) {
@@ -116,4 +116,4 @@ int main(int argc, char **argv) {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/delete_website_config.cpp b/cpp/example_code/s3/delete_website_config.cpp
index 6a435ff8fab..30ded30c78c 100644
--- a/cpp/example_code/s3/delete_website_config.cpp
+++ b/cpp/example_code/s3/delete_website_config.cpp
@@ -60,7 +60,7 @@ bool AwsDoc::S3::deleteBucketWebsite(const Aws::String &bucketName,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[]) {
Aws::SDKOptions options;
@@ -90,4 +90,4 @@ int main(int argc, char* argv[]) {
return 0;
}
-#endif // TESTING_BUILD
\ No newline at end of file
+#endif // EXCLUDE_MAIN_FUNCTION
\ No newline at end of file
diff --git a/cpp/example_code/s3/get_bucket_acl.cpp b/cpp/example_code/s3/get_bucket_acl.cpp
index 2b6b571876b..c077b0cd6e3 100644
--- a/cpp/example_code/s3/get_bucket_acl.cpp
+++ b/cpp/example_code/s3/get_bucket_acl.cpp
@@ -155,7 +155,7 @@ Aws::String getPermissionString(const Aws::S3::Model::Permission &permission) {
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[]) {
Aws::SDKOptions options;
@@ -184,5 +184,5 @@ int main(int argc, char* argv[]) {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/get_bucket_policy.cpp b/cpp/example_code/s3/get_bucket_policy.cpp
index 2f577db3da4..7417aeb67fb 100644
--- a/cpp/example_code/s3/get_bucket_policy.cpp
+++ b/cpp/example_code/s3/get_bucket_policy.cpp
@@ -69,7 +69,7 @@ bool AwsDoc::S3::getBucketPolicy(const Aws::String &bucketName,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[]) {
Aws::SDKOptions options;
@@ -98,4 +98,4 @@ int main(int argc, char* argv[]) {
return 0;
}
-#endif // TESTING_BUILD
\ No newline at end of file
+#endif // EXCLUDE_MAIN_FUNCTION
\ No newline at end of file
diff --git a/cpp/example_code/s3/get_object.cpp b/cpp/example_code/s3/get_object.cpp
index c1dca3c3bea..9179f82bba2 100644
--- a/cpp/example_code/s3/get_object.cpp
+++ b/cpp/example_code/s3/get_object.cpp
@@ -70,7 +70,7 @@ bool AwsDoc::S3::getObject(const Aws::String &objectKey,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[]) {
Aws::SDKOptions options;
@@ -102,4 +102,4 @@ int main(int argc, char* argv[]) {
return 0;
}
-#endif // TESTING_BUILD
\ No newline at end of file
+#endif // EXCLUDE_MAIN_FUNCTION
\ No newline at end of file
diff --git a/cpp/example_code/s3/get_put_bucket_acl.cpp b/cpp/example_code/s3/get_put_bucket_acl.cpp
index d4ca7a4a42e..a83eecea3e3 100644
--- a/cpp/example_code/s3/get_put_bucket_acl.cpp
+++ b/cpp/example_code/s3/get_put_bucket_acl.cpp
@@ -318,7 +318,7 @@ Aws::S3::Model::Type setGranteeType(const Aws::String &type) {
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
static void usage() {
std::cout << R"(
@@ -385,4 +385,4 @@ int main(int argc, char* argv[]) {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/get_put_object_acl.cpp b/cpp/example_code/s3/get_put_object_acl.cpp
index e8bb08a871c..ff3143faf27 100644
--- a/cpp/example_code/s3/get_put_object_acl.cpp
+++ b/cpp/example_code/s3/get_put_object_acl.cpp
@@ -275,7 +275,7 @@ Aws::S3::Model::Type setGranteeType(const Aws::String &type) {
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
static void usage() {
std::cout << R"(
@@ -344,5 +344,5 @@ int main(int argc, char* argv[]) {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/get_website_config.cpp b/cpp/example_code/s3/get_website_config.cpp
index cca9de0f6b8..380a73ab86c 100644
--- a/cpp/example_code/s3/get_website_config.cpp
+++ b/cpp/example_code/s3/get_website_config.cpp
@@ -74,7 +74,7 @@ bool AwsDoc::S3::getWebsiteConfig(const Aws::String &bucketName,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[])
{
@@ -104,5 +104,5 @@ int main(int argc, char* argv[])
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/list_buckets.cpp b/cpp/example_code/s3/list_buckets.cpp
index 00f80061442..adfaaa47510 100644
--- a/cpp/example_code/s3/list_buckets.cpp
+++ b/cpp/example_code/s3/list_buckets.cpp
@@ -53,7 +53,7 @@ bool AwsDoc::S3::listBuckets(const Aws::S3::S3ClientConfiguration &clientConfig)
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main() {
//The Aws::SDKOptions struct contains SDK configuration options.
@@ -77,4 +77,4 @@ int main() {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/list_buckets_disabling_dns_cache.cpp b/cpp/example_code/s3/list_buckets_disabling_dns_cache.cpp
index c3057a03373..e7e81ff5256 100644
--- a/cpp/example_code/s3/list_buckets_disabling_dns_cache.cpp
+++ b/cpp/example_code/s3/list_buckets_disabling_dns_cache.cpp
@@ -117,7 +117,7 @@ bool AwsDoc::S3::listBucketDisablingDnsCache(const Aws::S3::S3ClientConfiguratio
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char *argv[]) {
SDKOptions options;
@@ -144,4 +144,4 @@ int main(int argc, char *argv[]) {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/list_objects.cpp b/cpp/example_code/s3/list_objects.cpp
index 3a6b8d2e085..b4cae7fcfca 100644
--- a/cpp/example_code/s3/list_objects.cpp
+++ b/cpp/example_code/s3/list_objects.cpp
@@ -25,11 +25,13 @@
/*!
\param bucketName: Name of the S3 bucket.
\param clientConfig: Aws client configuration.
+ \param[out] keysResult: Vector to receive the keys.
\return bool: Function succeeded.
*/
// snippet-start:[s3.cpp.list_objects.code]
bool AwsDoc::S3::listObjects(const Aws::String &bucketName,
+                             Aws::Vector<Aws::String> &keysResult,
const Aws::S3::S3ClientConfiguration &clientConfig) {
Aws::S3::S3Client s3Client(clientConfig);
@@ -63,6 +65,7 @@ bool AwsDoc::S3::listObjects(const Aws::String &bucketName,
for (const auto &object: allObjects) {
std::cout << " " << object.GetKey() << std::endl;
+ keysResult.push_back(object.GetKey());
}
return true;
@@ -82,7 +85,7 @@ bool AwsDoc::S3::listObjects(const Aws::String &bucketName,
*.
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[])
{
@@ -105,12 +108,13 @@ int main(int argc, char* argv[])
Aws::S3::S3ClientConfiguration clientConfig;
// Optional: Set to the AWS Region in which the bucket was created (overrides config file).
// clientConfig.region = "us-east-1";
- AwsDoc::S3::listObjects(bucketName, clientConfig);
+        Aws::Vector<Aws::String> keysResult;
+ AwsDoc::S3::listObjects(bucketName, keysResult, clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/list_objects_with_aws_global_region.cpp b/cpp/example_code/s3/list_objects_with_aws_global_region.cpp
index 172f455e940..dc5e87d8306 100644
--- a/cpp/example_code/s3/list_objects_with_aws_global_region.cpp
+++ b/cpp/example_code/s3/list_objects_with_aws_global_region.cpp
@@ -94,6 +94,7 @@ static bool
listTheObjects(const Aws::S3::S3Client &s3Client, const Aws::String &bucketName) {
// An S3 API client set to the aws-global AWS Region should be able to get
// access to a bucket in any AWS Region.
+
Aws::S3::Model::ListObjectsV2Request listObjectsRequest;
listObjectsRequest.SetBucket(bucketName);
@@ -114,6 +115,7 @@ listTheObjects(const Aws::S3::S3Client &s3Client, const Aws::String &bucketName)
objects.insert(objects.end(), contents.begin(), contents.end());
continuationToken = listObjectOutcome.GetResult().GetNextContinuationToken();
} else {
+
std::cerr << "Error. Could not count the objects in the bucket: " <<
listObjectOutcome.GetError() << std::endl;
return false;
@@ -186,7 +188,7 @@ bool AwsDoc::S3::listObjectsWithAwsGlobalRegion(
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main() {
Aws::SDKOptions options;
@@ -201,4 +203,4 @@ int main() {
return 0;
}
-#endif // TESTING_BUILD
\ No newline at end of file
+#endif // EXCLUDE_MAIN_FUNCTION
\ No newline at end of file
diff --git a/cpp/example_code/s3/presigned_get_object.cpp b/cpp/example_code/s3/presigned_get_object.cpp
index fb68194d050..e3a708b3281 100644
--- a/cpp/example_code/s3/presigned_get_object.cpp
+++ b/cpp/example_code/s3/presigned_get_object.cpp
@@ -127,7 +127,7 @@ bool AwsDoc::S3::getObjectWithPresignedObjectUrl(const Aws::String &presignedURL
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char **argv) {
@@ -169,4 +169,4 @@ int main(int argc, char **argv) {
return 0;
}
-#endif // TESTING_BUILD
\ No newline at end of file
+#endif // EXCLUDE_MAIN_FUNCTION
\ No newline at end of file
diff --git a/cpp/example_code/s3/presigned_put_object.cpp b/cpp/example_code/s3/presigned_put_object.cpp
index e0f767d35a8..76f1d984aaa 100644
--- a/cpp/example_code/s3/presigned_put_object.cpp
+++ b/cpp/example_code/s3/presigned_put_object.cpp
@@ -164,7 +164,7 @@ bool AwsDoc::S3::PutStringWithPresignedObjectURL(const Aws::String &presignedURL
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char **argv) {
@@ -205,4 +205,4 @@ int main(int argc, char **argv) {
return 0;
}
-#endif // TESTING_BUILD
\ No newline at end of file
+#endif // EXCLUDE_MAIN_FUNCTION
\ No newline at end of file
diff --git a/cpp/example_code/s3/put_bucket_acl.cpp b/cpp/example_code/s3/put_bucket_acl.cpp
index 3c7008a90f5..09b83b1a7a6 100644
--- a/cpp/example_code/s3/put_bucket_acl.cpp
+++ b/cpp/example_code/s3/put_bucket_acl.cpp
@@ -158,7 +158,7 @@ Aws::S3::Model::Type setGranteeType(const Aws::String &type) {
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
static void usage() {
std::cout << R"(
@@ -225,4 +225,4 @@ int main(int argc, char* argv[]) {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/put_bucket_policy.cpp b/cpp/example_code/s3/put_bucket_policy.cpp
index 678085c7e53..76b36c9b39b 100644
--- a/cpp/example_code/s3/put_bucket_policy.cpp
+++ b/cpp/example_code/s3/put_bucket_policy.cpp
@@ -109,7 +109,7 @@ Aws::String getPolicyString(const Aws::String &userArn,
*
b*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[])
{
@@ -165,4 +165,4 @@ int main(int argc, char* argv[])
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/put_object.cpp b/cpp/example_code/s3/put_object.cpp
index 9c02a7e0faf..74e24650fc1 100644
--- a/cpp/example_code/s3/put_object.cpp
+++ b/cpp/example_code/s3/put_object.cpp
@@ -79,7 +79,7 @@ bool AwsDoc::S3::putObject(const Aws::String &bucketName,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[])
{
@@ -113,4 +113,4 @@ int main(int argc, char* argv[])
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/put_object_async.cpp b/cpp/example_code/s3/put_object_async.cpp
index c0ad6d71d24..b671d459a4c 100644
--- a/cpp/example_code/s3/put_object_async.cpp
+++ b/cpp/example_code/s3/put_object_async.cpp
@@ -118,7 +118,7 @@ bool AwsDoc::S3::putObjectAsync(const Aws::S3::S3Client &s3Client,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
// snippet-start:[s3.cpp.put_object_async.invoke.code]
int main(int argc, char* argv[])
@@ -177,4 +177,4 @@ int main(int argc, char* argv[])
// snippet-end:[s3.cpp.put_object_async.invoke.code]
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/put_object_buffer.cpp b/cpp/example_code/s3/put_object_buffer.cpp
index 47e5f50d1de..66bfd75a947 100644
--- a/cpp/example_code/s3/put_object_buffer.cpp
+++ b/cpp/example_code/s3/put_object_buffer.cpp
@@ -70,7 +70,7 @@ bool AwsDoc::S3::putObjectBuffer(const Aws::String &bucketName,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[])
{
@@ -105,4 +105,4 @@ int main(int argc, char* argv[])
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/put_website_config.cpp b/cpp/example_code/s3/put_website_config.cpp
index 4baaef58f6b..54cd57d14d7 100644
--- a/cpp/example_code/s3/put_website_config.cpp
+++ b/cpp/example_code/s3/put_website_config.cpp
@@ -76,7 +76,7 @@ bool AwsDoc::S3::putWebsiteConfig(const Aws::String &bucketName,
*
*/
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, char* argv[])
{
@@ -112,4 +112,4 @@ int main(int argc, char* argv[])
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/s3_demo_for_cloud9.cpp b/cpp/example_code/s3/s3_demo_for_cloud9.cpp
index d8805751c38..c4e141c6fa5 100644
--- a/cpp/example_code/s3/s3_demo_for_cloud9.cpp
+++ b/cpp/example_code/s3/s3_demo_for_cloud9.cpp
@@ -113,7 +113,7 @@ bool DeleteTheBucket(const Aws::S3::S3Client &s3Client,
return outcome.IsSuccess();
}
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
// Create an S3 bucket and then delete it.
// Before and after creating the bucket, and again after deleting the bucket,
// try to determine whether that bucket still exists.
@@ -161,5 +161,5 @@ int main(int argc, char *argv[]) {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
// snippet-end:[s3.cpp.bucket_operations.list_create_delete]
diff --git a/cpp/example_code/s3/s3_examples.h b/cpp/example_code/s3/s3_examples.h
index 5b3880ecfe9..98cf60a37f9 100644
--- a/cpp/example_code/s3/s3_examples.h
+++ b/cpp/example_code/s3/s3_examples.h
@@ -58,6 +58,7 @@ namespace AwsDoc {
const Aws::S3::S3ClientConfiguration &clientConfig);
bool listObjects(const Aws::String &bucketName,
+                         Aws::Vector<Aws::String> &keysResult,
const Aws::S3::S3ClientConfiguration &clientConfig);
bool listObjectsWithAwsGlobalRegion(
@@ -128,5 +129,8 @@ namespace AwsDoc {
bool getObjectWithPresignedObjectUrl(const Aws::String &presignedURL,
Aws::String &resultString);
+
+ bool s3ObjectIntegrityWorkflow(
+ const Aws::S3::S3ClientConfiguration &clientConfiguration);
} // namespace S3
} // namespace AwsDoc
diff --git a/cpp/example_code/s3/s3_getting_started_scenario.cpp b/cpp/example_code/s3/s3_getting_started_scenario.cpp
index e50e46c6652..2e670c59b35 100644
--- a/cpp/example_code/s3/s3_getting_started_scenario.cpp
+++ b/cpp/example_code/s3/s3_getting_started_scenario.cpp
@@ -288,7 +288,7 @@ AwsDoc::S3::deleteBucket(const Aws::String &bucketName, Aws::S3::S3Client &clien
}
// snippet-end:[cpp.example_code.s3.Scenario_GettingStarted]
-#ifndef TESTING_BUILD
+#ifndef EXCLUDE_MAIN_FUNCTION
int main(int argc, const char *argv[]) {
@@ -319,6 +319,6 @@ int main(int argc, const char *argv[]) {
return 0;
}
-#endif // TESTING_BUILD
+#endif // EXCLUDE_MAIN_FUNCTION
diff --git a/cpp/example_code/s3/s3_object_integrity_workflow/CMakeLists.txt b/cpp/example_code/s3/s3_object_integrity_workflow/CMakeLists.txt
new file mode 100644
index 00000000000..4e63e72e46b
--- /dev/null
+++ b/cpp/example_code/s3/s3_object_integrity_workflow/CMakeLists.txt
@@ -0,0 +1,66 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# Set the minimum required version of CMake for this project.
+cmake_minimum_required(VERSION 3.13)
+
+set(SERVICE_COMPONENTS s3 transfer)
+
+# Set this project's name.
+project("s3_object_integrity_workflow")
+
+# Build shared libraries by default.
+set(BUILD_SHARED_LIBS ON)
+
+# Set the C++ standard to use to build this target.
+set(CMAKE_CXX_STANDARD 17)
+
+# Use the MSVC variable to determine if this is a Windows build.
+set(WINDOWS_BUILD ${MSVC})
+
+# Set the location of where Windows can find the installed libraries of the SDK.
+if (WINDOWS_BUILD)
+ string(REPLACE ";" "/aws-cpp-sdk-all;" SYSTEM_MODULE_PATH "${CMAKE_SYSTEM_PREFIX_PATH}/aws-cpp-sdk-all")
+ list(APPEND CMAKE_PREFIX_PATH ${SYSTEM_MODULE_PATH})
+endif ()
+
+# Find the AWS SDK for C++ package.
+find_package(AWSSDK REQUIRED COMPONENTS ${SERVICE_COMPONENTS})
+
+if (WINDOWS_BUILD)
+ # Copy relevant AWS SDK for C++ libraries into the current binary directory for running and debugging.
+
+ # set(BIN_SUB_DIR "/Debug") # If you are building from the command line, you may need to uncomment this
+ # and set the proper subdirectory to the executable's location.
+
+ AWSSDK_CPY_DYN_LIBS(SERVICE_COMPONENTS "" ${CMAKE_CURRENT_BINARY_DIR} ${BIN_SUB_DIR})
+endif ()
+
+set(EXAMPLE_EXE run_${PROJECT_NAME})
+
+add_executable(${EXAMPLE_EXE}
+ s3_object_integrity_workflow.cpp
+ main.cpp
+ ../create_bucket.cpp
+ ../delete_objects.cpp
+ ../list_objects.cpp
+ ../delete_bucket.cpp
+)
+
+target_compile_definitions(${EXAMPLE_EXE}
+ PRIVATE
+ EXCLUDE_MAIN_FUNCTION
+ SRC_DIR="${CMAKE_CURRENT_SOURCE_DIR}"
+ LARGE_FILE_DIR="${CMAKE_CURRENT_SOURCE_DIR}/../../../../resources/sample_files/"
+)
+
+target_include_directories(${EXAMPLE_EXE}
+ PRIVATE
+ "${CMAKE_CURRENT_SOURCE_DIR}/.."
+)
+
+target_link_libraries(${EXAMPLE_EXE}
+ PRIVATE
+ ${AWSSDK_LINK_LIBRARIES}
+ ${AWSSDK_PLATFORM_DEPS}
+)
diff --git a/cpp/example_code/s3/s3_object_integrity_workflow/README.md b/cpp/example_code/s3/s3_object_integrity_workflow/README.md
new file mode 100644
index 00000000000..6dbd15fe66f
--- /dev/null
+++ b/cpp/example_code/s3/s3_object_integrity_workflow/README.md
@@ -0,0 +1,46 @@
+# Amazon S3 Object Integrity Workflow
+
+## Overview
+
+- This workflow demonstrates how to use the AWS SDK for C++ to verify the integrity of objects uploaded to Amazon S3.
+- It shows how object integrity is verified for three upload methods: PutObject, TransferManager, and multipart upload.
+- It uses all five hash algorithms that Amazon S3 supports for object verification: MD5, CRC32, CRC32C, SHA1, and SHA256.
+- It also shows the different hashing options that the SDK provides.
+- To illustrate how the hashes are calculated, the workflow computes each hash in the example code and compares the result with the hash that the SDK calculates automatically.
+
+
+The workflow runs as a command-line application that prompts the user for input.
+
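
As a rough illustration of the verification described above, the following minimal sketch uploads a single object with a SHA-256 checksum using the SDK's `PutObject` API. The bucket name, object key, and local file name are placeholders, and the checksum setters reflect current AWS SDK for C++ headers rather than code taken from this workflow.

```cpp
#include <aws/core/Aws.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <fstream>
#include <iostream>

// Upload one object with a SHA-256 checksum so that Amazon S3 can verify its integrity.
int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;

        auto body = Aws::MakeShared<Aws::FStream>("example", "local_file.txt",
                                                  std::ios_base::in | std::ios_base::binary);

        // Compute the SHA-256 hash locally, then send it with the request.
        // S3 recomputes the hash on its side and rejects the upload if the values differ.
        Aws::String localHash = Aws::Utils::HashingUtils::Base64Encode(
                Aws::Utils::HashingUtils::CalculateSHA256(*body));
        body->clear();
        body->seekg(0);

        Aws::S3::Model::PutObjectRequest request;
        request.SetBucket("amzn-s3-demo-bucket");   // Placeholder bucket name.
        request.SetKey("sha256_example_object");    // Placeholder object key.
        request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);
        request.SetChecksumSHA256(localHash);
        request.SetBody(body);

        auto outcome = client.PutObject(request);
        if (!outcome.IsSuccess()) {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```
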
+## Scenario
+
+### Prerequisites
+
+To run this workflow, you'll need the following:
+
+- CMake - A C++ cross-platform build system.
+- AWS SDK for C++.
+
+### Build and Run the Workflow
+
+
+
+```shell
+mkdir build
+cd build
+cmake .. -DCMAKE_BUILD_TYPE=Debug
+cmake --build . --config=Debug
+./run_s3_object_integrity_workflow
+```
+
+## Additional Resources
+
+- [Checking Object Integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+- [Amazon S3 User Guide](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html)
+- [AWS SDK for C++ Developer Guide](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/welcome.html)
+- [AWS SDK for C++ API Reference](https://sdk.amazonaws.com/cpp/api/LATEST/index.html)
+
+---
+
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+SPDX-License-Identifier: Apache-2.0
\ No newline at end of file
diff --git a/cpp/example_code/s3/s3_object_integrity_workflow/main.cpp b/cpp/example_code/s3/s3_object_integrity_workflow/main.cpp
new file mode 100644
index 00000000000..8a636351efc
--- /dev/null
+++ b/cpp/example_code/s3/s3_object_integrity_workflow/main.cpp
@@ -0,0 +1,45 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+/**
+ * Before running this C++ code example, set up your development environment, including your credentials.
+ *
+ * For more information, see the following documentation topic:
+ *
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started.html
+ *
+ * For information on the structure of the code examples and how to build and run the examples, see
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started-code-examples.html.
+ *
+ **/
+
+#include <aws/core/Aws.h>
+#include "s3_examples.h"
+
+/*
+ *
+ * main function
+ *
+ * Usage: 'run_s3_object_integrity_workflow'
+ *
+*/
+
+int main(int argc, char **argv) {
+ (void) argc;
+ (void) argv;
+ Aws::SDKOptions options;
+ options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Debug;
+ Aws::InitAPI(options);
+ {
+ Aws::S3::S3ClientConfiguration clientConfig;
+ // Optional: Set to the AWS Region in which the bucket was created (overrides config file).
+ // clientConfig.region = "us-east-1";
+
+ AwsDoc::S3::s3ObjectIntegrityWorkflow(
+ clientConfig);
+ }
+ Aws::ShutdownAPI(options);
+
+ return 0;
+}
+
diff --git a/cpp/example_code/s3/s3_object_integrity_workflow/s3_object_integrity_workflow.cpp b/cpp/example_code/s3/s3_object_integrity_workflow/s3_object_integrity_workflow.cpp
new file mode 100644
index 00000000000..2f0ff5af39b
--- /dev/null
+++ b/cpp/example_code/s3/s3_object_integrity_workflow/s3_object_integrity_workflow.cpp
@@ -0,0 +1,1718 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Before running this C++ code example, set up your development environment, including your credentials.
+ *
+ * For more information, see the following documentation topic:
+ *
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started.html
+ *
+ * For information on the structure of the code examples and how to build and run the examples, see
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started-code-examples.html.
+ *
+ **/
+
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "s3_examples.h"
+
+
+#pragma clang diagnostic push
+#pragma ide diagnostic ignored "UnreachableCode"
+namespace AwsDoc::S3 {
+ constexpr char TEST_FILE_KEY[] = "test_file.cpp";
+ constexpr char TEST_FILE[] = __FILE__; // Use this source file as the test file.
+ constexpr char MULTI_PART_TEST_FILE[] = "large_test_file.cpp"; // Large file created for multipart upload tests.
+ constexpr char TEST_BUCKET_PREFIX[] = "integrity-workflow-";
+ constexpr size_t MAX_BUCKET_NAME_LENGTH = 63;
+ const size_t BUFFER_SIZE_IN_MEGABYTES = 5;
+ const size_t UPLOAD_BUFFER_SIZE = BUFFER_SIZE_IN_MEGABYTES * 1024 * 1024;
+
+ const size_t LARGE_FILE_SIZE = 3 * UPLOAD_BUFFER_SIZE;
+
+ static bool gUseCalculatedChecksum = false;
+
+ typedef int HASH_METHOD;
+
+ static const HASH_METHOD DEFAULT = 1;
+ static const HASH_METHOD MD5 = DEFAULT + 1;
+ static const HASH_METHOD CRC32 = MD5 + 1;
+ static const HASH_METHOD CRC32C = CRC32 + 1;
+ static const HASH_METHOD SHA1 = CRC32C + 1;
+ static const HASH_METHOD SHA256 = SHA1 + 1;
+
+ //! A class that provides functionality for calculating hash values of data.
+ /*!
+ The `Hasher` class is responsible for calculating hash values of data using various hashing algorithms, such as MD5, CRC32, CRC32C, SHA1, and SHA256.
+ It provides methods to calculate the hash value of data stored in a `std::vector` or an `Aws::IOStream`, and to retrieve the
+ hash value in different formats (base64, hex, and byte buffer).
+
+ The class maintains an internal `Aws::Utils::ByteBuffer` to store the calculated hash value. The `calculateObjectHash` methods update
+ this internal buffer with the calculated hash value.
+
+ The supported hashing algorithms are defined as static constants within the class:
+ - `MD5`: The default hashing algorithm.
+ - `CRC32`: The CRC32 hashing algorithm.
+ - `CRC32C`: The CRC32C hashing algorithm.
+ - `SHA1`: The SHA1 hashing algorithm.
+ - `SHA256`: The SHA256 hashing algorithm.
+ */
+ class Hasher {
+ Aws::Utils::ByteBuffer m_Hash;
+
+ public:
+ Hasher() = default;
+
+ //! Calculate the object hash for vector input.
+ /*!
+ \param data: A vector of unsigned bytes.
+ \param hashMethod: The hash method to use.
+ \return bool: Function succeeded.
+ */
+        bool calculateObjectHash(std::vector<unsigned char> &data,
+ HASH_METHOD hashMethod);
+
+ //! Calculate the object hash for stream input.
+ /*!
+ \param data: An IOStream for input.
+ \param hashMethod: The hash method to use.
+ \return bool: Function succeeded.
+ */
+ bool calculateObjectHash(Aws::IOStream &data,
+ HASH_METHOD hashMethod);
+
+ //! Retrieve the stored hash as a Base64 string.
+ /*!
+ \return String: Hash as Base64 string.
+ */
+ [[nodiscard]] Aws::String getBase64HashString() const;
+
+ //! Retrieve the stored hash as a hexadecimal string.
+ /*!
+ \return String: Hash as hexadecimal string.
+ */
+ [[nodiscard]] Aws::String getHexHashString() const;
+
+ //! Retrieve the stored hash as a ByteBuffer.
+ /*!
+         \return ByteBuffer: Hash as a ByteBuffer.
+ */
+ [[nodiscard]] Aws::Utils::ByteBuffer getByteBufferHash() const;
+ };
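
A minimal sketch of the kind of digest calculation a class like this wraps, using the SDK's `Aws::Utils::HashingUtils` helpers; the specific overloads and headers are assumed from current aws-cpp-sdk-core versions, not taken from this file, and the input file name is a placeholder.

```cpp
#include <aws/core/Aws.h>
#include <aws/core/utils/HashingUtils.h>
#include <fstream>
#include <iostream>

// Compute a SHA-256 digest over a file stream and print it in two of the
// formats the Hasher class exposes: Base64 and hex.
int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);  // Initializes the crypto implementations used by HashingUtils.
    {
        std::fstream file("test_file.cpp", std::ios_base::in | std::ios_base::binary);
        if (file) {
            Aws::Utils::ByteBuffer digest = Aws::Utils::HashingUtils::CalculateSHA256(file);
            std::cout << "Base64: " << Aws::Utils::HashingUtils::Base64Encode(digest) << std::endl;
            std::cout << "Hex:    " << Aws::Utils::HashingUtils::HexEncode(digest) << std::endl;
        } else {
            std::cerr << "Could not open the input file." << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```
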
+
+ //! Routine which uploads an object to an S3 bucket with different object integrity hashing methods.
+ /*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param hashData: The hash value that will be associated with the uploaded object.
+ \param hashMethod: The hashing algorithm to use when calculating the hash value.
+ \param body: The data content of the object being uploaded.
+ \param useDefaultHashMethod: A flag indicating whether to use the default hash method or the one specified in the hashMethod parameter.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return bool: Function succeeded.
+ */
+ bool putObjectWithHash(const Aws::String &bucket, const Aws::String &key,
+ const Aws::String &hashData,
+ AwsDoc::S3::HASH_METHOD hashMethod,
+ const std::shared_ptr &body,
+ bool useDefaultHashMethod,
+ const Aws::S3::S3Client &client);
+
+ //! Routine which retrieves the hash value of an object stored in an S3 bucket.
+ /*!
+ \param bucket: The name of the S3 bucket where the object is stored.
+ \param key: The unique identifier (key) of the object within the S3 bucket.
+ \param hashMethod: The hashing algorithm used to calculate the hash value of the object.
+ \param client: The S3 client instance used to retrieve the object.
+     \param[out] hashData: The retrieved hash value of the object.
+     \param[out] partHashes: Optional vector that receives the hash values of the individual parts; pass nullptr to skip.
+ \return bool: Function succeeded.
+ */
+ bool retrieveObjectHash(const Aws::String &bucket, const Aws::String &key,
+ AwsDoc::S3::HASH_METHOD hashMethod,
+ Aws::String &hashData,
+ std::vector *partHashes,
+ const Aws::S3::S3Client &client);
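
The declaration above pairs with the upload routines: after an upload, the stored checksum can be read back and compared with the locally computed value. A minimal sketch of that retrieval with `GetObjectAttributes` follows; the bucket and key are placeholders, and the request and response accessors are assumptions based on current `Aws::S3::Model` headers rather than an excerpt from this patch.

```cpp
#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectAttributesRequest.h>
#include <iostream>

// Read back the checksum that Amazon S3 stored for an object so it can be
// compared with a locally calculated hash.
int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;

        Aws::S3::Model::GetObjectAttributesRequest request;
        request.SetBucket("amzn-s3-demo-bucket");   // Placeholder bucket name.
        request.SetKey("sha256_example_object");    // Placeholder object key.
        request.SetObjectAttributes({Aws::S3::Model::ObjectAttributes::Checksum});

        auto outcome = client.GetObjectAttributes(request);
        if (outcome.IsSuccess()) {
            std::cout << "Stored SHA-256 checksum: "
                      << outcome.GetResult().GetChecksum().GetChecksumSHA256()
                      << std::endl;
        } else {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```
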
+
+ //! Verifies the hashing results between the retrieved and local hashes.
+ /*!
+ \param retrievedHash The hash value retrieved from the remote source.
+ \param localHash The hash value calculated locally.
+ \param uploadtype The type of upload (e.g., "multipart", "single-part").
+ \param hashMethod The hashing method used (e.g., MD5, SHA-256).
+ \param retrievedPartHashes (Optional) The list of hashes for the individual parts retrieved from the remote source.
+ \param localPartHashes (Optional) The list of hashes for the individual parts calculated locally.
+ */
+ void verifyHashingResults(const Aws::String &retrievedHash, const Hasher &localHash,
+ const Aws::String &uploadtype, HASH_METHOD hashMethod,
+ const std::vector &retrievedPartHashes = std::vector(),
+ const std::vector &localPartHashes = std::vector());
+
+ //! Routine which uploads an object to an S3 bucket using the AWS C++ SDK's Transfer Manager.
+ /*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param hashMethod: The hashing algorithm to use when calculating the hash value.
+ \param useDefaultHashMethod: A flag indicating whether to use the default hash method or the one specified in the hashMethod parameter.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return bool: Function succeeded.
+ */
+ bool doTransferManagerUpload(const Aws::String &bucket, const Aws::String &key,
+ AwsDoc::S3::HASH_METHOD hashMethod,
+ bool useDefaultHashMethod,
+ const std::shared_ptr &client);
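
For the TransferManager path declared above, the upload itself is driven by `Aws::Transfer::TransferManager`. A minimal sketch under assumed names follows; the thread-pool size, file path, bucket, and key are placeholders, and whether the transfer configuration also exposes a checksum algorithm setting depends on the SDK version, so it is omitted here.

```cpp
#include <aws/core/Aws.h>
#include <aws/core/utils/threading/Executor.h>
#include <aws/s3/S3Client.h>
#include <aws/transfer/TransferManager.h>
#include <iostream>

// Upload a file with the Transfer Manager, which splits large files into parts
// and uploads them in parallel.
int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        auto s3Client = Aws::MakeShared<Aws::S3::S3Client>("example");
        auto executor = Aws::MakeShared<Aws::Utils::Threading::PooledThreadExecutor>("example", 4);

        Aws::Transfer::TransferManagerConfiguration config(executor.get());
        config.s3Client = s3Client;

        auto transferManager = Aws::Transfer::TransferManager::Create(config);

        auto handle = transferManager->UploadFile(
                "large_test_file.cpp",        // Placeholder local file path.
                "amzn-s3-demo-bucket",        // Placeholder bucket name.
                "tr_example_object",          // Placeholder object key.
                "application/octet-stream",
                Aws::Map<Aws::String, Aws::String>());
        handle->WaitUntilFinished();

        std::cout << "Transfer status: "
                  << (handle->GetStatus() == Aws::Transfer::TransferStatus::COMPLETED
                              ? "completed" : "not completed")
                  << std::endl;
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```
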
+
+ //! Routine which calculates the hash values for each part of a file being uploaded to an S3 bucket.
+ /*!
+ \param hashMethod: The hashing algorithm to use when calculating the hash values.
+ \param fileName: The path to the file for which the part hashes will be calculated.
+ \param bufferSize: The size of the buffer to use when reading the file.
+ \param[out] hashDataResult: The Hasher object that will store the concatenated hash value.
+ \param[out] partHashes: The vector that will store the calculated hash values for each part of the file.
+ \return bool: Function succeeded.
+ */
+ bool calculatePartHashesForFile(AwsDoc::S3::HASH_METHOD hashMethod,
+ const Aws::String &fileName,
+ size_t bufferSize,
+ AwsDoc::S3::Hasher &hashDataResult,
+ std::vector &partHashes);
+
+ //! Create a multipart upload.
+ /*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return Aws::String: Upload ID or empty string if failed.
+ */
+ Aws::String
+ createMultipartUpload(const Aws::String &bucket, const Aws::String &key,
+ Aws::S3::Model::ChecksumAlgorithm checksumAlgorithm,
+ const Aws::S3::S3Client &client);
+
+ //! Upload a part to an S3 bucket.
+ /*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param uploadID: An upload ID string.
+     \param partNumber: The part number of this part of the multipart upload.
+ \param checksumAlgorithm: Checksum algorithm, ignored when NOT_SET.
+ \param calculatedHash: A data integrity hash to set, depending on the checksum algorithm,
+ ignored when it is an empty string.
+     \param body: A shared_ptr to an IOStream of the data to be uploaded.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return UploadPartOutcome: The outcome.
+ */
+ Aws::S3::Model::UploadPartOutcome uploadPart(const Aws::String &bucket,
+ const Aws::String &key,
+ const Aws::String &uploadID,
+ int partNumber,
+ Aws::S3::Model::ChecksumAlgorithm checksumAlgorithm,
+ const Aws::String &calculatedHash,
+ const std::shared_ptr &body,
+ const Aws::S3::S3Client &client);
+
+ //! Abort a multipart upload to an S3 bucket.
+ /*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param uploadID: An upload ID string.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return bool: Function succeeded.
+ */
+
+ bool abortMultipartUpload(const Aws::String &bucket,
+ const Aws::String &key,
+ const Aws::String &uploadID,
+ const Aws::S3::S3Client &client);
+
+ //! Complete a multipart upload to an S3 bucket.
+ /*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param uploadID: An upload ID string.
+ \param parts: A vector of CompleteParts.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return CompleteMultipartUploadOutcome: The request outcome.
+ */
+ Aws::S3::Model::CompleteMultipartUploadOutcome completeMultipartUpload(const Aws::String &bucket,
+ const Aws::String &key,
+ const Aws::String &uploadID,
+ const Aws::Vector &parts,
+ const Aws::S3::S3Client &client);
+
+ //! Routine which performs a multi-part upload.
+ /*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param hashMethod: The hashing algorithm to use when calculating the hash value.
+ \param ioStream: An IOStream for the data to be uploaded.
+ \param useDefaultHashMethod: A flag indicating whether to use the default hash method or the one specified in the hashMethod parameter.
+ \param[out] hashDataResult: The Hasher object that will store the concatenated hash value.
+ \param[out] partHashes: The vector that will store the calculated hash values for each part of the file.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return bool: Function succeeded.
+ */
+ bool doMultipartUpload(const Aws::String &bucket,
+ const Aws::String &key,
+ AwsDoc::S3::HASH_METHOD hashMethod,
+ const std::shared_ptr &ioStream,
+ bool useDefaultHashMethod,
+ AwsDoc::S3::Hasher &hashDataResult,
+ std::vector &partHashes,
+ const Aws::S3::S3Client &client);
+
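
Taken together, the declarations above follow the standard three-step multipart flow: create the upload, upload the parts, then complete the upload. The fragment below sketches that flow with a SHA-256 checksum attached to a single part; the request setters are assumptions based on current `Aws::S3::Model` headers, and `bucket`, `key`, and `partStream` stand in for values the workflow computes.

```cpp
#include <aws/s3/S3Client.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>
#include <aws/s3/model/UploadPartRequest.h>
#include <aws/s3/model/CompleteMultipartUploadRequest.h>
#include <aws/s3/model/CompletedMultipartUpload.h>

// Minimal multipart sketch: create the upload, send one part with its checksum,
// then complete the upload with the checksum echoed back by S3.
bool multipartWithChecksumSketch(const Aws::S3::S3Client &client,
                                 const Aws::String &bucket,
                                 const Aws::String &key,
                                 const std::shared_ptr<Aws::IOStream> &partStream) {
    // 1. Create the multipart upload and declare the checksum algorithm.
    Aws::S3::Model::CreateMultipartUploadRequest createRequest;
    createRequest.SetBucket(bucket);
    createRequest.SetKey(key);
    createRequest.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);
    auto createOutcome = client.CreateMultipartUpload(createRequest);
    if (!createOutcome.IsSuccess()) {
        return false;
    }
    const Aws::String uploadId = createOutcome.GetResult().GetUploadId();

    // 2. Upload a part. Because the algorithm is set on the request, the SDK
    //    calculates the SHA-256 checksum of the body for this part.
    Aws::S3::Model::UploadPartRequest partRequest;
    partRequest.SetBucket(bucket);
    partRequest.SetKey(key);
    partRequest.SetUploadId(uploadId);
    partRequest.SetPartNumber(1);
    partRequest.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);
    partRequest.SetBody(partStream);
    auto partOutcome = client.UploadPart(partRequest);
    if (!partOutcome.IsSuccess()) {
        return false;
    }

    // 3. Complete the upload, passing back each part's ETag and checksum.
    Aws::S3::Model::CompletedPart completedPart;
    completedPart.SetPartNumber(1);
    completedPart.SetETag(partOutcome.GetResult().GetETag());
    completedPart.SetChecksumSHA256(partOutcome.GetResult().GetChecksumSHA256());

    Aws::S3::Model::CompletedMultipartUpload completedUpload;
    completedUpload.AddParts(completedPart);

    Aws::S3::Model::CompleteMultipartUploadRequest completeRequest;
    completeRequest.SetBucket(bucket);
    completeRequest.SetKey(key);
    completeRequest.SetUploadId(uploadId);
    completeRequest.SetMultipartUpload(completedUpload);
    return client.CompleteMultipartUpload(completeRequest).IsSuccess();
}
```
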
+ //! Routine which retrieves the string for a HASH_METHOD constant.
+ /*!
+ \param: hashMethod: A HASH_METHOD constant.
+ \return: String: A string description of the hash method.
+ */
+ Aws::String stringForHashMethod(AwsDoc::S3::HASH_METHOD hashMethod);
+
+ //! Routine that returns the ChecksumAlgorithm for a HASH_METHOD constant.
+ /*!
+ \param: hashMethod: A HASH_METHOD constant.
+ \return: ChecksumAlgorithm: The ChecksumAlgorithm enum.
+ */
+ Aws::S3::Model::ChecksumAlgorithm
+ getChecksumAlgorithmForHashMethod(AwsDoc::S3::HASH_METHOD hashMethod);
+
+ //! Routine which cleans up after the example is complete.
+ /*!
+ \param bucket: The name of the S3 bucket where the object was uploaded.
+ \param clientConfiguration: The client configuration for the S3 client.
+ \return bool: Function succeeded.
+ */
+ bool cleanUp(const Aws::String &bucket,
+ const Aws::S3::S3ClientConfiguration &clientConfiguration);
+
+ //! Console interaction introducing the workflow.
+ /*!
+ \param bucketName: The name of the S3 bucket to use.
+ */
+ void introductoryExplanations(const Aws::String &bucketName);
+
+ //! Console interaction which explains the PutObject results.
+ /*!
+ */
+ void explainPutObjectResults();
+
+ //! Console interaction explaining transfer manager uploads.
+ /*!
+ \param objectKey: The key for the object being uploaded.
+ */
+ void introductoryTransferManagerUploadExplanations(const Aws::String &objectKey);
+
+ //! Console interaction explaining multi-part uploads.
+ /*!
+ \param objectKey: The key for the object being uploaded.
+ \param chosenHashMethod: The hash method selected by the user.
+ */
+ void multiPartUploadExplanations(const Aws::String &objectKey,
+ HASH_METHOD chosenHashMethod);
+
+ //! Create a large file for doing multi-part uploads.
+ /*!
+ */
+ bool createLargeFileIfNotExists();
+
+ //! Test routine passed as argument to askQuestion routine.
+ /*!
+ \param string: A string to test.
+ \return bool: True if empty.
+ */
+ static bool testForEmptyString(const Aws::String &string);
+
+ //! Command line prompt/response utility function.
+ /*!
+ \param string: A question prompt.
+ \param test: Test function for response.
+ \return Aws::String: User's response.
+ */
+ static Aws::String askQuestion(const Aws::String &string,
+                                       const std::function<bool(const Aws::String &)> &test = testForEmptyString);
+
+ //! Command line prompt/response for yes/no question.
+ /*!
+ \param string: A question prompt expecting a 'y' or 'n' response.
+ \return bool: True if yes.
+ */
+ static bool askYesNoQuestion(const Aws::String &string);
+
+ //! Command line prompt/response utility function for an int result confined to
+ //! a range.
+ /*!
+ \param string: A question prompt.
+ \param low: Low inclusive.
+ \param high: High inclusive.
+ \return int: User's response.
+ */
+ static int askQuestionForIntRange(const Aws::String &string, int low,
+ int high);
+
+ //! Utility routine to print a line of asterisks to standard out.
+ /*!
+ \return void:
+ */
+ static void printAsterisksLine() {
+ std::cout << "\n" << std::setfill('*') << std::setw(88) << "\n"
+ << std::endl;
+ }
+
+ //! Test routine passed as argument to askQuestion routine.
+ /*!
+ \return bool: Always true.
+ */
+ static bool alwaysTrueTest(const Aws::String &) { return true; }
+
+ void pressEnterToContinue() {
+ askQuestion("Press Enter to continue...", alwaysTrueTest);
+    }
+} // namespace AwsDoc::S3
+
+// snippet-start:[cpp.example_code.s3.Scenario_ObjectIntegrity]
+//! Routine which runs the S3 object integrity workflow.
+/*!
+ \param clientConfig: Aws client configuration.
+ \return bool: Function succeeded.
+*/
+bool AwsDoc::S3::s3ObjectIntegrityWorkflow(
+ const Aws::S3::S3ClientConfiguration &clientConfiguration) {
+
+ /*
+ * Create a large file to be used for multipart uploads.
+ */
+ if (!createLargeFileIfNotExists()) {
+ std::cerr << "Workflow exiting because large file creation failed." << std::endl;
+ return false;
+ }
+
+ Aws::String bucketName = TEST_BUCKET_PREFIX;
+ bucketName += Aws::Utils::UUID::RandomUUID();
+ bucketName = Aws::Utils::StringUtils::ToLower(bucketName.c_str());
+
+ bucketName.resize(std::min(bucketName.size(), MAX_BUCKET_NAME_LENGTH));
+
+ introductoryExplanations(bucketName);
+
+ if (!AwsDoc::S3::createBucket(bucketName, clientConfiguration)) {
+ std::cerr << "Workflow exiting because bucket creation failed." << std::endl;
+ return false;
+ }
+
+ Aws::S3::S3ClientConfiguration s3ClientConfiguration(clientConfiguration);
+    std::shared_ptr<Aws::S3::S3Client> client = Aws::MakeShared<Aws::S3::S3Client>("S3Client", s3ClientConfiguration);
+
+ printAsterisksLine();
+ std::cout << "Choose from one of the following checksum algorithms."
+ << std::endl;
+
+ for (HASH_METHOD hashMethod = DEFAULT; hashMethod <= SHA256; ++hashMethod) {
+ std::cout << " " << hashMethod << " - " << stringForHashMethod(hashMethod)
+ << std::endl;
+ }
+
+ HASH_METHOD chosenHashMethod = askQuestionForIntRange("Enter an index: ", DEFAULT,
+ SHA256);
+
+
+ gUseCalculatedChecksum = !askYesNoQuestion(
+ "Let the SDK calculate the checksum for you? (y/n) ");
+
+ printAsterisksLine();
+
+ std::cout << "The workflow will now upload a file using PutObject."
+ << std::endl;
+ std::cout << "Object integrity will be verified using the "
+ << stringForHashMethod(chosenHashMethod) << " algorithm."
+ << std::endl;
+ if (gUseCalculatedChecksum) {
+ std::cout
+ << "A checksum computed by this workflow will be used for object integrity verification,"
+ << std::endl;
+ std::cout << "except for the TransferManager upload." << std::endl;
+ } else {
+ std::cout
+ << "A checksum computed by the SDK will be used for object integrity verification."
+ << std::endl;
+ }
+
+ pressEnterToContinue();
+ printAsterisksLine();
+
+    std::shared_ptr<Aws::IOStream> inputData =
+            Aws::MakeShared<Aws::FStream>("SampleAllocationTag",
+                                          TEST_FILE,
+                                          std::ios_base::in |
+                                          std::ios_base::binary);
+
+ if (!*inputData) {
+ std::cerr << "Error unable to read file " << TEST_FILE << std::endl;
+ cleanUp(bucketName, clientConfiguration);
+ return false;
+ }
+
+ Hasher hasher;
+ HASH_METHOD putObjectHashMethod = chosenHashMethod;
+ if (putObjectHashMethod == DEFAULT) {
+ putObjectHashMethod = MD5; // MD5 is the default hash method for PutObject.
+
+ std::cout << "The default checksum algorithm for PutObject is "
+ << stringForHashMethod(putObjectHashMethod)
+ << std::endl;
+ }
+
+ // Demonstrate in code how the hash is computed.
+ if (!hasher.calculateObjectHash(*inputData, putObjectHashMethod)) {
+ std::cerr << "Error calculating hash for file " << TEST_FILE << std::endl;
+ cleanUp(bucketName, clientConfiguration);
+ return false;
+ }
+ Aws::String key = stringForHashMethod(putObjectHashMethod);
+ key += "_";
+ key += TEST_FILE_KEY;
+ Aws::String localHash = hasher.getBase64HashString();
+
+ // Upload the object with PutObject
+ if (!putObjectWithHash(bucketName, key, localHash, putObjectHashMethod,
+ inputData, chosenHashMethod == DEFAULT,
+ *client)) {
+ std::cerr << "Error putting file " << TEST_FILE << " to bucket "
+ << bucketName << " with key " << key << std::endl;
+ cleanUp(bucketName, clientConfiguration);
+ return false;
+ }
+
+ Aws::String retrievedHash;
+ if (!retrieveObjectHash(bucketName, key,
+ putObjectHashMethod, retrievedHash,
+ nullptr, *client)) {
+ std::cerr << "Error getting file " << TEST_FILE << " from bucket "
+ << bucketName << " with key " << key << std::endl;
+ cleanUp(bucketName, clientConfiguration);
+ return false;
+ }
+
+ explainPutObjectResults();
+ verifyHashingResults(retrievedHash, hasher,
+ "PutObject upload", putObjectHashMethod);
+
+
+ printAsterisksLine();
+ pressEnterToContinue();
+
+ key = "tr_";
+ key += stringForHashMethod(chosenHashMethod) + "_" + MULTI_PART_TEST_FILE;
+
+ introductoryTransferManagerUploadExplanations(key);
+
+ HASH_METHOD transferManagerHashMethod = chosenHashMethod;
+ if (transferManagerHashMethod == DEFAULT) {
+ transferManagerHashMethod = CRC32; // The default hash method for the TransferManager is CRC32.
+
+ std::cout << "The default checksum algorithm for TransferManager is "
+ << stringForHashMethod(transferManagerHashMethod)
+ << std::endl;
+ }
+
+ // Upload the large file using the transfer manager.
+ if (!doTransferManagerUpload(bucketName, key, transferManagerHashMethod, chosenHashMethod == DEFAULT,
+ client)) {
+ std::cerr << "Exiting because of an error in doTransferManagerUpload." << std::endl;
+ cleanUp(bucketName, clientConfiguration);
+ return false;
+ }
+
+    std::vector<Aws::String> retrievedTransferManagerPartHashes;
+ Aws::String retrievedTransferManagerFinalHash;
+
+ // Retrieve all the hashes for the TransferManager upload.
+ if (!retrieveObjectHash(bucketName, key,
+ transferManagerHashMethod,
+ retrievedTransferManagerFinalHash,
+ &retrievedTransferManagerPartHashes, *client)) {
+ std::cerr << "Exiting because of an error in retrieveObjectHash for TransferManager." << std::endl;
+ cleanUp(bucketName, clientConfiguration);
+ return false;
+ }
+
+ AwsDoc::S3::Hasher locallyCalculatedFinalHash;
+    std::vector<Aws::String> locallyCalculatedPartHashes;
+
+ // Calculate the hashes locally to demonstrate how TransferManager hashes are computed.
+ if (!calculatePartHashesForFile(transferManagerHashMethod, MULTI_PART_TEST_FILE,
+ UPLOAD_BUFFER_SIZE,
+ locallyCalculatedFinalHash,
+ locallyCalculatedPartHashes)) {
+ std::cerr << "Exiting because of an error in calculatePartHashesForFile." << std::endl;
+ cleanUp(bucketName, clientConfiguration);
+ return false;
+ }
+
+ verifyHashingResults(retrievedTransferManagerFinalHash,
+ locallyCalculatedFinalHash, "TransferManager upload",
+ transferManagerHashMethod,
+ retrievedTransferManagerPartHashes,
+ locallyCalculatedPartHashes);
+
+ printAsterisksLine();
+
+ key = "mp_";
+ key += stringForHashMethod(chosenHashMethod) + "_" + MULTI_PART_TEST_FILE;
+
+ multiPartUploadExplanations(key, chosenHashMethod);
+
+ pressEnterToContinue();
+
+    std::shared_ptr<Aws::IOStream> largeFileInputData =
+            Aws::MakeShared<Aws::FStream>("SampleAllocationTag",
+                                          MULTI_PART_TEST_FILE,
+                                          std::ios_base::in |
+                                          std::ios_base::binary);
+
+ if (!largeFileInputData->good()) {
+ std::cerr << "Error unable to read file " << TEST_FILE << std::endl;
+ cleanUp(bucketName, clientConfiguration);
+ return false;
+ }
+
+ HASH_METHOD multipartUploadHashMethod = chosenHashMethod;
+ if (multipartUploadHashMethod == DEFAULT) {
+ multipartUploadHashMethod = MD5; // The default hash method for multipart uploads is MD5.
+
+ std::cout << "The default checksum algorithm for multipart upload is "
+                  << stringForHashMethod(multipartUploadHashMethod)
+ << std::endl;
+ }
+
+ AwsDoc::S3::Hasher hashData;
+    std::vector<Aws::String> partHashes;
+
+ if (!doMultipartUpload(bucketName, key,
+ multipartUploadHashMethod,
+ largeFileInputData, chosenHashMethod == DEFAULT,
+ hashData,
+ partHashes,
+ *client)) {
+ std::cerr << "Exiting because of an error in doMultipartUpload." << std::endl;
+ cleanUp(bucketName, clientConfiguration);
+ return false;
+ }
+
+ std::cout << "Finished multipart upload of with hash method " <<
+ stringForHashMethod(multipartUploadHashMethod) << std::endl;
+
+ std::cout << "Now we will retrieve the checksums from the server." << std::endl;
+
+ retrievedHash.clear();
+    std::vector<Aws::String> retrievedPartHashes;
+ if (!retrieveObjectHash(bucketName, key,
+ multipartUploadHashMethod,
+ retrievedHash, &retrievedPartHashes, *client)) {
+ std::cerr << "Exiting because of an error in retrieveObjectHash for multipart." << std::endl;
+ cleanUp(bucketName, clientConfiguration);
+ return false;
+ }
+
+ verifyHashingResults(retrievedHash, hashData, "MultiPart upload",
+ multipartUploadHashMethod,
+ retrievedPartHashes, partHashes);
+
+ printAsterisksLine();
+
+ if (askYesNoQuestion("Would you like to delete the resources created in this workflow? (y/n)")) {
+ return cleanUp(bucketName, clientConfiguration);
+ } else {
+ std::cout << "The bucket " << bucketName << " was not deleted." << std::endl;
+ return true;
+ }
+}
+
+//! Routine which uploads an object to an S3 bucket with different object integrity hashing methods.
+/*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param hashData: The hash value that will be associated with the uploaded object.
+ \param hashMethod: The hashing algorithm to use when calculating the hash value.
+ \param body: The data content of the object being uploaded.
+ \param useDefaultHashMethod: A flag indicating whether to use the default hash method or the one specified in the hashMethod parameter.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return bool: Function succeeded.
+*/
+bool AwsDoc::S3::putObjectWithHash(const Aws::String &bucket, const Aws::String &key,
+ const Aws::String &hashData,
+ AwsDoc::S3::HASH_METHOD hashMethod,
+                                   const std::shared_ptr<Aws::IOStream> &body,
+ bool useDefaultHashMethod,
+ const Aws::S3::S3Client &client) {
+ Aws::S3::Model::PutObjectRequest request;
+ request.SetBucket(bucket);
+ request.SetKey(key);
+ if (!useDefaultHashMethod) {
+ if (hashMethod != MD5) {
+ request.SetChecksumAlgorithm(getChecksumAlgorithmForHashMethod(hashMethod));
+ }
+ }
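+    // MD5 is not a ChecksumAlgorithm enum value; when an MD5 checksum is supplied,
+    // it is sent in the Content-MD5 header instead (see SetContentMD5 below).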
+
+ if (gUseCalculatedChecksum) {
+ switch (hashMethod) {
+ case AwsDoc::S3::MD5:
+ request.SetContentMD5(hashData);
+ break;
+ case AwsDoc::S3::SHA1:
+ request.SetChecksumSHA1(hashData);
+ break;
+ case AwsDoc::S3::SHA256:
+ request.SetChecksumSHA256(hashData);
+ break;
+ case AwsDoc::S3::CRC32:
+ request.SetChecksumCRC32(hashData);
+ break;
+ case AwsDoc::S3::CRC32C:
+ request.SetChecksumCRC32C(hashData);
+ break;
+ default:
+ std::cerr << "Unknown hash method." << std::endl;
+ return false;
+ }
+ }
+ request.SetBody(body);
+ Aws::S3::Model::PutObjectOutcome outcome = client.PutObject(request);
+ body->seekg(0, body->beg);
+ if (outcome.IsSuccess()) {
+ std::cout << "Object successfully uploaded." << std::endl;
+ } else {
+ std::cerr << "Error uploading object." <<
+ outcome.GetError().GetMessage() << std::endl;
+ }
+ return outcome.IsSuccess();
+}
+
+
+// snippet-start:[cpp.example_code.s3.GetObjectAttributes]
+//! Routine which retrieves the hash value of an object stored in an S3 bucket.
+/*!
+ \param bucket: The name of the S3 bucket where the object is stored.
+ \param key: The unique identifier (key) of the object within the S3 bucket.
+ \param hashMethod: The hashing algorithm used to calculate the hash value of the object.
+ \param[out] hashData: The retrieved hash.
+ \param[out] partHashes: The part hashes if available.
+ \param client: The S3 client instance used to retrieve the object.
+ \return bool: Function succeeded.
+*/
+bool AwsDoc::S3::retrieveObjectHash(const Aws::String &bucket, const Aws::String &key,
+ AwsDoc::S3::HASH_METHOD hashMethod,
+ Aws::String &hashData,
+                                    std::vector<Aws::String> *partHashes,
+ const Aws::S3::S3Client &client) {
+ Aws::S3::Model::GetObjectAttributesRequest request;
+ request.SetBucket(bucket);
+ request.SetKey(key);
+
+ if (hashMethod == MD5) {
+        Aws::Vector<Aws::S3::Model::ObjectAttributes> attributes;
+ attributes.push_back(Aws::S3::Model::ObjectAttributes::ETag);
+ request.SetObjectAttributes(attributes);
+
+ Aws::S3::Model::GetObjectAttributesOutcome outcome = client.GetObjectAttributes(
+ request);
+ if (outcome.IsSuccess()) {
+ const Aws::S3::Model::GetObjectAttributesResult &result = outcome.GetResult();
+ hashData = result.GetETag();
+ } else {
+ std::cerr << "Error retrieving object etag attributes." <<
+ outcome.GetError().GetMessage() << std::endl;
+ return false;
+ }
+ } else { // hashMethod != MD5
+        Aws::Vector<Aws::S3::Model::ObjectAttributes> attributes;
+ attributes.push_back(Aws::S3::Model::ObjectAttributes::Checksum);
+ request.SetObjectAttributes(attributes);
+
+ Aws::S3::Model::GetObjectAttributesOutcome outcome = client.GetObjectAttributes(
+ request);
+ if (outcome.IsSuccess()) {
+ const Aws::S3::Model::GetObjectAttributesResult &result = outcome.GetResult();
+ switch (hashMethod) {
+ case AwsDoc::S3::DEFAULT: // NOLINT(*-branch-clone)
+ break; // Default is not supported.
+#pragma clang diagnostic push
+#pragma ide diagnostic ignored "UnreachableCode"
+ case AwsDoc::S3::MD5:
+ break; // MD5 is not supported.
+#pragma clang diagnostic pop
+ case AwsDoc::S3::SHA1:
+ hashData = result.GetChecksum().GetChecksumSHA1();
+ break;
+ case AwsDoc::S3::SHA256:
+ hashData = result.GetChecksum().GetChecksumSHA256();
+ break;
+ case AwsDoc::S3::CRC32:
+ hashData = result.GetChecksum().GetChecksumCRC32();
+ break;
+ case AwsDoc::S3::CRC32C:
+ hashData = result.GetChecksum().GetChecksumCRC32C();
+ break;
+ default:
+ std::cerr << "Unknown hash method." << std::endl;
+ return false;
+ }
+ } else {
+ std::cerr << "Error retrieving object checksum attributes." <<
+ outcome.GetError().GetMessage() << std::endl;
+ return false;
+ }
+
+ if (nullptr != partHashes) {
+ attributes.clear();
+ attributes.push_back(Aws::S3::Model::ObjectAttributes::ObjectParts);
+ request.SetObjectAttributes(attributes);
+ outcome = client.GetObjectAttributes(request);
+ if (outcome.IsSuccess()) {
+ const Aws::S3::Model::GetObjectAttributesResult &result = outcome.GetResult();
+                const Aws::Vector<Aws::S3::Model::ObjectPart> parts = result.GetObjectParts().GetParts();
+ for (const Aws::S3::Model::ObjectPart &part: parts) {
+ switch (hashMethod) {
+ case AwsDoc::S3::DEFAULT: // Default is not supported. NOLINT(*-branch-clone)
+ break;
+ case AwsDoc::S3::MD5: // MD5 is not supported.
+ break;
+ case AwsDoc::S3::SHA1:
+ partHashes->push_back(part.GetChecksumSHA1());
+ break;
+ case AwsDoc::S3::SHA256:
+ partHashes->push_back(part.GetChecksumSHA256());
+ break;
+ case AwsDoc::S3::CRC32:
+ partHashes->push_back(part.GetChecksumCRC32());
+ break;
+ case AwsDoc::S3::CRC32C:
+ partHashes->push_back(part.GetChecksumCRC32C());
+ break;
+ default:
+ std::cerr << "Unknown hash method." << std::endl;
+ return false;
+ }
+ }
+ } else {
+ std::cerr << "Error retrieving object attributes for object parts." <<
+ outcome.GetError().GetMessage() << std::endl;
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+// snippet-end:[cpp.example_code.s3.GetObjectAttributes]
+
+//! Verifies the hashing results between the retrieved and local hashes.
+/*!
+ \param retrievedHash The hash value retrieved from the remote source.
+ \param localHash The hash value calculated locally.
+ \param uploadtype The type of upload (e.g., "multipart", "single-part").
+ \param hashMethod The hashing method used (e.g., MD5, SHA-256).
+ \param retrievedPartHashes (Optional) The list of hashes for the individual parts retrieved from the remote source.
+ \param localPartHashes (Optional) The list of hashes for the individual parts calculated locally.
+ */
+void AwsDoc::S3::verifyHashingResults(const Aws::String &retrievedHash,
+ const Hasher &localHash,
+ const Aws::String &uploadtype,
+ HASH_METHOD hashMethod,
+                                       const std::vector<Aws::String> &retrievedPartHashes,
+                                       const std::vector<Aws::String> &localPartHashes) {
+ std::cout << "For " << uploadtype << " retrieved hash is " << retrievedHash << std::endl;
+ if (!retrievedPartHashes.empty()) {
+ std::cout << retrievedPartHashes.size() << " part hash(es) were also retrieved."
+ << std::endl;
+ for (auto &retrievedPartHash: retrievedPartHashes) {
+ std::cout << " Part hash " << retrievedPartHash << std::endl;
+ }
+ }
+ Aws::String hashString;
+ if (hashMethod == MD5) {
+ hashString = localHash.getHexHashString();
+ if (!localPartHashes.empty()) {
+ hashString += "-" + std::to_string(localPartHashes.size());
+ }
+ } else {
+ hashString = localHash.getBase64HashString();
+ }
+
+ bool allMatch = true;
+ if (hashString != retrievedHash) {
+ std::cerr << "For " << uploadtype << ", the main hashes do not match" << std::endl;
+ std::cerr << "Local hash- '" << hashString << "'" << std::endl;
+ std::cerr << "Remote hash - '" << retrievedHash << "'" << std::endl;
+ allMatch = false;
+ }
+
+ if (hashMethod != MD5) {
+        if (localPartHashes.size() != retrievedPartHashes.size()) {
+            std::cerr << "For " << uploadtype << ", the number of part hashes does not match" << std::endl;
+            std::cerr << "Local number of hashes - '" << localPartHashes.size() << "'"
+                      << std::endl;
+            std::cerr << "Remote number of hashes - '"
+                      << retrievedPartHashes.size()
+                      << "'" << std::endl;
+            allMatch = false;
+        }
+
+        for (size_t i = 0; i < std::min(localPartHashes.size(), retrievedPartHashes.size()); ++i) {
+ if (localPartHashes[i] != retrievedPartHashes[i]) {
+ std::cerr << "For " << uploadtype << ", the part hashes do not match for part " << i + 1
+ << "." << std::endl;
+ std::cerr << "Local hash- '" << localPartHashes[i] << "'"
+ << std::endl;
+ std::cerr << "Remote hash - '" << retrievedPartHashes[i] << "'"
+ << std::endl;
+ allMatch = false;
+ }
+ }
+ }
+
+ if (allMatch) {
+ std::cout << "For " << uploadtype << ", locally and remotely calculated hashes all match!" << std::endl;
+ }
+
+}
+
+static void transferManagerErrorCallback(const Aws::Transfer::TransferManager *,
+                                         const std::shared_ptr<const Aws::Transfer::TransferHandle> &,
+                                         const Aws::Client::AWSError<Aws::Client::CoreErrors> &err) {
+ std::cerr << "Error during transfer: '" << err.GetMessage() << "'" << std::endl;
+}
+
+static void transferManagerStatusCallback(const Aws::Transfer::TransferManager *,
+                                          const std::shared_ptr<const Aws::Transfer::TransferHandle> &handle) {
+ if (handle->GetStatus() == Aws::Transfer::TransferStatus::IN_PROGRESS) {
+ std::cout << "Bytes transferred: " << handle->GetBytesTransferred() << std::endl;
+ }
+}
+
+//! Routine which uploads an object to an S3 bucket using the AWS C++ SDK's Transfer Manager.
+/*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param hashMethod: The hashing algorithm to use when calculating the hash value.
+ \param useDefaultHashMethod: A flag indicating whether to use the default hash method or the one specified in the hashMethod parameter.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return bool: Function succeeded.
+*/
+bool
+AwsDoc::S3::doTransferManagerUpload(const Aws::String &bucket, const Aws::String &key,
+ AwsDoc::S3::HASH_METHOD hashMethod,
+ bool useDefaultHashMethod,
+                                    const std::shared_ptr<Aws::S3::S3Client> &client) {
+    std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor> executor =
+            Aws::MakeShared<Aws::Utils::Threading::PooledThreadExecutor>("executor", 25);
+ Aws::Transfer::TransferManagerConfiguration transfer_config(executor.get());
+ transfer_config.s3Client = client;
+ transfer_config.bufferSize = UPLOAD_BUFFER_SIZE;
+ if (!useDefaultHashMethod) {
+ if (hashMethod == MD5) {
+ transfer_config.computeContentMD5 = true;
+ } else {
+ transfer_config.checksumAlgorithm = getChecksumAlgorithmForHashMethod(
+ hashMethod);
+ }
+ }
+ transfer_config.errorCallback = transferManagerErrorCallback;
+ transfer_config.transferStatusUpdatedCallback = transferManagerStatusCallback;
+
+    std::shared_ptr<Aws::Transfer::TransferManager> transfer_manager = Aws::Transfer::TransferManager::Create(
+ transfer_config);
+
+ std::cout << "Uploading the file..." << std::endl;
+    std::shared_ptr<Aws::Transfer::TransferHandle> uploadHandle = transfer_manager->UploadFile(MULTI_PART_TEST_FILE,
+                                                                                               bucket, key,
+                                                                                               "text/plain",
+                                                                                               Aws::Map<Aws::String, Aws::String>());
+ uploadHandle->WaitUntilFinished();
+ bool success =
+ uploadHandle->GetStatus() == Aws::Transfer::TransferStatus::COMPLETED;
+ if (!success) {
+        Aws::Client::AWSError<Aws::Client::CoreErrors> err = uploadHandle->GetLastError();
+ std::cerr << "File upload failed: " << err.GetMessage() << std::endl;
+ }
+
+ return success;
+}
+
+//! Routine which calculates the hash values for each part of a file being uploaded to an S3 bucket.
+/*!
+ \param hashMethod: The hashing algorithm to use when calculating the hash values.
+ \param fileName: The path to the file for which the part hashes will be calculated.
+ \param bufferSize: The size of the buffer to use when reading the file.
+ \param[out] hashDataResult: The Hasher object that will store the concatenated hash value.
+ \param[out] partHashes: The vector that will store the calculated hash values for each part of the file.
+ \return bool: Function succeeded.
+*/
+bool AwsDoc::S3::calculatePartHashesForFile(AwsDoc::S3::HASH_METHOD hashMethod,
+ const Aws::String &fileName,
+ size_t bufferSize,
+ AwsDoc::S3::Hasher &hashDataResult,
+                                            std::vector<Aws::String> &partHashes) {
+ std::ifstream fileStream(fileName.c_str(), std::ifstream::binary);
+ fileStream.seekg(0, std::ifstream::end);
+ size_t objectSize = fileStream.tellg();
+ fileStream.seekg(0, std::ifstream::beg);
+    std::vector<unsigned char> totalHashBuffer;
+ size_t uploadedBytes = 0;
+
+
+ while (uploadedBytes < objectSize) {
+        std::vector<unsigned char> buffer(bufferSize);
+        std::streamsize bytesToRead = static_cast<std::streamsize>(std::min(buffer.size(), objectSize - uploadedBytes));
+ fileStream.read((char *) buffer.data(), bytesToRead);
+ Aws::Utils::Stream::PreallocatedStreamBuf preallocatedStreamBuf(buffer.data(),
+ bytesToRead);
+        std::shared_ptr<Aws::IOStream> body =
+                Aws::MakeShared<Aws::IOStream>("SampleAllocationTag",
+ &preallocatedStreamBuf);
+ Hasher hasher;
+ if (!hasher.calculateObjectHash(*body, hashMethod)) {
+ std::cerr << "Error calculating hash." << std::endl;
+ return false;
+ }
+ Aws::String base64HashString = hasher.getBase64HashString();
+ partHashes.push_back(base64HashString);
+
+ Aws::Utils::ByteBuffer hashBuffer = hasher.getByteBufferHash();
+
+ totalHashBuffer.insert(totalHashBuffer.end(), hashBuffer.GetUnderlyingData(),
+ hashBuffer.GetUnderlyingData() + hashBuffer.GetLength());
+
+ uploadedBytes += bytesToRead;
+ }
+
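+    // The final hash is calculated over the concatenated binary hashes of the individual parts,
+    // which matches the composite "checksum of checksums" that Amazon S3 reports for multipart uploads.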
+ return hashDataResult.calculateObjectHash(totalHashBuffer, hashMethod);
+}
+
+// snippet-start:[cpp.example_code.s3.CreateMultipartUpload]
+//! Create a multipart upload.
+/*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param checksumAlgorithm: The checksum algorithm to use, ignored when NOT_SET.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return Aws::String: Upload ID or empty string if failed.
+*/
+Aws::String
+AwsDoc::S3::createMultipartUpload(const Aws::String &bucket, const Aws::String &key,
+ Aws::S3::Model::ChecksumAlgorithm checksumAlgorithm,
+ const Aws::S3::S3Client &client) {
+ Aws::S3::Model::CreateMultipartUploadRequest request;
+ request.SetBucket(bucket);
+ request.SetKey(key);
+
+ if (checksumAlgorithm != Aws::S3::Model::ChecksumAlgorithm::NOT_SET) {
+ request.SetChecksumAlgorithm(checksumAlgorithm);
+ }
+
+ Aws::S3::Model::CreateMultipartUploadOutcome outcome =
+ client.CreateMultipartUpload(request);
+
+ Aws::String uploadID;
+ if (outcome.IsSuccess()) {
+ uploadID = outcome.GetResult().GetUploadId();
+ } else {
+ std::cerr << "Error creating multipart upload: " << outcome.GetError().GetMessage() << std::endl;
+ }
+
+ return uploadID;
+}
+// snippet-end:[cpp.example_code.s3.CreateMultipartUpload]
+
+// snippet-start:[cpp.example_code.s3.UploadPart]
+//! Upload a part to an S3 bucket.
+/*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param uploadID: An upload ID string.
+ \param partNumber: The part number of this part.
+ \param checksumAlgorithm: Checksum algorithm, ignored when NOT_SET.
+ \param calculatedHash: A data integrity hash to set, depending on the checksum algorithm,
+ ignored when it is an empty string.
+ \param body: A shared_ptr to an IOStream containing the data to be uploaded.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return UploadPartOutcome: The outcome.
+*/
+
+Aws::S3::Model::UploadPartOutcome AwsDoc::S3::uploadPart(const Aws::String &bucket,
+ const Aws::String &key,
+ const Aws::String &uploadID,
+ int partNumber,
+ Aws::S3::Model::ChecksumAlgorithm checksumAlgorithm,
+ const Aws::String &calculatedHash,
+                                                         const std::shared_ptr<Aws::IOStream> &body,
+ const Aws::S3::S3Client &client) {
+ Aws::S3::Model::UploadPartRequest request;
+ request.SetBucket(bucket);
+ request.SetKey(key);
+ request.SetUploadId(uploadID);
+ request.SetPartNumber(partNumber);
+ if (checksumAlgorithm != Aws::S3::Model::ChecksumAlgorithm::NOT_SET) {
+ request.SetChecksumAlgorithm(checksumAlgorithm);
+ }
+ request.SetBody(body);
+
+ if (!calculatedHash.empty()) {
+ switch (checksumAlgorithm) {
+ case Aws::S3::Model::ChecksumAlgorithm::NOT_SET:
+ request.SetContentMD5(calculatedHash);
+ break;
+ case Aws::S3::Model::ChecksumAlgorithm::CRC32:
+ request.SetChecksumCRC32(calculatedHash);
+ break;
+ case Aws::S3::Model::ChecksumAlgorithm::CRC32C:
+ request.SetChecksumCRC32C(calculatedHash);
+ break;
+ case Aws::S3::Model::ChecksumAlgorithm::SHA1:
+ request.SetChecksumSHA1(calculatedHash);
+ break;
+ case Aws::S3::Model::ChecksumAlgorithm::SHA256:
+ request.SetChecksumSHA256(calculatedHash);
+ break;
+ }
+ }
+
+ return client.UploadPart(request);
+}
+// snippet-end:[cpp.example_code.s3.UploadPart]
+
+// snippet-start:[cpp.example_code.s3.AbortMultipartUpload]
+//! Abort a multipart upload to an S3 bucket.
+/*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param uploadID: An upload ID string.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return bool: Function succeeded.
+*/
+
+bool AwsDoc::S3::abortMultipartUpload(const Aws::String &bucket,
+ const Aws::String &key,
+ const Aws::String &uploadID,
+ const Aws::S3::S3Client &client) {
+ Aws::S3::Model::AbortMultipartUploadRequest request;
+ request.SetBucket(bucket);
+ request.SetKey(key);
+ request.SetUploadId(uploadID);
+
+ Aws::S3::Model::AbortMultipartUploadOutcome outcome =
+ client.AbortMultipartUpload(request);
+
+ if (outcome.IsSuccess()) {
+ std::cout << "Multipart upload aborted." << std::endl;
+ } else {
+ std::cerr << "Error aborting multipart upload: " << outcome.GetError().GetMessage() << std::endl;
+ }
+
+ return outcome.IsSuccess();
+}
+// snippet-end:[cpp.example_code.s3.AbortMultipartUpload]
+
+// snippet-start:[cpp.example_code.s3.CompleteMultipartUpload]
+//! Complete a multipart upload to an S3 bucket.
+/*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param uploadID: An upload ID string.
+ \param parts: A vector of CompleteParts.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return CompleteMultipartUploadOutcome: The request outcome.
+*/
+Aws::S3::Model::CompleteMultipartUploadOutcome AwsDoc::S3::completeMultipartUpload(const Aws::String &bucket,
+ const Aws::String &key,
+ const Aws::String &uploadID,
+                                                                                    const Aws::Vector<Aws::S3::Model::CompletedPart> &parts,
+ const Aws::S3::S3Client &client) {
+ Aws::S3::Model::CompletedMultipartUpload completedMultipartUpload;
+ completedMultipartUpload.SetParts(parts);
+
+ Aws::S3::Model::CompleteMultipartUploadRequest request;
+ request.SetBucket(bucket);
+ request.SetKey(key);
+ request.SetUploadId(uploadID);
+ request.SetMultipartUpload(completedMultipartUpload);
+
+ Aws::S3::Model::CompleteMultipartUploadOutcome outcome =
+ client.CompleteMultipartUpload(request);
+
+ if (!outcome.IsSuccess()) {
+ std::cerr << "Error completing multipart upload: " << outcome.GetError().GetMessage() << std::endl;
+ }
+ return outcome;
+}
+// snippet-end:[cpp.example_code.s3.CompleteMultipartUpload]
+
+//! Routine which performs a multi-part upload.
+/*!
+ \param bucket: The name of the S3 bucket where the object will be uploaded.
+ \param key: The unique identifier (key) for the object within the S3 bucket.
+ \param hashMethod: The hashing algorithm to use when calculating the hash value.
+ \param ioStream: An IOStream for the data to be uploaded.
+ \param useDefaultHashMethod: A flag indicating whether to use the default hash method or the one specified in the hashMethod parameter.
+ \param[out] hashDataResult: The Hasher object that will store the concatenated hash value.
+ \param[out] partHashes: The vector that will store the calculated hash values for each part of the file.
+ \param client: The S3 client instance used to perform the upload operation.
+ \return bool: Function succeeded.
+*/
+bool AwsDoc::S3::doMultipartUpload(const Aws::String &bucket,
+ const Aws::String &key,
+ AwsDoc::S3::HASH_METHOD hashMethod,
+                                   const std::shared_ptr<Aws::IOStream> &ioStream,
+ bool useDefaultHashMethod,
+ AwsDoc::S3::Hasher &hashDataResult,
+                                   std::vector<Aws::String> &partHashes,
+ const Aws::S3::S3Client &client) {
+ // Get object size.
+ ioStream->seekg(0, ioStream->end);
+ size_t objectSize = ioStream->tellg();
+ ioStream->seekg(0, ioStream->beg);
+
+ Aws::S3::Model::ChecksumAlgorithm checksumAlgorithm = Aws::S3::Model::ChecksumAlgorithm::NOT_SET;
+ if (!useDefaultHashMethod) {
+ if (hashMethod != MD5) {
+ checksumAlgorithm = getChecksumAlgorithmForHashMethod(hashMethod);
+ }
+ }
+ Aws::String uploadID = createMultipartUpload(bucket, key, checksumAlgorithm, client);
+ if (uploadID.empty()) {
+ return false;
+ }
+
+    std::vector<unsigned char> totalHashBuffer;
+ bool uploadSucceeded = true;
+ std::streamsize uploadedBytes = 0;
+ int partNumber = 1;
+    Aws::Vector<Aws::S3::Model::CompletedPart> parts;
+ while (uploadedBytes < objectSize) {
+ std::cout << "Uploading part " << partNumber << "." << std::endl;
+
+        std::vector<unsigned char> buffer(UPLOAD_BUFFER_SIZE);
+        std::streamsize bytesToRead = static_cast<std::streamsize>(std::min(buffer.size(),
+ objectSize - uploadedBytes));
+ ioStream->read((char *) buffer.data(), bytesToRead);
+ Aws::Utils::Stream::PreallocatedStreamBuf preallocatedStreamBuf(buffer.data(),
+ bytesToRead);
+        std::shared_ptr<Aws::IOStream> body =
+                Aws::MakeShared<Aws::IOStream>("SampleAllocationTag",
+ &preallocatedStreamBuf);
+
+ Hasher hasher;
+ if (!hasher.calculateObjectHash(*body, hashMethod)) {
+ std::cerr << "Error calculating hash." << std::endl;
+ uploadSucceeded = false;
+ break;
+ }
+
+ Aws::String base64HashString = hasher.getBase64HashString();
+ partHashes.push_back(base64HashString);
+
+ Aws::Utils::ByteBuffer hashBuffer = hasher.getByteBufferHash();
+
+ totalHashBuffer.insert(totalHashBuffer.end(), hashBuffer.GetUnderlyingData(),
+ hashBuffer.GetUnderlyingData() + hashBuffer.GetLength());
+
+ Aws::String calculatedHash;
+ if (gUseCalculatedChecksum) {
+ calculatedHash = base64HashString;
+ }
+ Aws::S3::Model::UploadPartOutcome uploadPartOutcome = uploadPart(bucket, key, uploadID, partNumber,
+                                                                         checksumAlgorithm, calculatedHash, body,
+ client);
+ if (uploadPartOutcome.IsSuccess()) {
+ const Aws::S3::Model::UploadPartResult &uploadPartResult = uploadPartOutcome.GetResult();
+ Aws::S3::Model::CompletedPart completedPart;
+ completedPart.SetETag(uploadPartResult.GetETag());
+ completedPart.SetPartNumber(partNumber);
+ switch (hashMethod) {
+ case AwsDoc::S3::MD5:
+ break; // Do nothing.
+ case AwsDoc::S3::SHA1:
+ completedPart.SetChecksumSHA1(uploadPartResult.GetChecksumSHA1());
+ break;
+ case AwsDoc::S3::SHA256:
+ completedPart.SetChecksumSHA256(uploadPartResult.GetChecksumSHA256());
+ break;
+ case AwsDoc::S3::CRC32:
+ completedPart.SetChecksumCRC32(uploadPartResult.GetChecksumCRC32());
+ break;
+ case AwsDoc::S3::CRC32C:
+ completedPart.SetChecksumCRC32C(uploadPartResult.GetChecksumCRC32C());
+ break;
+ default:
+ std::cerr << "Unhandled hash method for completedPart." << std::endl;
+ break;
+ }
+
+ parts.push_back(completedPart);
+ } else {
+ std::cerr << "Error uploading part. " <<
+ uploadPartOutcome.GetError().GetMessage() << std::endl;
+ uploadSucceeded = false;
+ break;
+ }
+
+ uploadedBytes += bytesToRead;
+ partNumber++;
+ }
+
+ if (!uploadSucceeded) {
+ abortMultipartUpload(bucket, key, uploadID, client);
+ return false;
+ } else {
+
+ Aws::S3::Model::CompleteMultipartUploadOutcome completeMultipartUploadOutcome = completeMultipartUpload(bucket,
+ key,
+ uploadID,
+ parts,
+ client);
+
+ if (completeMultipartUploadOutcome.IsSuccess()) {
+ std::cout << "Multipart upload completed." << std::endl;
+ if (!hashDataResult.calculateObjectHash(totalHashBuffer, hashMethod)) {
+ std::cerr << "Error calculating hash." << std::endl;
+ return false;
+ }
+ } else {
+ std::cerr << "Error completing multipart upload." <<
+ completeMultipartUploadOutcome.GetError().GetMessage()
+ << std::endl;
+ }
+
+ return completeMultipartUploadOutcome.IsSuccess();
+ }
+}
+
+//! Routine which retrieves the string for a HASH_METHOD constant.
+/*!
+ \param: hashMethod: A HASH_METHOD constant.
+ \return: String: A string description of the hash method.
+*/
+Aws::String AwsDoc::S3::stringForHashMethod(AwsDoc::S3::HASH_METHOD hashMethod) {
+ switch (hashMethod) {
+ case AwsDoc::S3::DEFAULT:
+ return "Default";
+ case AwsDoc::S3::MD5:
+ return "MD5";
+ case AwsDoc::S3::SHA1:
+ return "SHA1";
+ case AwsDoc::S3::SHA256:
+ return "SHA256";
+ case AwsDoc::S3::CRC32:
+ return "CRC32";
+ case AwsDoc::S3::CRC32C:
+ return "CRC32C";
+ default:
+ return "Unknown";
+ }
+}
+
+//! Routine that returns the ChecksumAlgorithm for a HASH_METHOD constant.
+/*!
+ \param: hashMethod: A HASH_METHOD constant.
+ \return: ChecksumAlgorithm: The ChecksumAlgorithm enum.
+*/
+Aws::S3::Model::ChecksumAlgorithm
+AwsDoc::S3::getChecksumAlgorithmForHashMethod(AwsDoc::S3::HASH_METHOD hashMethod) {
+ Aws::S3::Model::ChecksumAlgorithm result = Aws::S3::Model::ChecksumAlgorithm::NOT_SET;
+ switch (hashMethod) {
+ case AwsDoc::S3::DEFAULT:
+ std::cerr << "getChecksumAlgorithmForHashMethod- DEFAULT is not valid." << std::endl;
+ break; // Default is not supported.
+ case AwsDoc::S3::MD5:
+ break; // Ignore MD5.
+ case AwsDoc::S3::SHA1:
+ result = Aws::S3::Model::ChecksumAlgorithm::SHA1;
+ break;
+ case AwsDoc::S3::SHA256:
+ result = Aws::S3::Model::ChecksumAlgorithm::SHA256;
+ break;
+ case AwsDoc::S3::CRC32:
+ result = Aws::S3::Model::ChecksumAlgorithm::CRC32;
+ break;
+ case AwsDoc::S3::CRC32C:
+ result = Aws::S3::Model::ChecksumAlgorithm::CRC32C;
+ break;
+ default:
+ std::cerr << "Unknown hash method." << std::endl;
+ break;
+
+ }
+
+ return result;
+}
+
+//! Routine which cleans up after the example is complete.
+/*!
+ \param bucket: The name of the S3 bucket where the object was uploaded.
+ \param clientConfiguration: The client configuration for the S3 client.
+ \return bool: Function succeeded.
+*/
+bool AwsDoc::S3::cleanUp(const Aws::String &bucketName,
+ const Aws::S3::S3ClientConfiguration &clientConfiguration) {
+
+    Aws::Vector<Aws::String> keysResult;
+ bool result = true;
+ if (AwsDoc::S3::listObjects(bucketName, keysResult, clientConfiguration)) {
+ if (!keysResult.empty()) {
+ result = AwsDoc::S3::deleteObjects(keysResult, bucketName,
+ clientConfiguration);
+ }
+ } else {
+ result = false;
+ }
+
+ return result && AwsDoc::S3::deleteBucket(bucketName, clientConfiguration);
+}
+
+//! Console interaction introducing the workflow.
+/*!
+ \param bucketName: The name of the S3 bucket to use.
+*/
+void AwsDoc::S3::introductoryExplanations(const Aws::String &bucketName) {
+
+ std::cout
+ << "Welcome to the Amazon Simple Storage Service (Amazon S3) object integrity workflow."
+ << std::endl;
+ printAsterisksLine();
+ std::cout
+ << "This workflow demonstrates how Amazon S3 uses checksum values to verify the integrity of data\n";
+ std::cout << "uploaded to Amazon S3 buckets" << std::endl;
+ std::cout
+ << "The AWS SDK for C++ automatically handles checksums.\n";
+ std::cout
+ << "By default it calculates a checksum that is uploaded with an object.\n"
+ << "The default checksum algorithm for PutObject and MultiPart upload is an MD5 hash.\n"
+ << "The default checksum algorithm for TransferManager uploads is a CRC32 checksum."
+ << std::endl;
+ std::cout
+ << "You can override the default behavior, requiring one of the following checksums,\n";
+ std::cout << "MD5, CRC32, CRC32C, SHA-1 or SHA-256." << std::endl;
+ std::cout << "You can also set the checksum hash value, instead of letting the SDK calculate the value."
+ << std::endl;
+ std::cout
+ << "For more information, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html."
+ << std::endl;
+
+ std::cout
+ << "This workflow will locally compute checksums for files uploaded to an Amazon S3 bucket,\n";
+ std::cout << "even when the SDK also computes the checksum." << std::endl;
+ std::cout
+ << "This is done to provide demonstration code for how the checksums are calculated."
+ << std::endl;
+ std::cout << "A bucket named '" << bucketName << "' will be created for the object uploads."
+ << std::endl;
+}
+
+//! Console interaction which explains the PutObject results.
+/*!
+*/
+void AwsDoc::S3::explainPutObjectResults() {
+
+ std::cout << "The upload was successful.\n";
+ std::cout << "If the checksums had not matched, the upload would have failed."
+ << std::endl;
+ std::cout
+ << "The checksums calculated by the server have been retrieved using the GetObjectAttributes."
+ << std::endl;
+ std::cout
+ << "The locally calculated checksums have been verified against the retrieved checksums."
+ << std::endl;
+}
+
+//! Console interaction explaining transfer manager uploads.
+/*!
+ \param objectKey: The key for the object being uploaded.
+*/
+void AwsDoc::S3::introductoryTransferManagerUploadExplanations(
+ const Aws::String &objectKey) {
+ std::cout
+ << "Now the workflow will demonstrate object integrity for TransferManager multi-part uploads."
+ << std::endl;
+ std::cout
+ << "The AWS C++ SDK has a TransferManager class which simplifies multipart uploads."
+ << std::endl;
+ std::cout
+ << "The following code lets the TransferManager handle much of the checksum configuration."
+ << std::endl;
+
+ std::cout << "An object with the key '" << objectKey
+ << " will be uploaded by the TransferManager using a "
+ << BUFFER_SIZE_IN_MEGABYTES << " MB buffer." << std::endl;
+ if (gUseCalculatedChecksum) {
+ std::cout << "For TransferManager uploads, this demo always lets the SDK calculate the hash value."
+ << std::endl;
+ }
+
+ pressEnterToContinue();
+ printAsterisksLine();
+}
+
+//! Console interaction explaining multi-part uploads.
+/*!
+ \param objectKey: The key for the object being uploaded.
+ \param chosenHashMethod: The hash method selected by the user.
+*/
+void AwsDoc::S3::multiPartUploadExplanations(const Aws::String &objectKey,
+ HASH_METHOD chosenHashMethod) {
+ std::cout
+ << "Now we will provide an in-depth demonstration of multi-part uploading by calling the multi-part upload APIs directly."
+ << std::endl;
+ std::cout << "These are the same APIs used by the TransferManager when uploading large files."
+ << std::endl;
+ std::cout
+ << "In the following code, the checksums are also calculated locally and then compared."
+ << std::endl;
+ std::cout
+ << "For multi-part uploads, a checksum is uploaded with each part. The final checksum is a concatenation of"
+ << std::endl;
+ std::cout << "the checksums for each part." << std::endl;
+ std::cout
+ << "This is explained in the user guide, https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html,\""
+ << " in the section \"Using part-level checksums for multipart uploads\"." << std::endl;
+
+ std::cout << "Starting multipart upload of with hash method " <<
+ stringForHashMethod(chosenHashMethod) << " uploading to with object key\n"
+ << "'" << objectKey << "'," << std::endl;
+
+}
+
+//! Create a large file for doing multi-part uploads.
+/*!
+*/
+bool AwsDoc::S3::createLargeFileIfNotExists() {
+ // Generate a large file by writing this source file multiple times to a new file.
+ if (std::filesystem::exists(MULTI_PART_TEST_FILE)) {
+ return true;
+ }
+
+    std::ofstream newFile(MULTI_PART_TEST_FILE, std::ios::out | std::ios::binary);
+
+ if (!newFile) {
+ std::cerr << "createLargeFileIfNotExists- Error creating file " << MULTI_PART_TEST_FILE <<
+ std::endl;
+ return false;
+ }
+
+    std::ifstream input(TEST_FILE, std::ios::in | std::ios::binary);
+ if (!input) {
+ std::cerr << "Error opening file " << TEST_FILE <<
+ std::endl;
+ return false;
+ }
+ std::stringstream buffer;
+ buffer << input.rdbuf();
+
+ input.close();
+
+ while (newFile.tellp() < LARGE_FILE_SIZE && !newFile.bad()) {
+ buffer.seekg(std::stringstream::beg);
+ newFile << buffer.rdbuf();
+ }
+
+ newFile.close();
+
+ return true;
+}
+// snippet-end:[cpp.example_code.s3.Scenario_ObjectIntegrity]
+
+//! Calculate the object hash for vector input.
+/*!
+ \param data: A vector of unsigned bytes.
+ \param hashMethod: The hash method to use.
+ \return bool: Function succeeded.
+*/
+bool AwsDoc::S3::Hasher::calculateObjectHash(std::vector<unsigned char> &data,
+ AwsDoc::S3::HASH_METHOD hashMethod) {
+ Aws::Utils::Stream::PreallocatedStreamBuf preallocatedStreamBuf(data.data(),
+ data.size());
+    std::shared_ptr<Aws::IOStream> body =
+            Aws::MakeShared<Aws::IOStream>("SampleAllocationTag",
+ &preallocatedStreamBuf);
+ return calculateObjectHash(*body, hashMethod);
+}
+
+//! Calculate the object hash for stream input.
+/*!
+ \param data: An IOStream for input.
+ \param hashMethod: The hash method to use.
+ \return bool: Function succeeded.
+*/
+bool AwsDoc::S3::Hasher::calculateObjectHash(Aws::IOStream &data,
+ AwsDoc::S3::HASH_METHOD hashMethod) {
+ switch (hashMethod) {
+ case AwsDoc::S3::DEFAULT:
+ std::cerr << "Default hash method in calculateObjectHash." << std::endl;
+ break;
+ case AwsDoc::S3::MD5:
+ m_Hash = Aws::Utils::HashingUtils::CalculateMD5(data);
+ break;
+ case AwsDoc::S3::SHA1:
+ m_Hash = Aws::Utils::HashingUtils::CalculateSHA1(data);
+ break;
+ case AwsDoc::S3::SHA256:
+ m_Hash = Aws::Utils::HashingUtils::CalculateSHA256(data);
+ break;
+ case AwsDoc::S3::CRC32:
+ m_Hash = Aws::Utils::HashingUtils::CalculateCRC32(data);
+ break;
+ case AwsDoc::S3::CRC32C:
+ m_Hash = Aws::Utils::HashingUtils::CalculateCRC32C(data);
+ break;
+ default:
+ std::cerr << "Unknown hash method." << std::endl;
+ return false;
+ }
+ data.clear();
+ data.seekg(0, std::ifstream::beg);
+ return true;
+}
+
+//! Retrieve the stored hash as a Base64 string.
+/*!
+ \return String: Hash as Base64 string.
+*/
+Aws::String AwsDoc::S3::Hasher::getBase64HashString() const {
+ return Aws::Utils::HashingUtils::Base64Encode(m_Hash);
+}
+
+//! Retrieve the stored hash as a hexadecimal string.
+/*!
+ \return String: Hash as hexadecimal string.
+*/
+Aws::String AwsDoc::S3::Hasher::getHexHashString() const {
+ std::stringstream stringstream;
+ stringstream << std::hex << std::setfill('0');
+ for (int i = 0; i < m_Hash.GetLength(); ++i) {
+ stringstream << std::setw(2) << (int) m_Hash[i];
+ }
+
+ return stringstream.str();
+}
+
+//! Retrieve the stored hash as a ByteBuffer.
+/*!
+ \return ByteBuffer: Hash as ByteBuffer.
+*/
+Aws::Utils::ByteBuffer AwsDoc::S3::Hasher::getByteBufferHash() const {
+ return m_Hash;
+}
+
+//! Test routine passed as argument to askQuestion routine.
+/*!
+\param string: A string to test.
+\return bool: True if not empty.
+*/
+bool AwsDoc::S3::testForEmptyString(const Aws::String &string) {
+ if (string.empty()) {
+ std::cout << "Enter some text." << std::endl;
+ return false;
+ }
+
+ return true;
+}
+
+//! Command line prompt/response utility function.
+/*!
+ \param string: A question prompt.
+ \param test: Test function for response.
+ \return Aws::String: User's response.
+ */
+Aws::String AwsDoc::S3::askQuestion(const Aws::String &string,
+                                    const std::function<bool(const Aws::String &)> &test) {
+ Aws::String result;
+ do {
+ std::cout << string;
+ std::getline(std::cin, result);
+ } while (!test(result));
+
+ return result;
+}
+
+//! Command line prompt/response for yes/no question.
+/*!
+ \param string: A question prompt expecting a 'y' or 'n' response.
+ \return bool: True if yes.
+ */
+bool AwsDoc::S3::askYesNoQuestion(const Aws::String &string) {
+ Aws::String resultString = askQuestion(string, [](
+ const Aws::String &string1) -> bool {
+ bool result = false;
+ if (string1.length() == 1) {
+ int answer = std::tolower(string1[0]);
+ result = (answer == 'y') || (answer == 'n');
+ }
+
+ if (!result) {
+ std::cout << "Answer 'y' or 'n'." << std::endl;
+ }
+
+ return result;
+ });
+
+ return std::tolower(resultString[0]) == 'y';
+}
+
+//! Command line prompt/response utility function for an int result confined to
+//! a range.
+/*!
+ \param string: A question prompt.
+ \param low: Low inclusive.
+ \param high: High inclusive.
+ \return int: User's response.
+ */
+int
+AwsDoc::S3::askQuestionForIntRange(const Aws::String &string, int low,
+ int high) {
+ Aws::String resultString = askQuestion(string, [low, high](
+ const Aws::String &string1) -> bool {
+ try {
+ int number = std::stoi(string1);
+ bool result = number >= low && number <= high;
+ if (!result) {
+ std::cerr << "\nThe number is out of range." << std::endl;
+ }
+ return result;
+ }
+ catch (const std::invalid_argument &) {
+ std::cerr << "\nNot a valid number." << std::endl;
+ return false;
+ }
+ });
+
+ int result = 0;
+ try {
+ result = std::stoi(resultString);
+ }
+ catch (const std::invalid_argument &) {
+ std::cerr << "askQuestionForFloatRange string not an int "
+ << resultString << std::endl;
+ }
+
+ return result;
+}
+
+
+#pragma clang diagnostic pop
\ No newline at end of file
diff --git a/cpp/example_code/s3/tests/CMakeLists.txt b/cpp/example_code/s3/tests/CMakeLists.txt
index c32c01045d1..ca0993eda66 100644
--- a/cpp/example_code/s3/tests/CMakeLists.txt
+++ b/cpp/example_code/s3/tests/CMakeLists.txt
@@ -6,13 +6,13 @@ cmake_minimum_required(VERSION 3.14)
set(EXAMPLE_SERVICE_NAME "s3")
set(CURRENT_TARGET "${EXAMPLE_SERVICE_NAME}_gtest")
-set(CURRENT_TARGET_AWS_DEPENDENCIES s3 sts iam)
+set(CURRENT_TARGET_AWS_DEPENDENCIES s3 sts iam transfer)
# Set this project's name.
project("${EXAMPLE_SERVICE_NAME}-examples-gtests")
# Set the C++ standard to use to build this target.
-set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_CXX_STANDARD 17)
find_package(GTest)
@@ -91,13 +91,21 @@ endif ()
enable_testing()
+# The Object Integrity Workflow is a special case and is handled differently.
+set(OBJECT_INTEGRITY_WORKFLOW_SOURCE s3_object_integrity_workflow.cpp)
foreach (TEST_FILE ${GTEST_SOURCE_FILES})
string(REPLACE "gtest_" "../" SOURCE_FILE ${TEST_FILE})
if (EXISTS ${SOURCE_FILE})
list(APPEND GTEST_SOURCE ${SOURCE_FILE} ${TEST_FILE})
+
else ()
- message("Error: no associated source file found for ${TEST_FILE}")
+ string(FIND ${SOURCE_FILE} ${OBJECT_INTEGRITY_WORKFLOW_SOURCE} POS)
+ if (POS EQUAL -1)
+ message("Error: no associated source file found for ${TEST_FILE}")
+ else ()
+ list(APPEND GTEST_SOURCE ../s3_object_integrity_workflow/${OBJECT_INTEGRITY_WORKFLOW_SOURCE} ${TEST_FILE})
+ endif ()
endif ()
endforeach ()
@@ -112,20 +120,14 @@ target_sources(
target_include_directories(
${CURRENT_TARGET}
PUBLIC
- $
- $
-)
-
-target_include_directories(
- ${CURRENT_TARGET}
- SYSTEM PUBLIC
+ ${CMAKE_CURRENT_SOURCE_DIR}/..
${CURL_INCLUDE_DIRS}
)
target_compile_definitions(
${CURRENT_TARGET}
PUBLIC
- TESTING_BUILD
+ EXCLUDE_MAIN_FUNCTION
SRC_DIR="${CMAKE_CURRENT_SOURCE_DIR}"
)
@@ -148,6 +150,7 @@ target_link_directories(${CURRENT_TARGET}
${CURL_LIBRARIES})
include(GoogleTest)
+
gtest_add_tests(
TARGET
${CURRENT_TARGET}
diff --git a/cpp/example_code/s3/tests/S3_GTests.cpp b/cpp/example_code/s3/tests/S3_GTests.cpp
index 8e3bb0c0e0b..ada6dc9af0b 100644
--- a/cpp/example_code/s3/tests/S3_GTests.cpp
+++ b/cpp/example_code/s3/tests/S3_GTests.cpp
@@ -380,6 +380,13 @@ void AwsDocTest::S3_GTests::SetUp() {
m_savedBuffer = std::cout.rdbuf();
std::cout.rdbuf(&m_coutBuffer);
}
+
+ m_savedInBuffer = std::cin.rdbuf();
+ std::cin.rdbuf(&m_cinBuffer);
+
+ // The following code is needed for the AwsDocTest::MyStringBuffer::underflow exception.
+ // Otherwise, we get an infinite loop when the buffer is empty.
+ std::cin.exceptions(std::ios_base::badbit);
}
void AwsDocTest::S3_GTests::TearDown() {
@@ -387,8 +394,13 @@ void AwsDocTest::S3_GTests::TearDown() {
std::cout.rdbuf(m_savedBuffer);
m_savedBuffer = nullptr;
}
-}
+ if (m_savedInBuffer != nullptr) {
+ std::cin.rdbuf(m_savedInBuffer);
+ std::cin.exceptions(std::ios_base::goodbit);
+ m_savedInBuffer = nullptr;
+ }
+}
Aws::String AwsDocTest::S3_GTests::preconditionError() {
return "Failed to meet precondition.";
@@ -398,6 +410,16 @@ bool AwsDocTest::S3_GTests::suppressStdOut() {
return std::getenv("EXAMPLE_TESTS_LOG_ON") == nullptr;
}
+void AwsDocTest::S3_GTests::AddCommandLineResponses(
+        const std::vector<std::string> &responses) {
+
+ std::stringstream stringStream;
+ for (auto &response: responses) {
+ stringStream << response << "\n";
+ }
+ m_cinBuffer.str(stringStream.str());
+}
+
AwsDocTest::MockHTTP::MockHTTP() {
requestTmp = CreateHttpRequest(Aws::Http::URI("https://test.com/"),
Aws::Http::HttpMethod::HTTP_GET,
@@ -436,5 +458,12 @@ bool AwsDocTest::MockHTTP::addResponseWithBody(const std::string &fileName,
return false;
}
+int AwsDocTest::MyStringBuffer::underflow() {
+ int result = basic_stringbuf::underflow();
+ if (result == EOF) {
+ std::cerr << "Error AwsDocTest::MyStringBuffer::underflow." << std::endl;
+ throw std::underflow_error("AwsDocTest::MyStringBuffer::underflow");
+ }
-
+ return result;
+}
diff --git a/cpp/example_code/s3/tests/S3_GTests.h b/cpp/example_code/s3/tests/S3_GTests.h
index 727a494f82a..19e9d3ac190 100644
--- a/cpp/example_code/s3/tests/S3_GTests.h
+++ b/cpp/example_code/s3/tests/S3_GTests.h
@@ -12,6 +12,9 @@
#include
namespace AwsDocTest {
+ class MyStringBuffer : public std::stringbuf {
+ int underflow() override;
+ };
class S3_GTests : public testing::Test {
protected:
@@ -52,6 +55,8 @@ namespace AwsDocTest {
static Aws::String preconditionError();
+        void AddCommandLineResponses(const std::vector<std::string> &responses);
+
// s_clientConfig must be a pointer because the client config must be initialized after InitAPI.
static std::unique_ptr s_clientConfig;
@@ -69,6 +74,9 @@ namespace AwsDocTest {
std::stringbuf m_coutBuffer; // used just to silence cout.
std::streambuf *m_savedBuffer = nullptr;
+
+ MyStringBuffer m_cinBuffer;
+ std::streambuf *m_savedInBuffer = nullptr;
};
class MockHTTP {
diff --git a/cpp/example_code/s3/tests/gtest_list_objects.cpp b/cpp/example_code/s3/tests/gtest_list_objects.cpp
index f58d26df98c..8a75cbb7d77 100644
--- a/cpp/example_code/s3/tests/gtest_list_objects.cpp
+++ b/cpp/example_code/s3/tests/gtest_list_objects.cpp
@@ -22,7 +22,8 @@ namespace AwsDocTest {
std::vector bucketNames = GetCachedS3Buckets(BUCKETS_NEEDED);
ASSERT_GE(bucketNames.size(), BUCKETS_NEEDED) << "Failed to meet precondition" << std::endl;
- bool result = AwsDoc::S3::listObjects(bucketNames[0], *s_clientConfig);
+        Aws::Vector<Aws::String> keysResult;
+ bool result = AwsDoc::S3::listObjects(bucketNames[0], keysResult, *s_clientConfig);
EXPECT_TRUE(result);
}
} // namespace AwsDocTest
diff --git a/cpp/example_code/s3/tests/gtest_s3_object_integrity_workflow.cpp b/cpp/example_code/s3/tests/gtest_s3_object_integrity_workflow.cpp
new file mode 100644
index 00000000000..f46ee953b0d
--- /dev/null
+++ b/cpp/example_code/s3/tests/gtest_s3_object_integrity_workflow.cpp
@@ -0,0 +1,222 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+ * Test types are indicated by the test label ending.
+ *
+ * _1_ Requires credentials, permissions, and AWS resources.
+ * _2_ Requires credentials and permissions.
+ * _3_ Does not require credentials.
+ *
+ */
+
+#include <gtest/gtest.h>
+#include "s3_examples.h"
+#include "S3_GTests.h"
+
+namespace AwsDocTest {
+ // NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_default_sdk_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "1", // Choose from one of the following checksum algorithms
+ "y", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ }
+ );
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+
+// NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_MD5_sdk_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "2", // Choose from one of the following checksum algorithms
+ "y", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ });
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+
+// NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_CRC32_sdk_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "3", // Choose from one of the following checksum algorithms
+ "y", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ });
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+
+// NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_CRC32C_sdk_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "4", // Choose from one of the following checksum algorithms
+ "y", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ });
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+
+// NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_SHA1_sdk_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "5", // Choose from one of the following checksum algorithms
+ "y", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ });
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+
+// NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_SHA256_sdk_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "6", // Choose from one of the following checksum algorithms
+ "y", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ });
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+
+// NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_default_calc_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "1", // Choose from one of the following checksum algorithms
+ "n", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ });
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+
+// NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_MD5_calc_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "2", // Choose from one of the following checksum algorithms
+ "n", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ });
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+
+// NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_CRC32_calc_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "3", // Choose from one of the following checksum algorithms
+ "n", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ });
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+
+// NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_CRC32C_calc_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "4", // Choose from one of the following checksum algorithms
+ "n", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ });
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+
+// NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_SHA1_calc_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "5", // Choose from one of the following checksum algorithms
+ "n", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ });
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+
+// NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(S3_GTests, s3_object_integrity_workflow_SHA256_calc_checksum_2_
+ ) {
+ AddCommandLineResponses({
+ "6", // Choose from one of the following checksum algorithms
+ "n", // Let the SDK calculate the checksum for you? (y/n)
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "", // Press Enter to continue...
+ "y" // Would you like to delete the resources created in this workflow? (y/n)
+ });
+
+ auto result = AwsDoc::S3::s3ObjectIntegrityWorkflow(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+} // namespace AwsDocTest
diff --git a/workflows/s3_object_integrity/README.md b/workflows/s3_object_integrity/README.md
new file mode 100644
index 00000000000..72debba2f33
--- /dev/null
+++ b/workflows/s3_object_integrity/README.md
@@ -0,0 +1,30 @@
+# Amazon S3 Object Integrity Workflow
+
+## Overview
+
+- The workflow demonstrates how to use AWS SDKs to verify the integrity of objects uploaded to Amazon S3.
+- It shows how object integrity is verified for different upload methods: PutObject, TransferManager, and multipart upload.
+- The workflow demonstrates the use of all 5 hash algorithms supported by S3 for object verification: MD5, CRC32, CRC32C, SHA1, and SHA256.
+- This workflow demonstrates the different options provided by the SDK for hashing.
+- To demonstrate how the hashes are calculated, the workflow calculates the hashes in the code and compares the results with the hashes calculated automatically by the SDK.
+
+
+The workflow runs as a command-line application that prompts the user for input.
+
+### Resources
+
+The workflow scenario steps create the bucket and objects needed for the example. No additional resources are required.
+
+## Implementations
+
+This example is implemented in the following languages:
+
+- [C++](../../cpp/example_code/s3/s3_object_integrity_workflow/README.md)
+
+## Additional reading
+
+- [Checking Object Integrity](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+
+---
+
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0
diff --git a/workflows/s3_object_integrity/SPECIFICATION.md b/workflows/s3_object_integrity/SPECIFICATION.md
new file mode 100644
index 00000000000..a4eafeb7297
--- /dev/null
+++ b/workflows/s3_object_integrity/SPECIFICATION.md
@@ -0,0 +1,164 @@
+# Amazon S3 Object Integrity Workflow - Technical specification
+
+This document contains the technical specifications for the _Amazon S3 Object Integrity Workflow_,
+a workflow scenario that showcases AWS services and SDKs. It is primarily intended for the AWS code
+examples team to use while developing this example in additional languages.
+
+This document explains the following:
+
+- Architecture and features of the example workflow.
+- Metadata information for the scenario.
+- Sample reference output.
+
+For an introduction, see the [README.md](README.md).
+
+---
+
+### Table of contents
+
+- [Resources and User Input](#resources-and-user-input)
+- [Metadata](#metadata)
+
+## Resources and User Input
+
+- Amazon Simple Storage Service (Amazon S3) Buckets and objects (created in the scenario)
+ - One bucket to upload files to.
+
+A file that is at least 10 MB is needed to demonstrate multi-part uploads.
+This file is created by repeatedly writing the workspace source file to a new
+file in the working directory (see the sketch after the example below).
+Example:
+```
+This workflow demonstrates how Amazon S3 uses checksum values to verify the integrity of data
+uploaded to Amazon S3 buckets
+The AWS SDK for C++ automatically handles checksums.
+By default it calculates a checksum that is uploaded with an object.
+The default checksum algorithm for PutObject and MultiPart upload is an MD5 hash.
+The default checksum algorithm for TransferManager uploads is a CRC32 checksum.
+You can override the default behavior, requiring one of the following checksums,
+MD5, CRC32, CRC32C, SHA-1 or SHA-256.
+You can also set the checksum hash value, instead of letting the SDK calculate the value.
+For more information, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html.
+This workflow will locally compute checksums for files uploaded to an Amazon S3 bucket,
+even when the SDK also computes the checksum.
+This is done to provide demonstration code for how the checksums are calculated.
+A bucket named 'integrity-workflow-92e7e370-b096-4b7f-bf24-7d3496a19772' will be created for the object uploads.
+Created bucket integrity-workflow-92e7e370-b096-4b7f-bf24-7d3496a19772 in the specified AWS Region.
+
+
+```
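+
+As an illustration of that file-generation step, here is a minimal sketch in Java
+(the current implementation is C++, and this spec targets additional languages).
+The class and method names are hypothetical, not taken from the workflow code.
+
+```java
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+
+public class LargeTestFileSketch {
+    /**
+     * Builds a test file of at least 10 MB by repeatedly appending the bytes of an
+     * existing source file, so that multipart uploads have enough data to split into parts.
+     */
+    public static Path createLargeTestFile(Path sourceFile, Path destination) throws IOException {
+        final long minimumSize = 10L * 1024 * 1024; // 10 MB.
+        byte[] chunk = Files.readAllBytes(sourceFile);
+        Files.deleteIfExists(destination);
+        Files.createFile(destination);
+        long written = 0;
+        while (written < minimumSize) {
+            Files.write(destination, chunk, StandardOpenOption.APPEND);
+            written += chunk.length;
+        }
+        return destination;
+    }
+}
+```
+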
+The user chooses the hash method. They also choose whether to let the SDK calculate hashes
+or to use a hash calculated by this app.
+Example:
+
+```
+Choose from one of the following checksum algorithms.
+ 1 - Default
+ 2 - MD5
+ 3 - CRC32
+ 4 - CRC32C
+ 5 - SHA1
+ 6 - SHA256
+Enter an index: 3
+Let the SDK calculate the checksum for you? (y/n) n
+```
+
+The workflow demonstrates object integrity for PutObject. The hash
+is always calculated in the app, providing the user with example code
+that demonstrates hash calculation. The server-calculated hashes are then
+retrieved and compared with the locally calculated hashes (a sketch of this
+pattern follows the example below).
+Example:
+```
+The workflow will now upload a file using PutObject.
+Object integrity will be verified using the CRC32 algorithm.
+A checksum computed by this workflow will be used for object integrity verification,
+except for the TransferManager upload.
+Press Enter to continue...
+
+***************************************************************************************
+
+Object successfully uploaded.
+The upload was successful.
+If the checksums had not matched, the upload would have failed.
+The checksums calculated by the server have been retrieved using the GetObjectAttributes.
+The locally calculated checksums have been verified against the retrieved checksums.
+For PutObject upload retrieved hash is AP39dw==
+For PutObject upload, locally and remotely calculated hashes all match!
+```
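+
+The following sketch shows the upload-then-verify pattern described above, using the
+AWS SDK for Java 2.x for illustration. The bucket, key, file path, and the base64-encoded
+CRC32 value are assumed to exist already; this is not the workflow's own code.
+
+```java
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse;
+import software.amazon.awssdk.services.s3.model.ObjectAttributes;
+import software.amazon.awssdk.services.s3.model.PutObjectRequest;
+
+import java.nio.file.Paths;
+
+public class PutObjectChecksumSketch {
+    /** Uploads with a caller-supplied CRC32 checksum, then compares it with the stored checksum. */
+    public static boolean uploadAndVerify(S3Client s3, String bucket, String key,
+                                          String filePath, String localCrc32Base64) {
+        // S3 rejects the upload if the supplied checksum does not match what the service computes.
+        PutObjectRequest put = PutObjectRequest.builder()
+                .bucket(bucket)
+                .key(key)
+                .checksumCRC32(localCrc32Base64)
+                .build();
+        s3.putObject(put, RequestBody.fromFile(Paths.get(filePath)));
+
+        // Retrieve the checksum the service stored for the object and compare it locally.
+        GetObjectAttributesResponse attributes = s3.getObjectAttributes(
+                GetObjectAttributesRequest.builder()
+                        .bucket(bucket)
+                        .key(key)
+                        .objectAttributes(ObjectAttributes.CHECKSUM)
+                        .build());
+        return localCrc32Base64.equals(attributes.checksum().checksumCRC32());
+    }
+}
+```
+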
+The workflow repeats this process with the TransferManager, uploading the
+large file using the multi-part upload APIs. In the case of the TransferManager,
+SDK-calculated checksums are always used, because supplying locally calculated
+hashes through that interface is difficult (a sketch follows the example below).
+Example:
+
+```
+Now the workflow will demonstrate object integrity for TransferManager multi-part uploads.
+The AWS C++ SDK has a TransferManager class which simplifies multipart uploads.
+The following code lets the TransferManager handle much of the checksum configuration.
+An object with the key 'tr_CRC32_large_test_file.cpp will be uploaded by the TransferManager using a 5 MB buffer.
+For TransferManager uploads, this demo always lets the SDK calculate the hash value.
+Press Enter to continue...
+
+***************************************************************************************
+
+Uploading the file...
+For TransferManager upload retrieved hash is 7YCXxg==
+6 part hash(es) were also retrieved.
+ Part hash rCTaNA==
+ Part hash yOLe+Q==
+ Part hash I60aeg==
+ Part hash boj9Ew==
+ Part hash 0tmIfQ==
+ Part hash 6/tRKA==
+For TransferManager upload, locally and remotely calculated hashes all match!
+```
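+
+As a rough sketch of letting the SDK calculate the checksums, the following Java
+(AWS SDK for Java 2.x S3TransferManager) example only selects the checksum algorithm;
+the SDK computes and sends the per-part checksums itself. The names and paths here are
+assumptions for illustration, not the workflow's code.
+
+```java
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm;
+import software.amazon.awssdk.transfer.s3.S3TransferManager;
+import software.amazon.awssdk.transfer.s3.model.FileUpload;
+import software.amazon.awssdk.transfer.s3.model.UploadFileRequest;
+
+import java.nio.file.Paths;
+
+public class TransferManagerChecksumSketch {
+    /** Uploads a large file and lets the SDK calculate per-part CRC32 checksums. */
+    public static void upload(String bucket, String key, String filePath) {
+        try (S3AsyncClient s3 = S3AsyncClient.crtBuilder().build();
+             S3TransferManager transferManager = S3TransferManager.builder().s3Client(s3).build()) {
+            UploadFileRequest request = UploadFileRequest.builder()
+                    .putObjectRequest(put -> put.bucket(bucket)
+                            .key(key)
+                            .checksumAlgorithm(ChecksumAlgorithm.CRC32)) // The SDK calculates the hashes.
+                    .source(Paths.get(filePath))
+                    .build();
+            FileUpload upload = transferManager.uploadFile(request);
+            upload.completionFuture().join(); // Wait for the multipart upload to finish.
+        }
+    }
+}
+```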
+
+The workflow demonstrates hashing using the multi-part upload APIs. In this case,
+locally calculated hashes are used if the user selected that option. A sketch of how
+the object-level checksum is assembled from the part checksums follows the example below.
+
+```
+Now we will provide an in-depth demonstration of multi-part uploading by calling the multi-part upload APIs directly.
+These are the same APIs used by the TransferManager when uploading large files.
+In the following code, the checksums are also calculated locally and then compared.
+For multi-part uploads, a checksum is uploaded with each part. The final checksum is a concatenation of
+the checksums for each part.
+This is explained in the user guide, https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html," in the section "Using part-level checksums for multipart uploads".
+Starting multipart upload of with hash method CRC32 uploading to with object key
+'mp_CRC32_large_test_file.cpp',
+Press Enter to continue...
+
+Uploading part 1.
+Uploading part 2.
+Uploading part 3.
+Uploading part 4.
+Uploading part 5.
+Uploading part 6.
+Multipart upload completed.
+Finished multipart upload of with hash method CRC32
+Now we will retrieve the checksums from the server.
+For MultiPart upload retrieved hash is 7YCXxg==
+6 part hash(es) were also retrieved.
+ Part hash rCTaNA==
+ Part hash yOLe+Q==
+ Part hash I60aeg==
+ Part hash boj9Ew==
+ Part hash 0tmIfQ==
+ Part hash 6/tRKA==
+For MultiPart upload, locally and remotely calculated hashes all match!
+```
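+
+The "checksum of checksums" rule from the linked user guide section can be illustrated
+with a short sketch using only JDK classes: decode each part's base64 CRC32 into its raw
+4 bytes, concatenate them in part order, and take the CRC32 of that concatenation. This
+is an illustration of the documented rule, not code from the workflow.
+
+```java
+import java.nio.ByteBuffer;
+import java.util.Base64;
+import java.util.List;
+import java.util.zip.CRC32;
+
+public class CompositeChecksumSketch {
+    /** Combines base64-encoded CRC32 part checksums into the object-level checksum. */
+    public static String compositeCrc32(List<String> partChecksumsBase64) {
+        // Concatenate the raw 4-byte CRC32 value of every part, in part order.
+        ByteBuffer concatenated = ByteBuffer.allocate(partChecksumsBase64.size() * 4);
+        for (String partChecksum : partChecksumsBase64) {
+            concatenated.put(Base64.getDecoder().decode(partChecksum));
+        }
+        // The object-level checksum is the CRC32 of that concatenation.
+        CRC32 crc = new CRC32();
+        crc.update(concatenated.array());
+        byte[] value = ByteBuffer.allocate(4).putInt((int) crc.getValue()).array();
+        return Base64.getEncoder().encodeToString(value);
+    }
+}
+```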
+
+The user is given the option to delete the resources created by this workflow.
+
+## Metadata
+
+| action / scenario | metadata file | metadata key |
+|--------------------------------|------------------|-----------------------------|
+| `AbortMultipartUpload` | s3_metadata.yaml | s3_AbortMultipartUpload |
+| `CreateMultipartUpload` | s3_metadata.yaml | s3_CreateMultipartUpload |
+| `DeleteObject` | s3_metadata.yaml | s3_DeleteObject |
+| `GetObjectAttributes` | s3_metadata.yaml | s3_GetObjectAttributes |
+| `PutObject` | s3_metadata.yaml | s3_PutObject |
+| `UploadPart` | s3_metadata.yaml | s3_UploadPart |
+| `CompleteMultipartUpload` | s3_metadata.yaml | s3_CompleteMultipartUpload |
+| `S3 Object Integrity Scenario` | s3_metadata.yaml | s3_Scenario_ObjectIntegrity |
+
From eafb697c06171b5f79cf620f833b0cb058f67a02 Mon Sep 17 00:00:00 2001
From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com>
Date: Wed, 24 Jul 2024 11:58:40 -0500
Subject: [PATCH 08/98] .NET and Python: Adding genai tags to metadata. (#6682)
Adding genai tags to metadata.
---
.doc_gen/metadata/medical-imaging_metadata.yaml | 8 ++++++++
.doc_gen/metadata/sesv2_metadata.yaml | 11 +++++++++++
2 files changed, 19 insertions(+)
diff --git a/.doc_gen/metadata/medical-imaging_metadata.yaml b/.doc_gen/metadata/medical-imaging_metadata.yaml
index ffeaa831aef..9b1b97be602 100644
--- a/.doc_gen/metadata/medical-imaging_metadata.yaml
+++ b/.doc_gen/metadata/medical-imaging_metadata.yaml
@@ -33,6 +33,7 @@ medical-imaging_Hello:
github_note_at_bottom: true
excerpts:
- description:
+ genai: some
snippet_tags:
- python.example_code.medical-imaging.Hello
services:
@@ -1162,31 +1163,38 @@ medical-imaging_Scenario_ImageSetsAndFrames:
github_note_at_bottom: true
excerpts:
- description: Create an &CFN; stack with the necessary resources.
+ genai: some
snippet_tags:
- python.example_code.medical-imaging.workflow.deploy
- description: Copy DICOM files to the &S3; import bucket.
+ genai: some
snippet_tags:
- python.example_code.medical-imaging.workflow.copy
- description: Import the DICOM files to the &S3; data store.
+ genai: some
snippet_tags:
- python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl
- python.example_code.medical-imaging.workflow.StartDICOMImportJob
- description: Get image sets created by the DICOM import job.
+ genai: some
snippet_tags:
- python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl
- python.example_code.medical-imaging.workflow.GetImageSetsForImportJob
- python.example_code.medical-imaging.workflow.GetImageSet
- description: Get image frame information for image sets.
+ genai: some
snippet_tags:
- python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl
- python.example_code.medical-imaging.workflow.GetImageFrames
- python.example_code.medical-imaging.workflow.GetImageSetMetadata
- description: Download, decode and verify image frames.
+ genai: some
snippet_tags:
- python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl
- python.example_code.medical-imaging.workflow.GetPixelData
- python.example_code.medical-imaging.workflow.downloadAndCheck
- description: Clean up resources.
+ genai: some
snippet_tags:
- python.example_code.medical-imaging.workflow.destroy
- python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl
diff --git a/.doc_gen/metadata/sesv2_metadata.yaml b/.doc_gen/metadata/sesv2_metadata.yaml
index 69d75526f4b..a380f9ca83c 100644
--- a/.doc_gen/metadata/sesv2_metadata.yaml
+++ b/.doc_gen/metadata/sesv2_metadata.yaml
@@ -6,6 +6,7 @@ sesv2_CreateContactList:
github: dotnetv3/SESv2
excerpts:
- description:
+ genai: most
snippet_tags:
- SESWorkflow.dotnetv3.SESv2Wrapper.CreateContactList
Rust:
@@ -47,6 +48,7 @@ sesv2_CreateContact:
github: dotnetv3/SESv2
excerpts:
- description:
+ genai: most
snippet_tags:
- SESWorkflow.dotnetv3.SESv2Wrapper.CreateContact
Rust:
@@ -111,6 +113,7 @@ sesv2_ListContacts:
github: dotnetv3/SESv2
excerpts:
- description:
+ genai: most
snippet_tags:
- SESWorkflow.dotnetv3.SESv2Wrapper.ListContacts
Rust:
@@ -151,6 +154,7 @@ sesv2_SendEmail:
github: dotnetv3/SESv2
excerpts:
- description:
+ genai: most
snippet_tags:
- SESWorkflow.dotnetv3.SESv2Wrapper.SendEmail
Java:
@@ -211,6 +215,7 @@ sesv2_CreateEmailIdentity:
github: dotnetv3/SESv2
excerpts:
- description:
+ genai: most
snippet_tags:
- SESWorkflow.dotnetv3.SESv2Wrapper.CreateEmailIdentity
Java:
@@ -252,6 +257,7 @@ sesv2_CreateEmailTemplate:
github: dotnetv3/SESv2
excerpts:
- description:
+ genai: most
snippet_tags:
- SESWorkflow.dotnetv3.SESv2Wrapper.CreateEmailTemplate
Java:
@@ -293,6 +299,7 @@ sesv2_DeleteContactList:
github: dotnetv3/SESv2
excerpts:
- description:
+ genai: most
snippet_tags:
- SESWorkflow.dotnetv3.SESv2Wrapper.DeleteContactList
Java:
@@ -334,6 +341,7 @@ sesv2_DeleteEmailIdentity:
github: dotnetv3/SESv2
excerpts:
- description:
+ genai: most
snippet_tags:
- SESWorkflow.dotnetv3.SESv2Wrapper.DeleteEmailIdentity
Java:
@@ -375,6 +383,7 @@ sesv2_DeleteEmailTemplate:
github: dotnetv3/SESv2
excerpts:
- description:
+ genai: most
snippet_tags:
- SESWorkflow.dotnetv3.SESv2Wrapper.DeleteEmailTemplate
Java:
@@ -420,9 +429,11 @@ sesv2_NewsletterWorkflow:
github: dotnetv3/SESv2
excerpts:
- description: Run the workflow.
+ genai: most
snippet_tags:
- SESWorkflow.dotnetv3.NewsletterWorkflow
- description: Wrapper for service operations.
+ genai: most
snippet_tags:
- SESWorkflow.dotnetv3.SESv2Wrapper
Java:
From 5abd82ccd3f68555fa5b7b880b47ec31bcf63490 Mon Sep 17 00:00:00 2001
From: David Souther
Date: Thu, 25 Jul 2024 12:03:48 -0400
Subject: [PATCH 09/98] Update rust api link_template. (#6683)
Relies on internal commit 139264199
---
.doc_gen/metadata/sdks.yaml | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/.doc_gen/metadata/sdks.yaml b/.doc_gen/metadata/sdks.yaml
index 9ba84ff8956..b9fa685c376 100644
--- a/.doc_gen/metadata/sdks.yaml
+++ b/.doc_gen/metadata/sdks.yaml
@@ -279,7 +279,8 @@ Rust:
api_ref:
uid: "SdkForRustV1"
name: "&guide-rust-api;"
- link_template: "https://docs.rs/releases/search?query=aws-sdk"
+ # https://docs.rs/aws-sdk-apigateway/latest/aws_sdk_apigateway/client/struct.Client.html#method.get_rest_apis
+ link_template: "https://docs.rs/aws-sdk-{{.ServiceCollapsed}}/latest/aws_sdk_{{.ServiceCollapsed}}/client/struct.Client.html#method.{{.ActionSnake}}"
guide: "&guide-rust-dev;"
Swift:
property: swift
From 70b06d499f5a048088de0a371fc60d4a6438220c Mon Sep 17 00:00:00 2001
From: David Souther
Date: Thu, 25 Jul 2024 14:03:39 -0400
Subject: [PATCH 10/98] Rust: Filter Aurora Instance Option list. (#6689)
Filter Aurora Instance Option list.
---
rustv1/examples/aurora/README.md | 22 +++++++++----------
.../aurora/src/aurora_scenario/mod.rs | 1 +
.../aurora/src/aurora_scenario/tests.rs | 7 ++++++
3 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/rustv1/examples/aurora/README.md b/rustv1/examples/aurora/README.md
index 81b6272eba6..59a51080207 100644
--- a/rustv1/examples/aurora/README.md
+++ b/rustv1/examples/aurora/README.md
@@ -38,19 +38,19 @@ For prerequisites, see the [README](../../README.md#Prerequisites) in the `rustv
Code excerpts that show you how to call individual service functions.
-- [CreateDBCluster](src/aurora_scenario/mod.rs#L352)
-- [CreateDBClusterParameterGroup](src/aurora_scenario/mod.rs#L203)
-- [CreateDBClusterSnapshot](src/aurora_scenario/mod.rs#L352)
-- [CreateDBInstance](src/aurora_scenario/mod.rs#L352)
-- [DeleteDBCluster](src/aurora_scenario/mod.rs#L512)
-- [DeleteDBClusterParameterGroup](src/aurora_scenario/mod.rs#L512)
-- [DeleteDBInstance](src/aurora_scenario/mod.rs#L512)
-- [DescribeDBClusterParameters](src/aurora_scenario/mod.rs#L286)
-- [DescribeDBClusters](src/aurora_scenario/mod.rs#L352)
+- [CreateDBCluster](src/aurora_scenario/mod.rs#L353)
+- [CreateDBClusterParameterGroup](src/aurora_scenario/mod.rs#L204)
+- [CreateDBClusterSnapshot](src/aurora_scenario/mod.rs#L353)
+- [CreateDBInstance](src/aurora_scenario/mod.rs#L353)
+- [DeleteDBCluster](src/aurora_scenario/mod.rs#L513)
+- [DeleteDBClusterParameterGroup](src/aurora_scenario/mod.rs#L513)
+- [DeleteDBInstance](src/aurora_scenario/mod.rs#L513)
+- [DescribeDBClusterParameters](src/aurora_scenario/mod.rs#L287)
+- [DescribeDBClusters](src/aurora_scenario/mod.rs#L353)
- [DescribeDBEngineVersions](src/aurora_scenario/mod.rs#L142)
-- [DescribeDBInstances](src/aurora_scenario/mod.rs#L512)
+- [DescribeDBInstances](src/aurora_scenario/mod.rs#L513)
- [DescribeOrderableDBInstanceOptions](src/aurora_scenario/mod.rs#L179)
-- [ModifyDBClusterParameterGroup](src/aurora_scenario/mod.rs#L315)
+- [ModifyDBClusterParameterGroup](src/aurora_scenario/mod.rs#L316)
### Scenarios
diff --git a/rustv1/examples/aurora/src/aurora_scenario/mod.rs b/rustv1/examples/aurora/src/aurora_scenario/mod.rs
index bd5402ac5fb..f509f7432d7 100644
--- a/rustv1/examples/aurora/src/aurora_scenario/mod.rs
+++ b/rustv1/examples/aurora/src/aurora_scenario/mod.rs
@@ -193,6 +193,7 @@ impl AuroraScenario {
.map(|options| {
options
.iter()
+ .filter(|o| o.storage_type() == Some("aurora"))
.map(|o| o.db_instance_class().unwrap_or_default().to_string())
                .collect::<Vec<String>>()
})
diff --git a/rustv1/examples/aurora/src/aurora_scenario/tests.rs b/rustv1/examples/aurora/src/aurora_scenario/tests.rs
index 28c27bc44f1..70f8a5e5051 100644
--- a/rustv1/examples/aurora/src/aurora_scenario/tests.rs
+++ b/rustv1/examples/aurora/src/aurora_scenario/tests.rs
@@ -205,12 +205,19 @@ async fn test_scenario_get_instance_classes() {
Ok(vec![
OrderableDbInstanceOption::builder()
.db_instance_class("t1")
+ .storage_type("aurora")
+ .build(),
+ OrderableDbInstanceOption::builder()
+ .db_instance_class("t1")
+ .storage_type("aurora-iopt1")
.build(),
OrderableDbInstanceOption::builder()
.db_instance_class("t2")
+ .storage_type("aurora")
.build(),
OrderableDbInstanceOption::builder()
.db_instance_class("t3")
+ .storage_type("aurora")
.build(),
])
});
From 8025bf7e42b94b424f5a40ecf6325d0a3a4a5d8a Mon Sep 17 00:00:00 2001
From: David Souther
Date: Thu, 25 Jul 2024 15:14:27 -0400
Subject: [PATCH 11/98] Rust: Bedrock Examples (#6686)
Bedrock Runtime Converse API in Rust to Anthropic Claude
---
.../metadata/bedrock-runtime_metadata.yaml | 40 ++
rustv1/examples/Cargo.toml | 1 +
rustv1/examples/bedrock-runtime/Cargo.toml | 18 +
rustv1/examples/bedrock-runtime/README.md | 77 ++++
.../src/bin/converse-stream.rs | 138 ++++++
.../bedrock-runtime/src/bin/converse.rs | 93 ++++
.../bedrock-runtime/src/bin/tool-use.rs | 416 ++++++++++++++++++
7 files changed, 783 insertions(+)
create mode 100644 rustv1/examples/bedrock-runtime/Cargo.toml
create mode 100644 rustv1/examples/bedrock-runtime/README.md
create mode 100644 rustv1/examples/bedrock-runtime/src/bin/converse-stream.rs
create mode 100644 rustv1/examples/bedrock-runtime/src/bin/converse.rs
create mode 100644 rustv1/examples/bedrock-runtime/src/bin/tool-use.rs
diff --git a/.doc_gen/metadata/bedrock-runtime_metadata.yaml b/.doc_gen/metadata/bedrock-runtime_metadata.yaml
index 5e75132ec8e..136a70cef54 100644
--- a/.doc_gen/metadata/bedrock-runtime_metadata.yaml
+++ b/.doc_gen/metadata/bedrock-runtime_metadata.yaml
@@ -154,6 +154,17 @@ bedrock-runtime_Converse_AnthropicClaude:
- description: Send a text message to Anthropic Claude, using Bedrock's Converse API.
snippet_tags:
- javascript.v3.bedrock-runtime.Converse_AnthropicClaude
+ Rust:
+ versions:
+ - sdk_version: 1
+ github: rustv1/examples/bedrock-runtime
+ excerpts:
+ - description: Send a text message to Anthropic Claude, using Bedrock's Converse API.
+ snippet_tags:
+ - rust.bedrock-runtime.Converse_AnthropicClaude
+ - description: Use statements, Error utility, and constants.
+ snippet_tags:
+ - rust.bedrock-runtime.Converse_AnthropicClaude.supporting
services:
bedrock-runtime: {Converse}
@@ -369,6 +380,17 @@ bedrock-runtime_ConverseStream_AnthropicClaude:
- description: Send a text message to Anthropic Claude, using Bedrock's Converse API and process the response stream in real-time.
snippet_tags:
- javascript.v3.bedrock-runtime.ConverseStream_AnthropicClaude
+ Rust:
+ versions:
+ - sdk_version: 1
+ github: rustv1/examples/bedrock-runtime
+ excerpts:
+ - description: Send a text message to Anthropic Claude and stream reply tokens, using Bedrock's ConverseStream API.
+ snippet_tags:
+ - rust.bedrock-runtime.ConverseStream_AnthropicClaude
+ - description: Use statements, Error utility, and constants.
+ snippet_tags:
+ - rust.bedrock-runtime.ConverseStream_AnthropicClaude.supporting
services:
bedrock-runtime: {ConverseStream}
@@ -1276,6 +1298,24 @@ bedrock-runtime_Scenario_ToolUseDemo_AnthropicClaude:
- description: "The weather tool used by the demo. This script defines the tool specification and implements the logic to retrieve weather data using from the Open-Meteo API."
snippet_files:
- python/example_code/bedrock-runtime/cross-model-scenarios/tool_use_demo/weather_tool.py
+ Rust:
+ versions:
+ - sdk_version: 1
+ github: rustv1/examples/bedrock-runtime
+ excerpts:
+ - description: "The primary scenario and logic for the demo. This orchestrates the conversation between the user, the &BR; Converse API, and a weather tool."
+ snippet_tags:
+ - rust.bedrock-runtime.Converse_AnthropicClaude.tool-use
+          - description: "The weather tool used by the demo. This script defines the tool specification and implements the logic to retrieve weather data from the Open-Meteo API."
+ snippet_tags:
+ - rust.bedrock-runtime.Converse_AnthropicClaude.tool-use.weather-tool
+ - description: "Utilities to print the Message Content Blocks"
+ snippet_tags:
+ - rust.bedrock-runtime.Converse_AnthropicClaude.tool-use.user-interface
+ - description: "Use statements, Error utility, and constants."
+ snippet_tags:
+ - rust.bedrock-runtime.Converse_AnthropicClaude.tool-use.supporting
+
services:
bedrock-runtime: {Converse}
diff --git a/rustv1/examples/Cargo.toml b/rustv1/examples/Cargo.toml
index b9fdf16a236..92a620b4185 100644
--- a/rustv1/examples/Cargo.toml
+++ b/rustv1/examples/Cargo.toml
@@ -10,6 +10,7 @@ members = [
"auto-scaling",
"autoscalingplans",
"batch",
+ "bedrock-runtime",
"cloudformation",
"cloudwatch",
"cloudwatchlogs",
diff --git a/rustv1/examples/bedrock-runtime/Cargo.toml b/rustv1/examples/bedrock-runtime/Cargo.toml
new file mode 100644
index 00000000000..06a20e8a411
--- /dev/null
+++ b/rustv1/examples/bedrock-runtime/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "bedrock-runtime"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+aws-config = "1.5.4"
+aws-sdk-bedrockruntime = "1.40.0"
+aws-smithy-runtime-api = "1.7.1"
+aws-smithy-types = "1.2.0"
+reqwest = "0.12.5"
+serde = "1.0.204"
+serde_json = "1.0.120"
+tokio = { version = "1.38.1", features = ["full"] }
+tracing = "0.1.40"
+tracing-subscriber = "0.3.18"
diff --git a/rustv1/examples/bedrock-runtime/README.md b/rustv1/examples/bedrock-runtime/README.md
new file mode 100644
index 00000000000..ae64bcc7542
--- /dev/null
+++ b/rustv1/examples/bedrock-runtime/README.md
@@ -0,0 +1,77 @@
+# Amazon Bedrock Runtime code examples for the SDK for Rust
+
+## Overview
+
+Shows how to use the AWS SDK for Rust to work with Amazon Bedrock Runtime.
+
+
+
+
+_Amazon Bedrock Runtime is a fully managed service that makes it easy to use foundation models from third-party providers and Amazon._
+
+## ⚠ Important
+
+* Running this code might result in charges to your AWS account. For more details, see [AWS Pricing](https://aws.amazon.com/pricing/) and [Free Tier](https://aws.amazon.com/free/).
+* Running the tests might result in charges to your AWS account.
+* We recommend that you grant your code least privilege. At most, grant only the minimum permissions required to perform the task. For more information, see [Grant least privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege).
+* This code is not tested in every AWS Region. For more information, see [AWS Regional Services](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services).
+
+
+
+
+## Code examples
+
+### Prerequisites
+
+For prerequisites, see the [README](../../README.md#Prerequisites) in the `rustv1` folder.
+
+
+
+
+### Anthropic Claude
+
+- [Converse](src/bin/converse.rs#L43)
+- [ConverseStream](src/bin/converse-stream.rs#L70)
+- [Scenario: Tool use with the Converse API](src/bin/tool-use.rs#L242)
+
+
+
+
+
+## Run the examples
+
+### Instructions
+
+
+
+
+
+
+
+### Tests
+
+⚠ Running tests might result in charges to your AWS account.
+
+
+To find instructions for running these tests, see the [README](../../README.md#Tests)
+in the `rustv1` folder.
+
+
+
+
+
+
+## Additional resources
+
+- [Amazon Bedrock Runtime User Guide](https://docs.aws.amazon.com/bedrock/latest/userguide/what-is-bedrock.html)
+- [Amazon Bedrock Runtime API Reference](https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html)
+- [SDK for Rust Amazon Bedrock Runtime reference](https://docs.rs/aws-sdk-bedrockruntime/latest/aws_sdk_bedrockruntime/)
+
+
+
+
+---
+
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+SPDX-License-Identifier: Apache-2.0
\ No newline at end of file
diff --git a/rustv1/examples/bedrock-runtime/src/bin/converse-stream.rs b/rustv1/examples/bedrock-runtime/src/bin/converse-stream.rs
new file mode 100644
index 00000000000..403215f0f1b
--- /dev/null
+++ b/rustv1/examples/bedrock-runtime/src/bin/converse-stream.rs
@@ -0,0 +1,138 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// snippet-start:[rust.bedrock-runtime.ConverseStream_AnthropicClaude.supporting]
+use aws_config::BehaviorVersion;
+use aws_sdk_bedrockruntime::{
+ error::ProvideErrorMetadata,
+ operation::converse_stream::ConverseStreamError,
+ types::{
+ error::ConverseStreamOutputError, ContentBlock, ConversationRole,
+ ConverseStreamOutput as ConverseStreamOutputType, Message,
+ },
+ Client,
+};
+
+// Set the model ID, e.g., Claude 3 Haiku.
+const MODEL_ID: &str = "anthropic.claude-3-haiku-20240307-v1:0";
+const CLAUDE_REGION: &str = "us-east-1";
+
+// Start a conversation with the user message.
+const USER_MESSAGE: &str = "Describe the purpose of a 'hello world' program in one line.";
+
+#[derive(Debug)]
+struct BedrockConverseStreamError(String);
+impl std::fmt::Display for BedrockConverseStreamError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "Can't invoke '{}'. Reason: {}", MODEL_ID, self.0)
+ }
+}
+impl std::error::Error for BedrockConverseStreamError {}
+impl From<&str> for BedrockConverseStreamError {
+ fn from(value: &str) -> Self {
+ BedrockConverseStreamError(value.into())
+ }
+}
+
+impl From<&ConverseStreamError> for BedrockConverseStreamError {
+ fn from(value: &ConverseStreamError) -> Self {
+ BedrockConverseStreamError(
+ match value {
+ ConverseStreamError::ModelTimeoutException(_) => "Model took too long",
+ ConverseStreamError::ModelNotReadyException(_) => "Model is not ready",
+ _ => "Unknown",
+ }
+ .into(),
+ )
+ }
+}
+
+impl From<&ConverseStreamOutputError> for BedrockConverseStreamError {
+ fn from(value: &ConverseStreamOutputError) -> Self {
+ match value {
+ ConverseStreamOutputError::ValidationException(ve) => BedrockConverseStreamError(
+ ve.message().unwrap_or("Unknown ValidationException").into(),
+ ),
+ ConverseStreamOutputError::ThrottlingException(te) => BedrockConverseStreamError(
+ te.message().unwrap_or("Unknown ThrottlingException").into(),
+ ),
+ value => BedrockConverseStreamError(
+ value
+ .message()
+ .unwrap_or("Unknown StreamOutput exception")
+ .into(),
+ ),
+ }
+ }
+}
+// snippet-end:[rust.bedrock-runtime.ConverseStream_AnthropicClaude.supporting]
+
+// snippet-start:[rust.bedrock-runtime.ConverseStream_AnthropicClaude]
+#[tokio::main]
+async fn main() -> Result<(), BedrockConverseStreamError> {
+ tracing_subscriber::fmt::init();
+ let sdk_config = aws_config::defaults(BehaviorVersion::latest())
+ .region(CLAUDE_REGION)
+ .load()
+ .await;
+ let client = Client::new(&sdk_config);
+
+ let response = client
+ .converse_stream()
+ .model_id(MODEL_ID)
+ .messages(
+ Message::builder()
+ .role(ConversationRole::User)
+ .content(ContentBlock::Text(USER_MESSAGE.to_string()))
+ .build()
+ .map_err(|_| "failed to build message")?,
+ )
+ .send()
+ .await;
+
+ let mut stream = match response {
+ Ok(output) => Ok(output.stream),
+ Err(e) => Err(BedrockConverseStreamError::from(
+ e.as_service_error().unwrap(),
+ )),
+ }?;
+
+ loop {
+ let token = stream.recv().await;
+ match token {
+ Ok(Some(text)) => {
+ let next = get_converse_output_text(text)?;
+ print!("{}", next);
+ Ok(())
+ }
+ Ok(None) => break,
+ Err(e) => Err(e
+ .as_service_error()
+ .map(BedrockConverseStreamError::from)
+ .unwrap_or(BedrockConverseStreamError(
+ "Unknown error receiving stream".into(),
+ ))),
+ }?
+ }
+
+ println!();
+
+ Ok(())
+}
+
+fn get_converse_output_text(
+ output: ConverseStreamOutputType,
+) -> Result<String, BedrockConverseStreamError> {
+ Ok(match output {
+ ConverseStreamOutputType::ContentBlockDelta(event) => match event.delta() {
+ Some(delta) => delta
+ .as_text()
+ .map(|s| s.clone())
+ .unwrap_or_else(|_| "".into()),
+ None => "".into(),
+ },
+ _ => "".into(),
+ })
+}
+
+// snippet-end:[rust.bedrock-runtime.ConverseStream_AnthropicClaude]
diff --git a/rustv1/examples/bedrock-runtime/src/bin/converse.rs b/rustv1/examples/bedrock-runtime/src/bin/converse.rs
new file mode 100644
index 00000000000..88075e4b1b5
--- /dev/null
+++ b/rustv1/examples/bedrock-runtime/src/bin/converse.rs
@@ -0,0 +1,93 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// snippet-start:[rust.bedrock-runtime.Converse_AnthropicClaude.supporting]
+use aws_config::BehaviorVersion;
+use aws_sdk_bedrockruntime::{
+ operation::converse::{ConverseError, ConverseOutput},
+ types::{ContentBlock, ConversationRole, Message},
+ Client,
+};
+
+// Set the model ID, e.g., Claude 3 Haiku.
+const MODEL_ID: &str = "anthropic.claude-3-haiku-20240307-v1:0";
+const CLAUDE_REGION: &str = "us-east-1";
+
+// Start a conversation with the user message.
+const USER_MESSAGE: &str = "Describe the purpose of a 'hello world' program in one line.";
+
+#[derive(Debug)]
+struct BedrockConverseError(String);
+impl std::fmt::Display for BedrockConverseError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "Can't invoke '{}'. Reason: {}", MODEL_ID, self.0)
+ }
+}
+impl std::error::Error for BedrockConverseError {}
+impl From<&str> for BedrockConverseError {
+ fn from(value: &str) -> Self {
+ BedrockConverseError(value.to_string())
+ }
+}
+impl From<&ConverseError> for BedrockConverseError {
+ fn from(value: &ConverseError) -> Self {
+ BedrockConverseError::from(match value {
+ ConverseError::ModelTimeoutException(_) => "Model took too long",
+ ConverseError::ModelNotReadyException(_) => "Model is not ready",
+ _ => "Unknown",
+ })
+ }
+}
+// snippet-end:[rust.bedrock-runtime.Converse_AnthropicClaude.supporting]
+
+// snippet-start:[rust.bedrock-runtime.Converse_AnthropicClaude]
+#[tokio::main]
+async fn main() -> Result<(), BedrockConverseError> {
+ tracing_subscriber::fmt::init();
+ let sdk_config = aws_config::defaults(BehaviorVersion::latest())
+ .region(CLAUDE_REGION)
+ .load()
+ .await;
+ let client = Client::new(&sdk_config);
+
+ let response = client
+ .converse()
+ .model_id(MODEL_ID)
+ .messages(
+ Message::builder()
+ .role(ConversationRole::User)
+ .content(ContentBlock::Text(USER_MESSAGE.to_string()))
+ .build()
+ .map_err(|_| "failed to build message")?,
+ )
+ .send()
+ .await;
+
+ match response {
+ Ok(output) => {
+ let text = get_converse_output_text(output)?;
+ println!("{}", text);
+ Ok(())
+ }
+ Err(e) => Err(e
+ .as_service_error()
+ .map(BedrockConverseError::from)
+ .unwrap_or_else(|| BedrockConverseError("Unknown service error".into()))),
+ }
+}
+
+fn get_converse_output_text(output: ConverseOutput) -> Result<String, BedrockConverseError> {
+ let text = output
+ .output()
+ .ok_or("no output")?
+ .as_message()
+ .map_err(|_| "output not a message")?
+ .content()
+ .first()
+ .ok_or("no content in message")?
+ .as_text()
+ .map_err(|_| "content is not text")?
+ .to_string();
+ Ok(text)
+}
+// snippet-end:[rust.bedrock-runtime.Converse_AnthropicClaude]
diff --git a/rustv1/examples/bedrock-runtime/src/bin/tool-use.rs b/rustv1/examples/bedrock-runtime/src/bin/tool-use.rs
new file mode 100644
index 00000000000..090aa51b942
--- /dev/null
+++ b/rustv1/examples/bedrock-runtime/src/bin/tool-use.rs
@@ -0,0 +1,416 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// snippet-start:[rust.bedrock-runtime.Converse_AnthropicClaude.tool-use.supporting]
+use std::{collections::HashMap, io::stdin};
+
+use aws_config::BehaviorVersion;
+use aws_sdk_bedrockruntime::{
+ error::{BuildError, SdkError},
+ operation::converse::{ConverseError, ConverseOutput},
+ types::{
+ ContentBlock, ConversationRole::User, Message, StopReason, SystemContentBlock, Tool,
+ ToolConfiguration, ToolInputSchema, ToolResultBlock, ToolResultContentBlock,
+ ToolSpecification, ToolUseBlock,
+ },
+ Client,
+};
+use aws_smithy_runtime_api::http::Response;
+use aws_smithy_types::Document;
+use tracing::debug;
+
+/// This demo illustrates a tool use scenario using Amazon Bedrock's Converse API and a weather tool.
+/// The script interacts with a foundation model on Amazon Bedrock to provide weather information based on user
+/// input. It uses the Open-Meteo API (https://open-meteo.com) to retrieve current weather data for a given location.
+
+// Set the model ID, e.g., Claude 3 Haiku.
+const MODEL_ID: &str = "anthropic.claude-3-haiku-20240307-v1:0";
+const CLAUDE_REGION: &str = "us-east-1";
+
+const SYSTEM_PROMPT: &str = "You are a weather assistant that provides current weather data for user-specified locations using only
+the Weather_Tool, which expects latitude and longitude. Infer the coordinates from the location yourself.
+If the user provides coordinates, infer the approximate location and refer to it in your response.
+To use the tool, you strictly apply the provided tool specification.
+
+- Explain your step-by-step process, and give brief updates before each step.
+- Only use the Weather_Tool for data. Never guess or make up information.
+- Repeat the tool use for subsequent requests if necessary.
+- If the tool errors, apologize, explain weather is unavailable, and suggest other options.
+- Report temperatures in °C (°F) and wind in km/h (mph). Keep weather reports concise. Sparingly use
+ emojis where appropriate.
+- Only respond to weather queries. Remind off-topic users of your purpose.
+- Never claim to search online, access external data, or use tools besides Weather_Tool.
+- Complete the entire process until you have all required data before sending the complete response.
+";
+
+// The maximum number of recursive calls allowed in the tool_use_demo function.
+// This helps prevent infinite loops and potential performance issues.
+const MAX_RECURSIONS: i8 = 5;
+
+const TOOL_NAME: &str = "Weather_Tool";
+const TOOL_DESCRIPTION: &str =
+ "Get the current weather for a given location, based on its WGS84 coordinates.";
+fn make_tool_schema() -> Document {
+    Document::Object(HashMap::<String, Document>::from([
+ ("type".into(), Document::String("object".into())),
+ (
+ "properties".into(),
+ Document::Object(HashMap::from([
+ (
+ "latitude".into(),
+ Document::Object(HashMap::from([
+ ("type".into(), Document::String("string".into())),
+ (
+ "description".into(),
+ Document::String("Geographical WGS84 latitude of the location.".into()),
+ ),
+ ])),
+ ),
+ (
+ "longitude".into(),
+ Document::Object(HashMap::from([
+ ("type".into(), Document::String("string".into())),
+ (
+ "description".into(),
+ Document::String(
+ "Geographical WGS84 longitude of the location.".into(),
+ ),
+ ),
+ ])),
+ ),
+ ])),
+ ),
+ (
+ "required".into(),
+ Document::Array(vec![
+ Document::String("latitude".into()),
+ Document::String("longitude".into()),
+ ]),
+ ),
+ ]))
+}
+
+#[derive(Debug)]
+struct ToolUseScenarioError(String);
+impl std::fmt::Display for ToolUseScenarioError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "Tool use error with '{}'. Reason: {}", MODEL_ID, self.0)
+ }
+}
+impl From<&str> for ToolUseScenarioError {
+ fn from(value: &str) -> Self {
+ ToolUseScenarioError(value.into())
+ }
+}
+impl From<BuildError> for ToolUseScenarioError {
+ fn from(value: BuildError) -> Self {
+ ToolUseScenarioError(value.to_string().clone())
+ }
+}
+impl From<SdkError<ConverseError, Response>> for ToolUseScenarioError {
+    fn from(value: SdkError<ConverseError, Response>) -> Self {
+ ToolUseScenarioError(match value.as_service_error() {
+ Some(value) => value.meta().message().unwrap_or("Unknown").into(),
+ None => "Unknown".into(),
+ })
+ }
+}
+// snippet-end:[rust.bedrock-runtime.Converse_AnthropicClaude.tool-use.supporting]
+
+// snippet-start:[rust.bedrock-runtime.Converse_AnthropicClaude.tool-use.user-interface]
+fn print_model_response(block: &ContentBlock) -> Result<(), ToolUseScenarioError> {
+ if block.is_text() {
+ let text = block.as_text().unwrap();
+ println!("\x1b[0;90mThe model's response:\x1b[0m\n{text}");
+ Ok(())
+ } else {
+ Err(ToolUseScenarioError(format!(
+ "Content block is not text ({block:?})"
+ )))
+ }
+}
+// snippet-end:[rust.bedrock-runtime.Converse_AnthropicClaude.tool-use.user-interface]
+
+async fn get_input() -> Result
+ * This method creates and returns a singleton instance of the {@link S3ControlAsyncClient}. If the instance
+ * has not been created yet, it will be initialized with the following configuration:
+     * <ul>
+     *   <li>Maximum concurrency of 100 with 60-second connection, read, and write timeouts.</li>
+     *   <li>A 2-minute API call timeout, a 90-second attempt timeout, and up to 3 retries.</li>
+     *   <li>The US_EAST_1 Region and credentials taken from environment variables.</li>
+     * </ul>
+ * @return the asynchronous S3 Control client instance
+ */
+ private static S3ControlAsyncClient getAsyncClient() {
+ if (asyncClient == null) {
+ SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder()
+ .maxConcurrency(100)
+ .connectionTimeout(Duration.ofSeconds(60))
+ .readTimeout(Duration.ofSeconds(60))
+ .writeTimeout(Duration.ofSeconds(60))
+ .build();
+
+ ClientOverrideConfiguration overrideConfig = ClientOverrideConfiguration.builder()
+ .apiCallTimeout(Duration.ofMinutes(2))
+ .apiCallAttemptTimeout(Duration.ofSeconds(90))
+ .retryPolicy(RetryPolicy.builder()
+ .numRetries(3)
+ .build())
+ .build();
+
+ asyncClient = S3ControlAsyncClient.builder()
+ .region(Region.US_EAST_1)
+ .httpClient(httpClient)
+ .overrideConfiguration(overrideConfig)
+ .credentialsProvider(EnvironmentVariableCredentialsProvider.create())
+ .build();
+ }
+ return asyncClient;
+ }
+
+
+ /**
+ * Asynchronously lists batch jobs that have completed for the specified account.
+ *
+ * @param accountId the ID of the account to list jobs for
+ * @return a CompletableFuture that completes when the job listing operation is finished
+ */
+    public static CompletableFuture<Void> listBatchJobsAsync(String accountId) {
+ ListJobsRequest jobsRequest = ListJobsRequest.builder()
+ .jobStatuses(JobStatus.COMPLETE)
+ .accountId(accountId)
+ .maxResults(10)
+ .build();
+
+ ListJobsPublisher publisher = getAsyncClient().listJobsPaginator(jobsRequest);
+ return publisher.subscribe(response -> {
+            List<JobListDescriptor> jobs = response.jobs();
+ for (JobListDescriptor job : jobs) {
+ System.out.println("The job id is " + job.jobId());
+ System.out.println("The job priority is " + job.priority());
+ }
+ }).thenAccept(response -> {
+ System.out.println("Listing batch jobs completed");
+ }).exceptionally(ex -> {
+ System.err.println("Failed to list batch jobs: " + ex.getMessage());
+ throw new RuntimeException(ex);
+ });
+ }
+ // snippet-end:[s3control.java2.list_jobs.main]
+}
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/batch/S3BatchActions.java b/javav2/example_code/s3/src/main/java/com/example/s3/batch/S3BatchActions.java
new file mode 100644
index 00000000000..70f0edb0f1e
--- /dev/null
+++ b/javav2/example_code/s3/src/main/java/com/example/s3/batch/S3BatchActions.java
@@ -0,0 +1,733 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.example.s3.batch;
+import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider;
+import software.amazon.awssdk.core.async.AsyncRequestBody;
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.core.retry.RetryPolicy;
+import software.amazon.awssdk.core.waiters.WaiterResponse;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
+import software.amazon.awssdk.services.s3.model.Delete;
+import software.amazon.awssdk.services.s3.model.DeleteBucketRequest;
+import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
+import software.amazon.awssdk.services.s3.model.DeleteObjectResponse;
+import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
+import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
+import software.amazon.awssdk.services.s3.model.HeadBucketResponse;
+import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
+import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
+import software.amazon.awssdk.services.s3.model.ObjectIdentifier;
+import software.amazon.awssdk.services.s3.model.PutObjectRequest;
+import software.amazon.awssdk.services.s3.model.PutObjectResponse;
+import software.amazon.awssdk.services.s3.model.S3Exception;
+import software.amazon.awssdk.services.s3.waiters.S3Waiter;
+import software.amazon.awssdk.services.s3control.S3ControlAsyncClient;
+import software.amazon.awssdk.services.s3control.model.CreateJobRequest;
+import software.amazon.awssdk.services.s3control.model.CreateJobResponse;
+import software.amazon.awssdk.services.s3control.model.DeleteJobTaggingRequest;
+import software.amazon.awssdk.services.s3control.model.DescribeJobRequest;
+import software.amazon.awssdk.services.s3control.model.GetJobTaggingRequest;
+import software.amazon.awssdk.services.s3control.model.JobManifest;
+import software.amazon.awssdk.services.s3control.model.JobManifestLocation;
+import software.amazon.awssdk.services.s3control.model.JobManifestSpec;
+import software.amazon.awssdk.services.s3control.model.JobOperation;
+import software.amazon.awssdk.services.s3control.model.JobReport;
+import software.amazon.awssdk.services.s3control.model.JobStatus;
+import software.amazon.awssdk.services.s3control.model.PutJobTaggingRequest;
+import software.amazon.awssdk.services.s3control.model.S3ControlException;
+import software.amazon.awssdk.services.s3control.model.S3SetObjectTaggingOperation;
+import software.amazon.awssdk.services.s3control.model.S3Tag;
+import software.amazon.awssdk.services.s3control.model.UpdateJobPriorityRequest;
+import software.amazon.awssdk.services.s3control.model.UpdateJobStatusRequest;
+import software.amazon.awssdk.services.sts.StsClient;
+import software.amazon.awssdk.services.sts.model.GetCallerIdentityResponse;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.stream.Collectors;
+
+// snippet-start:[s3control.java2.job.actions.main]
+public class S3BatchActions {
+
+ private static S3ControlAsyncClient asyncClient;
+
+    private static S3AsyncClient s3AsyncClient;
+ /**
+ * Retrieves the asynchronous S3 Control client instance.
+ *
+ * This method creates and returns a singleton instance of the {@link S3ControlAsyncClient}. If the instance
+ * has not been created yet, it will be initialized with the following configuration:
+     * <ul>
+     *   <li>Maximum concurrency of 100 with 60-second connection, read, and write timeouts.</li>
+     *   <li>A 2-minute API call timeout, a 90-second attempt timeout, and up to 3 retries.</li>
+     *   <li>The US_EAST_1 Region and credentials taken from environment variables.</li>
+     * </ul>
+ * @return the asynchronous S3 Control client instance
+ */
+ private static S3ControlAsyncClient getAsyncClient() {
+ if (asyncClient == null) {
+ SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder()
+ .maxConcurrency(100)
+ .connectionTimeout(Duration.ofSeconds(60))
+ .readTimeout(Duration.ofSeconds(60))
+ .writeTimeout(Duration.ofSeconds(60))
+ .build();
+
+ ClientOverrideConfiguration overrideConfig = ClientOverrideConfiguration.builder()
+ .apiCallTimeout(Duration.ofMinutes(2))
+ .apiCallAttemptTimeout(Duration.ofSeconds(90))
+ .retryPolicy(RetryPolicy.builder()
+ .numRetries(3)
+ .build())
+ .build();
+
+ asyncClient = S3ControlAsyncClient.builder()
+ .region(Region.US_EAST_1)
+ .httpClient(httpClient)
+ .overrideConfiguration(overrideConfig)
+ .credentialsProvider(EnvironmentVariableCredentialsProvider.create())
+ .build();
+ }
+ return asyncClient;
+ }
+
+ private static S3AsyncClient getS3AsyncClient() {
+        if (s3AsyncClient == null) {
+ SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder()
+ .maxConcurrency(100)
+ .connectionTimeout(Duration.ofSeconds(60))
+ .readTimeout(Duration.ofSeconds(60))
+ .writeTimeout(Duration.ofSeconds(60))
+ .build();
+
+ ClientOverrideConfiguration overrideConfig = ClientOverrideConfiguration.builder()
+ .apiCallTimeout(Duration.ofMinutes(2))
+ .apiCallAttemptTimeout(Duration.ofSeconds(90))
+ .retryPolicy(RetryPolicy.builder()
+ .numRetries(3)
+ .build())
+ .build();
+
+ s3AsyncClient = S3AsyncClient.builder()
+ .region(Region.US_EAST_1)
+ .httpClient(httpClient)
+ .overrideConfiguration(overrideConfig)
+ .credentialsProvider(EnvironmentVariableCredentialsProvider.create())
+ .build();
+ }
+ return s3AsyncClient;
+ }
+
+
+ // snippet-start:[s3control.java2.cancel_job.main]
+ /**
+ * Cancels a job asynchronously.
+ *
+ * @param jobId The ID of the job to be canceled.
+ * @param accountId The ID of the account associated with the job.
+ * @return A {@link CompletableFuture} that completes when the job status has been updated to "CANCELLED".
+ * If an error occurs during the update, the returned future will complete exceptionally.
+ */
+    public CompletableFuture<Void> cancelJobAsync(String jobId, String accountId) {
+ UpdateJobStatusRequest updateJobStatusRequest = UpdateJobStatusRequest.builder()
+ .accountId(accountId)
+ .jobId(jobId)
+ .requestedJobStatus(String.valueOf(JobStatus.CANCELLED))
+ .build();
+
+ return asyncClient.updateJobStatus(updateJobStatusRequest)
+ .thenAccept(updateJobStatusResponse -> {
+ System.out.println("Job status updated to: " + updateJobStatusResponse.status());
+ })
+ .exceptionally(ex -> {
+ System.err.println("Failed to cancel job: " + ex.getMessage());
+ throw new RuntimeException(ex); // Propagate the exception
+ });
+ }
+ // snippet-end:[s3control.java2.cancel_job.main]
+
+ // snippet-start:[s3control.java2.update_job.main]
+ /**
+ * Updates the priority of a job asynchronously.
+ *
+ * @param jobId the ID of the job to update
+ * @param accountId the ID of the account associated with the job
+ * @return a {@link CompletableFuture} that represents the asynchronous operation, which completes when the job priority has been updated or an error has occurred
+ */
+    public CompletableFuture<Void> updateJobPriorityAsync(String jobId, String accountId) {
+ UpdateJobPriorityRequest priorityRequest = UpdateJobPriorityRequest.builder()
+ .accountId(accountId)
+ .jobId(jobId)
+ .priority(60)
+ .build();
+
+        CompletableFuture<Void> future = new CompletableFuture<>();
+ getAsyncClient().updateJobPriority(priorityRequest)
+ .thenAccept(response -> {
+ System.out.println("The job priority was updated");
+ future.complete(null); // Complete the CompletableFuture on successful execution
+ })
+ .exceptionally(ex -> {
+ System.err.println("Failed to update job priority: " + ex.getMessage());
+ future.completeExceptionally(ex); // Complete the CompletableFuture exceptionally on error
+ return null; // Return null to handle the exception
+ });
+
+ return future;
+ }
+ // snippet-end:[s3control.java2.update_job.main]
+
+ // snippet-start:[s3control.java2.get_job_tagging.main]
+ /**
+ * Asynchronously retrieves the tags associated with a specific job in an AWS account.
+ *
+ * @param jobId the ID of the job for which to retrieve the tags
+ * @param accountId the ID of the AWS account associated with the job
+ * @return a {@link CompletableFuture} that completes when the job tags have been retrieved, or with an exception if the operation fails
+ * @throws RuntimeException if an error occurs while retrieving the job tags
+ */
+    public CompletableFuture<Void> getJobTagsAsync(String jobId, String accountId) {
+ GetJobTaggingRequest request = GetJobTaggingRequest.builder()
+ .jobId(jobId)
+ .accountId(accountId)
+ .build();
+
+ return asyncClient.getJobTagging(request)
+ .thenAccept(response -> {
+                List<S3Tag> tags = response.tags();
+ if (tags.isEmpty()) {
+ System.out.println("No tags found for job ID: " + jobId);
+ } else {
+ for (S3Tag tag : tags) {
+ System.out.println("Tag key is: " + tag.key());
+ System.out.println("Tag value is: " + tag.value());
+ }
+ }
+ })
+ .exceptionally(ex -> {
+ System.err.println("Failed to get job tags: " + ex.getMessage());
+ throw new RuntimeException(ex); // Propagate the exception
+ });
+ }
+ // snippet-end:[s3control.java2.get_job_tagging.main]
+
+ // snippet-start:[s3control.java2.del_job_tagging.main]
+ /**
+ * Asynchronously deletes the tags associated with a specific batch job.
+ *
+ * @param jobId The ID of the batch job whose tags should be deleted.
+ * @param accountId The ID of the account associated with the batch job.
+ * @return A CompletableFuture that completes when the job tags have been successfully deleted, or an exception is thrown if the deletion fails.
+ */
+    public CompletableFuture<Void> deleteBatchJobTagsAsync(String jobId, String accountId) {
+ DeleteJobTaggingRequest jobTaggingRequest = DeleteJobTaggingRequest.builder()
+ .accountId(accountId)
+ .jobId(jobId)
+ .build();
+
+ return asyncClient.deleteJobTagging(jobTaggingRequest)
+ .thenAccept(response -> {
+ System.out.println("You have successfully deleted " + jobId + " tagging.");
+ })
+ .exceptionally(ex -> {
+ System.err.println("Failed to delete job tags: " + ex.getMessage());
+ throw new RuntimeException(ex);
+ });
+ }
+ // snippet-end:[s3control.java2.del_job_tagging.main]
+
+ // snippet-start:[s3control.java2.describe_job.main]
+ /**
+ * Asynchronously describes the specified job.
+ *
+ * @param jobId the ID of the job to describe
+ * @param accountId the ID of the AWS account associated with the job
+ * @return a {@link CompletableFuture} that completes when the job description is available
+ * @throws RuntimeException if an error occurs while describing the job
+ */
+    public CompletableFuture<Void> describeJobAsync(String jobId, String accountId) {
+ DescribeJobRequest jobRequest = DescribeJobRequest.builder()
+ .jobId(jobId)
+ .accountId(accountId)
+ .build();
+
+ return getAsyncClient().describeJob(jobRequest)
+ .thenAccept(response -> {
+ System.out.println("Job ID: " + response.job().jobId());
+ System.out.println("Description: " + response.job().description());
+ System.out.println("Status: " + response.job().statusAsString());
+ System.out.println("Role ARN: " + response.job().roleArn());
+ System.out.println("Priority: " + response.job().priority());
+ System.out.println("Progress Summary: " + response.job().progressSummary());
+
+ // Print out details about the job manifest.
+ JobManifest manifest = response.job().manifest();
+ System.out.println("Manifest Location: " + manifest.location().objectArn());
+ System.out.println("Manifest ETag: " + manifest.location().eTag());
+
+ // Print out details about the job operation.
+ JobOperation operation = response.job().operation();
+ if (operation.s3PutObjectTagging() != null) {
+ System.out.println("Operation: S3 Put Object Tagging");
+ System.out.println("Tag Set: " + operation.s3PutObjectTagging().tagSet());
+ }
+
+ // Print out details about the job report.
+ JobReport report = response.job().report();
+ System.out.println("Report Bucket: " + report.bucket());
+ System.out.println("Report Prefix: " + report.prefix());
+ System.out.println("Report Format: " + report.format());
+ System.out.println("Report Enabled: " + report.enabled());
+ System.out.println("Report Scope: " + report.reportScopeAsString());
+ })
+ .exceptionally(ex -> {
+ System.err.println("Failed to describe job: " + ex.getMessage());
+ throw new RuntimeException(ex);
+ });
+ }
+ // snippet-end:[s3control.java2.describe_job.main]
+
+ // snippet-start:[s3control.java2.create_job.async.main]
+ /**
+ * Creates an asynchronous S3 job using the AWS Java SDK.
+ *
+ * @param accountId the AWS account ID associated with the job
+ * @param iamRoleArn the ARN of the IAM role to be used for the job
+ * @param manifestLocation the location of the job manifest file in S3
+ * @param reportBucketName the name of the S3 bucket to store the job report
+ * @param uuid a unique identifier for the job
+ * @return a CompletableFuture that represents the asynchronous creation of the S3 job.
+ * The CompletableFuture will return the job ID if the job is created successfully,
+ * or throw an exception if there is an error.
+ */
+    public CompletableFuture<String> createS3JobAsync(String accountId, String iamRoleArn,
+ String manifestLocation, String reportBucketName, String uuid) {
+
+ String[] bucketName = new String[]{""};
+ String[] parts = reportBucketName.split(":::");
+ if (parts.length > 1) {
+ bucketName[0] = parts[1];
+ } else {
+ System.out.println("The input string does not contain the expected format.");
+ }
+
+ return CompletableFuture.supplyAsync(() -> getETag(bucketName[0], "job-manifest.csv"))
+ .thenCompose(eTag -> {
+                ArrayList<S3Tag> tagSet = new ArrayList<>();
+ S3Tag s3Tag = S3Tag.builder()
+ .key("keyOne")
+ .value("ValueOne")
+ .build();
+ S3Tag s3Tag2 = S3Tag.builder()
+ .key("keyTwo")
+ .value("ValueTwo")
+ .build();
+ tagSet.add(s3Tag);
+ tagSet.add(s3Tag2);
+
+ S3SetObjectTaggingOperation objectTaggingOperation = S3SetObjectTaggingOperation.builder()
+ .tagSet(tagSet)
+ .build();
+
+ JobOperation jobOperation = JobOperation.builder()
+ .s3PutObjectTagging(objectTaggingOperation)
+ .build();
+
+ JobManifestLocation jobManifestLocation = JobManifestLocation.builder()
+ .objectArn(manifestLocation)
+ .eTag(eTag)
+ .build();
+
+ JobManifestSpec manifestSpec = JobManifestSpec.builder()
+ .fieldsWithStrings("Bucket", "Key")
+ .format("S3BatchOperations_CSV_20180820")
+ .build();
+
+ JobManifest jobManifest = JobManifest.builder()
+ .spec(manifestSpec)
+ .location(jobManifestLocation)
+ .build();
+
+ JobReport jobReport = JobReport.builder()
+ .bucket(reportBucketName)
+ .prefix("reports")
+ .format("Report_CSV_20180820")
+ .enabled(true)
+ .reportScope("AllTasks")
+ .build();
+
+ CreateJobRequest jobRequest = CreateJobRequest.builder()
+ .accountId(accountId)
+ .description("Job created using the AWS Java SDK")
+ .manifest(jobManifest)
+ .operation(jobOperation)
+ .report(jobReport)
+ .priority(42)
+ .roleArn(iamRoleArn)
+ .clientRequestToken(uuid)
+ .confirmationRequired(false)
+ .build();
+
+ // Create the job asynchronously.
+ return getAsyncClient().createJob(jobRequest)
+ .thenApply(CreateJobResponse::jobId);
+ })
+ .handle((jobId, ex) -> {
+ if (ex != null) {
+ Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
+ if (cause instanceof S3ControlException) {
+ throw new CompletionException(cause);
+ } else {
+ throw new RuntimeException(cause);
+ }
+ }
+ return jobId;
+ });
+ }
+ // snippet-end:[s3control.java2.create_job.async.main]
+
+ /**
+ * Retrieves the ETag (Entity Tag) for an object stored in an Amazon S3 bucket.
+ *
+ * @param bucketName the name of the Amazon S3 bucket where the object is stored
+ * @param key the key (file name) of the object in the Amazon S3 bucket
+ * @return the ETag of the object
+ */
+ public String getETag(String bucketName, String key) {
+ S3Client s3Client = S3Client.builder()
+ .region(Region.US_EAST_1)
+ .build();
+
+ HeadObjectRequest headObjectRequest = HeadObjectRequest.builder()
+ .bucket(bucketName)
+ .key(key)
+ .build();
+
+ HeadObjectResponse headObjectResponse = s3Client.headObject(headObjectRequest);
+ return headObjectResponse.eTag();
+ }
+
+ // snippet-start:[s3control.java2.job.put.tags.main]
+ /**
+ * Asynchronously adds tags to a job in the system.
+ *
+ * @param jobId the ID of the job to add tags to
+ * @param accountId the account ID associated with the job
+ * @return a CompletableFuture that completes when the tagging operation is finished
+ */
+ public CompletableFuture<Void> putJobTaggingAsync(String jobId, String accountId) {
+ S3Tag departmentTag = S3Tag.builder()
+ .key("department")
+ .value("Marketing")
+ .build();
+
+ S3Tag fiscalYearTag = S3Tag.builder()
+ .key("FiscalYear")
+ .value("2020")
+ .build();
+
+ PutJobTaggingRequest putJobTaggingRequest = PutJobTaggingRequest.builder()
+ .jobId(jobId)
+ .accountId(accountId)
+ .tags(departmentTag, fiscalYearTag)
+ .build();
+
+ return asyncClient.putJobTagging(putJobTaggingRequest)
+ .thenRun(() -> {
+ System.out.println("Additional Tags were added to job " + jobId);
+ })
+ .exceptionally(ex -> {
+ System.err.println("Failed to add tags to job: " + ex.getMessage());
+ throw new RuntimeException(ex); // Propagate the exception
+ });
+ }
+ // snippet-end:[s3control.java2.job.put.tags.main]
+
+ // Set up the S3 bucket required for this scenario.
+ /**
+ * Creates an Amazon S3 bucket with the specified name.
+ *
+ * @param bucketName the name of the S3 bucket to create
+ * @throws S3Exception if there is an error creating the bucket
+ */
+ public void createBucket(String bucketName) {
+ try {
+ S3Client s3Client = S3Client.builder()
+ .region(Region.US_EAST_1)
+ .build();
+
+
+ S3Waiter s3Waiter = s3Client.waiter();
+ CreateBucketRequest bucketRequest = CreateBucketRequest.builder()
+ .bucket(bucketName)
+ .build();
+
+ s3Client.createBucket(bucketRequest);
+ HeadBucketRequest bucketRequestWait = HeadBucketRequest.builder()
+ .bucket(bucketName)
+ .build();
+
+ // Wait until the bucket is created and print out the response.
+ WaiterResponse<HeadBucketResponse> waiterResponse = s3Waiter.waitUntilBucketExists(bucketRequestWait);
+ waiterResponse.matched().response().ifPresent(System.out::println);
+ System.out.println(bucketName + " is ready");
+
+ } catch (S3Exception e) {
+ System.err.println(e.awsErrorDetails().errorMessage());
+ System.exit(1);
+ }
+ }
+
+ /**
+ * Uploads a file to an Amazon S3 bucket asynchronously.
+ *
+ * @param bucketName the name of the S3 bucket to upload the file to
+ * @param fileName the name of the file to be uploaded
+ * @throws RuntimeException if an error occurs during the file upload
+ */
+ public void populateBucket(String bucketName, String fileName) {
+ // Define the path to the directory.
+ Path filePath = Paths.get("src/main/resources/batch/", fileName).toAbsolutePath();
+ PutObjectRequest putOb = PutObjectRequest.builder()
+ .bucket(bucketName)
+ .key(fileName)
+ .build();
+
+ CompletableFuture<PutObjectResponse> future = getS3AsyncClient().putObject(putOb, AsyncRequestBody.fromFile(filePath));
+ future.whenComplete((result, ex) -> {
+ if (ex != null) {
+ System.err.println("Error uploading file: " + ex.getMessage());
+ } else {
+ System.out.println("Successfully placed " + fileName + " into bucket " + bucketName);
+ }
+ }).join();
+ }
+
+
+ // Update the bucketName in CSV.
+ public void updateCSV(String newValue) {
+ Path csvFilePath = Paths.get("src/main/resources/batch/job-manifest.csv").toAbsolutePath();
+ try {
+ // Read all lines from the CSV file.
+ List<String> lines = Files.readAllLines(csvFilePath);
+
+ // Update the first value in each line.
+ List<String> updatedLines = lines.stream()
+ .map(line -> {
+ String[] parts = line.split(",");
+ parts[0] = newValue;
+ return String.join(",", parts);
+ })
+ .collect(Collectors.toList());
+
+ // Write the updated lines back to the CSV file
+ Files.write(csvFilePath, updatedLines);
+ System.out.println("CSV file updated successfully.");
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * Deletes an object from an Amazon S3 bucket asynchronously.
+ *
+ * @param bucketName The name of the S3 bucket where the object is stored.
+ * @param objectName The name of the object to be deleted.
+ * @return A {@link CompletableFuture} that completes when the object has been deleted,
+ * or throws a {@link RuntimeException} if an error occurs during the deletion.
+ */
+ public CompletableFuture<Void> deleteBucketObjects(String bucketName, String objectName) {
+ ArrayList<ObjectIdentifier> toDelete = new ArrayList<>();
+ toDelete.add(ObjectIdentifier.builder()
+ .key(objectName)
+ .build());
+
+ DeleteObjectsRequest dor = DeleteObjectsRequest.builder()
+ .bucket(bucketName)
+ .delete(Delete.builder()
+ .objects(toDelete).build())
+ .build();
+
+ return getS3AsyncClient().deleteObjects(dor)
+ .thenAccept(result -> {
+ System.out.println("The object was deleted!");
+ })
+ .exceptionally(ex -> {
+ throw new RuntimeException("Error deleting object: " + ex.getMessage(), ex);
+ });
+ }
+
+ /**
+ * Deletes the "reports" folder and all of its contents from an Amazon S3 bucket.
+ * Although the deletions run asynchronously, this method blocks until they complete.
+ *
+ * @param bucketName the name of the S3 bucket containing the folder to be deleted
+ * @throws RuntimeException if any error occurs during the deletion process
+ */
+ public void deleteBucketFolderAsync(String bucketName) {
+ String folderName = "reports/";
+ ListObjectsV2Request request = ListObjectsV2Request.builder()
+ .bucket(bucketName)
+ .prefix(folderName)
+ .build();
+
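+ // List every object under the "reports/" prefix, delete each one, and then delete the prefix placeholder itself.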
+ CompletableFuture<ListObjectsV2Response> listObjectsFuture = getS3AsyncClient().listObjectsV2(request);
+ listObjectsFuture.thenCompose(response -> {
+ List<CompletableFuture<DeleteObjectResponse>> deleteFutures = response.contents().stream()
+ .map(obj -> {
+ DeleteObjectRequest deleteRequest = DeleteObjectRequest.builder()
+ .bucket(bucketName)
+ .key(obj.key())
+ .build();
+ return getS3AsyncClient().deleteObject(deleteRequest)
+ .thenApply(deleteResponse -> {
+ System.out.println("Deleted object: " + obj.key());
+ return deleteResponse;
+ });
+ })
+ .collect(Collectors.toList());
+
+ return CompletableFuture.allOf(deleteFutures.toArray(new CompletableFuture[0]))
+ .thenCompose(v -> {
+ // Delete the folder.
+ DeleteObjectRequest deleteRequest = DeleteObjectRequest.builder()
+ .bucket(bucketName)
+ .key(folderName)
+ .build();
+ return getS3AsyncClient().deleteObject(deleteRequest)
+ .thenApply(deleteResponse -> {
+ System.out.println("Deleted folder: " + folderName);
+ return deleteResponse;
+ });
+ });
+ }).join();
+ }
+
+ /**
+ * Deletes an Amazon S3 bucket.
+ *
+ * @param bucketName the name of the bucket to delete
+ * @return a {@link CompletableFuture} that completes when the bucket has been deleted, or exceptionally if there is an error
+ * @throws RuntimeException if there is an error deleting the bucket
+ */
+ public CompletableFuture<Void> deleteBucket(String bucketName) {
+ S3AsyncClient s3Client = getS3AsyncClient();
+ return s3Client.deleteBucket(DeleteBucketRequest.builder()
+ .bucket(bucketName)
+ .build())
+ .thenAccept(deleteBucketResponse -> {
+ System.out.println(bucketName + " was deleted");
+ })
+ .exceptionally(ex -> {
+ // Handle the exception or rethrow it.
+ throw new RuntimeException("Failed to delete bucket: " + bucketName, ex);
+ });
+ }
+
+ /**
+ * Uploads a set of files to an Amazon S3 bucket.
+ *
+ * @param bucketName the name of the S3 bucket to upload the files to
+ * @param fileNames an array of file names to be uploaded
+ * @param actions an instance of {@link S3BatchActions} that provides the implementation for the necessary S3 operations
+ * @throws IOException if there's an error creating the text files or uploading the files to the S3 bucket
+ */
+ public static void uploadFilesToBucket(String bucketName, String[] fileNames, S3BatchActions actions) throws IOException {
+ actions.updateCSV(bucketName);
+ createTextFiles(fileNames);
+ for (String fileName : fileNames) {
+ actions.populateBucket(bucketName, fileName);
+ }
+ System.out.println("All files are placed in the S3 bucket " + bucketName);
+ }
+
+ /**
+ * Deletes the specified files from the given S3 bucket.
+ *
+ * @param bucketName the name of the S3 bucket
+ * @param fileNames an array of file names to be deleted from the bucket
+ * @param actions the S3BatchActions instance to be used for the file deletion
+ * @throws IOException if an I/O error occurs during the file deletion
+ */
+ public void deleteFilesFromBucket(String bucketName, String[] fileNames, S3BatchActions actions) throws IOException {
+ for (String fileName : fileNames) {
+ actions.deleteBucketObjects(bucketName, fileName)
+ .thenRun(() -> System.out.println("Object deletion completed"))
+ .exceptionally(ex -> {
+ System.err.println("Error occurred: " + ex.getMessage());
+ return null;
+ });
+ }
+ System.out.println("All files have been deleted from the bucket " + bucketName);
+ }
+
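+ /**
+ * Creates the local text files that the scenario uploads to the S3 bucket.
+ *
+ * @param fileNames the names of the files to create; only names ending in ".txt" are written
+ */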
+ public static void createTextFiles(String[] fileNames) {
+ String currentDirectory = System.getProperty("user.dir");
+ // Build the path in a platform-independent way so it works on any operating system.
+ Path path = Paths.get(currentDirectory, "src", "main", "resources", "batch");
+
+ try {
+ // Create the directory if it doesn't exist.
+ if (Files.notExists(path)) {
+ Files.createDirectories(path);
+ System.out.println("Created directory: " + path.toString());
+ } else {
+ System.out.println("Directory already exists: " + path.toString());
+ }
+
+ for (String fileName : fileNames) {
+ // Check if the file is a .txt file.
+ if (fileName.endsWith(".txt")) {
+ // Define the path for the new file.
+ Path filePath = path.resolve(fileName);
+ System.out.println("Attempting to create file: " + filePath.toString());
+
+ // Create and write content to the new file.
+ Files.write(filePath, "This is a test".getBytes());
+
+ // Verify the file was created.
+ if (Files.exists(filePath)) {
+ System.out.println("Successfully created file: " + filePath.toString());
+ } else {
+ System.out.println("Failed to create file: " + filePath.toString());
+ }
+ }
+ }
+
+ } catch (IOException e) {
+ System.err.println("An error occurred: " + e.getMessage());
+ e.printStackTrace();
+ }
+ }
+
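+ /**
+ * Returns the AWS account ID of the current caller by using the AWS Security Token Service (STS).
+ *
+ * @return the caller's AWS account ID
+ */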
+ public String getAccountId() {
+ StsClient stsClient = StsClient.builder()
+ .region(Region.US_EAST_1)
+ .build();
+
+ GetCallerIdentityResponse callerIdentityResponse = stsClient.getCallerIdentity();
+ return callerIdentityResponse.account();
+ }
+}
+// snippet-end:[s3control.java2.job.actions.main]
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/batch/S3BatchScenario.java b/javav2/example_code/s3/src/main/java/com/example/s3/batch/S3BatchScenario.java
new file mode 100644
index 00000000000..9cd4be0cce2
--- /dev/null
+++ b/javav2/example_code/s3/src/main/java/com/example/s3/batch/S3BatchScenario.java
@@ -0,0 +1,238 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// snippet-start:[s3control.java2.job.scenario.main]
+package com.example.s3.batch;
+
+import software.amazon.awssdk.services.s3.model.S3Exception;
+import java.io.IOException;
+import java.util.Map;
+import java.util.Scanner;
+import java.util.UUID;
+import java.util.concurrent.CompletionException;
+
+public class S3BatchScenario {
+
+ public static final String DASHES = new String(new char[80]).replace("\0", "-");
+ private static final String STACK_NAME = "MyS3Stack";
+ public static void main(String[] args) throws IOException {
+ S3BatchActions actions = new S3BatchActions();
+ String accountId = actions.getAccountId();
+ String uuid = java.util.UUID.randomUUID().toString();
+ Scanner scanner = new Scanner(System.in);
+
+ System.out.println(DASHES);
+ System.out.println("Welcome to the Amazon S3 Batch basics scenario.");
+ System.out.println("""
+ S3 Batch operations enables efficient and cost-effective processing of large-scale
+ data stored in Amazon S3. It automatically scales resources to handle varying workloads
+ without the need for manual intervention.
+
+ One of the key features of S3 Batch is its ability to perform tagging operations on objects stored in
+ S3 buckets. Users can leverage S3 Batch to apply, update, or remove tags on thousands or millions of
+ objects in a single operation, streamlining the management and organization of their data.
+
+ This can be particularly useful for tasks such as cost allocation, lifecycle management, or
+ metadata-driven workflows, where consistent and accurate tagging is essential.
+ S3 Batch's scalability and serverless nature make it an ideal solution for organizations with
+ growing data volumes and complex data management requirements.
+
+ This Java program walks you through Amazon S3 Batch operations.
+
+ Let's get started...
+
+ """);
+ waitForInputToContinue(scanner);
+ // Use CloudFormation to stand up the resource required for this scenario.
+ System.out.println("Use CloudFormation to stand up the resource required for this scenario.");
+ CloudFormationHelper.deployCloudFormationStack(STACK_NAME);
+
+ Map<String, String> stackOutputs = CloudFormationHelper.getStackOutputs(STACK_NAME);
+ String iamRoleArn = stackOutputs.get("S3BatchRoleArn");
+ System.out.println(DASHES);
+
+ System.out.println(DASHES);
+ System.out.println("Setup the required bucket for this scenario.");
+ waitForInputToContinue(scanner);
+ String bucketName = "x-" + UUID.randomUUID();
+ actions.createBucket(bucketName);
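+ // CreateJob expects ARNs for both the manifest object and the report bucket, so build them from the new bucket's name.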
+ String reportBucketName = "arn:aws:s3:::"+bucketName;
+ String manifestLocation = "arn:aws:s3:::"+bucketName+"/job-manifest.csv";
+ System.out.println("Populate the bucket with the required files.");
+ String[] fileNames = {"job-manifest.csv", "object-key-1.txt", "object-key-2.txt", "object-key-3.txt", "object-key-4.txt"};
+ actions.uploadFilesToBucket(bucketName, fileNames, actions);
+ waitForInputToContinue(scanner);
+ System.out.println(DASHES);
+
+ System.out.println(DASHES);
+ System.out.println("1. Create a S3 Batch Job");
+ System.out.println("This job tags all objects listed in the manifest file with tags");
+ waitForInputToContinue(scanner);
+ String jobId ;
+ try {
+ jobId = actions.createS3JobAsync(accountId, iamRoleArn, manifestLocation, reportBucketName, uuid).join();
+ System.out.println("The Job id is " + jobId);
+
+ } catch (S3Exception e) {
+ System.err.println("SSM error: " + e.getMessage());
+ return;
+ } catch (RuntimeException e) {
+ System.err.println("Unexpected error: " + e.getMessage());
+ return;
+ }
+
+ waitForInputToContinue(scanner);
+ System.out.println(DASHES);
+
+ System.out.println(DASHES);
+ System.out.println("2. Update an existing S3 Batch Operations job's priority");
+ System.out.println("""
+ In this step, we modify the job priority value. The higher the number, the higher the priority.
+ So, a job with a priority of `30` would have a higher priority than a job with
+ a priority of `20`. This is a common way to represent the priority of a task
+ or job, with higher numbers indicating a higher priority.
+
+ Ensure that the job status allows for priority updates. Jobs in certain
+ states (e.g., Cancelled, Failed, or Completed) cannot have their priorities
+ updated. Only jobs in the Active or Suspended state typically allow priority
+ updates.
+ """);
+
+ try {
+ actions.updateJobPriorityAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ System.err.println("Update job priority failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ System.err.println("Failed to update job priority: " + ex.getMessage());
+ }
+ waitForInputToContinue(scanner);
+ System.out.println(DASHES);
+
+ System.out.println(DASHES);
+ System.out.println("3. Cancel the S3 Batch job");
+ System.out.print("Do you want to cancel the Batch job? (y/n): ");
+ String cancelAns = scanner.nextLine();
+ if (cancelAns != null && cancelAns.trim().equalsIgnoreCase("y")) {
+ try {
+ actions.cancelJobAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ System.err.println("Cancel job failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ System.err.println("Failed to cancel job: " + ex.getMessage());
+ }
+ } else {
+ System.out.println("Job " +jobId +" was not canceled.");
+ }
+ System.out.println(DASHES);
+
+ System.out.println(DASHES);
+ System.out.println("4. Describe the job that was just created");
+ waitForInputToContinue(scanner);
+ try {
+ actions.describeJobAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ System.err.println("Describe job failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ System.err.println("Failed to describe job: " + ex.getMessage());
+ }
+ System.out.println(DASHES);
+
+ System.out.println(DASHES);
+ System.out.println("5. Describe the tags associated with the job");
+ waitForInputToContinue(scanner);
+ try {
+ actions.getJobTagsAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ System.err.println("Get job tags failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ System.err.println("Failed to get job tags: " + ex.getMessage());
+ }
+ System.out.println(DASHES);
+
+ System.out.println(DASHES);
+ System.out.println("6. Update Batch Job Tags");
+ waitForInputToContinue(scanner);
+ try {
+ actions.putJobTaggingAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ System.err.println("Put job tagging failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ System.err.println("Failed to put job tagging: " + ex.getMessage());
+ }
+ System.out.println(DASHES);
+
+ System.out.println(DASHES);
+ System.out.println("7. Delete the Amazon S3 Batch job tagging.");
+ System.out.print("Do you want to delete Batch job tagging? (y/n)");
+ String delAns = scanner.nextLine();
+ if (delAns != null && delAns.trim().equalsIgnoreCase("y")) {
+ try {
+ actions.deleteBatchJobTagsAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ System.err.println("Delete batch job tags failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ System.err.println("Failed to delete batch job tags: " + ex.getMessage());
+ }
+ } else {
+ System.out.println("Tagging was not deleted.");
+ }
+ System.out.println(DASHES);
+
+ System.out.println(DASHES);
+ System.out.print("Do you want to delete the AWS resources used in this scenario? (y/n)");
+ String delResAns = scanner.nextLine();
+ if (delResAns != null && delResAns.trim().equalsIgnoreCase("y")) {
+ actions.deleteFilesFromBucket(bucketName, fileNames, actions);
+ actions.deleteBucketFolderAsync(bucketName);
+ actions.deleteBucket(bucketName)
+ .thenRun(() -> System.out.println("Bucket deletion completed"))
+ .exceptionally(ex -> {
+ System.err.println("Error occurred: " + ex.getMessage());
+ return null;
+ });
+ CloudFormationHelper.destroyCloudFormationStack(STACK_NAME);
+ } else {
+ System.out.println("The AWS resources were not deleted.");
+ }
+ System.out.println("The Amazon S3 Batch scenario has successfully completed.");
+ System.out.println(DASHES);
+ }
+
+ private static void waitForInputToContinue(Scanner scanner) {
+ while (true) {
+ System.out.println();
+ System.out.println("Enter 'c' followed by to continue:");
+ String input = scanner.nextLine();
+
+ if (input.trim().equalsIgnoreCase("c")) {
+ System.out.println("Continuing with the program...");
+ System.out.println();
+ break;
+ } else {
+ // Handle invalid input.
+ System.out.println("Invalid input. Please try again.");
+ }
+ }
+ }
+
+
+}
+// snippet-end:[s3control.java2.job.scenario.main]
\ No newline at end of file
diff --git a/javav2/example_code/s3/src/main/resources/batch/job-manifest.csv b/javav2/example_code/s3/src/main/resources/batch/job-manifest.csv
new file mode 100644
index 00000000000..8d08f24a3b4
--- /dev/null
+++ b/javav2/example_code/s3/src/main/resources/batch/job-manifest.csv
@@ -0,0 +1,4 @@
+x-f8798006-c1ac-44b9-8372-ec2b4704faa7,object-key-1.txt
+x-f8798006-c1ac-44b9-8372-ec2b4704faa7,object-key-2.txt
+x-f8798006-c1ac-44b9-8372-ec2b4704faa7,object-key-3.txt
+x-f8798006-c1ac-44b9-8372-ec2b4704faa7,object-key-4.txt
diff --git a/javav2/example_code/s3/src/test/java/S3BatchTest.java b/javav2/example_code/s3/src/test/java/S3BatchTest.java
new file mode 100644
index 00000000000..9ada7bfe46d
--- /dev/null
+++ b/javav2/example_code/s3/src/test/java/S3BatchTest.java
@@ -0,0 +1,207 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+import com.example.s3.batch.CloudFormationHelper;
+import com.example.s3.batch.HelloS3Batch;
+import com.example.s3.batch.S3BatchActions;
+import com.google.gson.Gson;
+import org.junit.jupiter.api.*;
+import java.io.IOException;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.CompletionException;
+
+import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.model.S3Exception;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse;
+
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.fail;
+
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+public class S3BatchTest {
+
+ private static String accountId = "";
+ private static final String STACK_NAME = "MyS3Stack";
+ private static String bucketName;
+ private static String reportBucketName;
+ private static String manifestLocation;
+ private static S3BatchActions actions;
+ private static String jobId;
+
+ private static String iamRoleArn;
+
+ @BeforeAll
+ public static void setUp() throws IOException {
+ // Get the values to run these tests from AWS Secrets Manager.
+ Gson gson = new Gson();
+ String json = getSecretValues();
+ AmazonS3Test.SecretValues values = gson.fromJson(json, AmazonS3Test.SecretValues.class);
+ accountId = values.getAccountId();
+
+ actions = new S3BatchActions();
+ CloudFormationHelper.deployCloudFormationStack(STACK_NAME);
+ Map<String, String> stackOutputs = CloudFormationHelper.getStackOutputs(STACK_NAME);
+ iamRoleArn = stackOutputs.get("S3BatchRoleArn");
+
+ bucketName = "x-" + UUID.randomUUID();
+ actions.createBucket(bucketName);
+ reportBucketName = "arn:aws:s3:::" + bucketName;
+ manifestLocation = "arn:aws:s3:::" + bucketName + "/job-manifest.csv";
+
+ String[] fileNames = {"job-manifest.csv", "object-key-1.txt", "object-key-2.txt", "object-key-3.txt", "object-key-4.txt"};
+ actions.uploadFilesToBucket(bucketName, fileNames, actions);
+ }
+
+ @AfterAll
+ public static void tearDown() throws IOException {
+ if (actions != null) {
+ String[] fileNames = {"job-manifest.csv", "object-key-1.txt", "object-key-2.txt", "object-key-3.txt", "object-key-4.txt"};
+ actions.deleteFilesFromBucket(bucketName, fileNames, actions);
+ actions.deleteBucketFolderAsync(bucketName);
+ actions.deleteBucket(bucketName);
+ CloudFormationHelper.destroyCloudFormationStack(STACK_NAME);
+ }
+ }
+
+ @Test
+ @Tag("IntegrationTest")
+ @Order(1)
+ public void testCreateAndCancelJob() {
+ try {
+ jobId = actions.createS3JobAsync(accountId, iamRoleArn, manifestLocation, reportBucketName, UUID.randomUUID().toString()).join();
+ assertNotNull(jobId);
+ } catch (S3Exception e) {
+ fail("S3Exception during job creation: " + e.getMessage());
+ } catch (RuntimeException e) {
+ fail("Unexpected error during job creation: " + e.getMessage());
+ }
+
+ try {
+ actions.updateJobPriorityAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ fail("Update job priority failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ fail("Failed to update job priority: " + ex.getMessage());
+ }
+
+ try {
+ actions.cancelJobAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ fail("Cancel job failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ fail("Failed to cancel job: " + ex.getMessage());
+ }
+ }
+
+ @Test
+ @Tag("IntegrationTest")
+ @Order(2)
+ public void testDescribeJob() {
+ try {
+ actions.describeJobAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ fail("Describe job failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ fail("Failed to describe job: " + ex.getMessage());
+ }
+ }
+
+ @Test
+ @Tag("IntegrationTest")
+ @Order(3)
+ public void testGetAndPutJobTags() {
+ try {
+ actions.getJobTagsAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ fail("Get job tags failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ fail("Failed to get job tags: " + ex.getMessage());
+ }
+
+ try {
+ actions.putJobTaggingAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ fail("Put job tagging failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ fail("Failed to put job tagging: " + ex.getMessage());
+ }
+ }
+
+ @Test
+ @Tag("IntegrationTest")
+ @Order(4)
+ public void testListBatchJobs() {
+ try {
+ HelloS3Batch.listBatchJobsAsync(accountId)
+ .exceptionally(ex -> {
+ fail("List batch jobs failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ fail("Failed to list batch jobs: " + ex.getMessage());
+ }
+ }
+
+ @Test
+ @Tag("IntegrationTest")
+ @Order(5)
+ public void testDeleteJobTags() {
+ try {
+ actions.deleteBatchJobTagsAsync(jobId, accountId)
+ .exceptionally(ex -> {
+ fail("Delete batch job tags failed: " + ex.getMessage());
+ return null;
+ })
+ .join();
+ } catch (CompletionException ex) {
+ fail("Failed to delete batch job tags: " + ex.getMessage());
+ }
+ }
+
+ private static String getSecretValues() {
+ SecretsManagerClient secretClient = SecretsManagerClient.builder()
+ .region(Region.US_EAST_1)
+ .credentialsProvider(EnvironmentVariableCredentialsProvider.create())
+ .build();
+ String secretName = "test/s3";
+
+ GetSecretValueRequest valueRequest = GetSecretValueRequest.builder()
+ .secretId(secretName)
+ .build();
+
+ GetSecretValueResponse valueResponse = secretClient.getSecretValue(valueRequest);
+ return valueResponse.secretString();
+ }
+
+ @Nested
+ @DisplayName("A class used to get test values from test/s3 (an AWS Secrets Manager secret)")
+ class SecretValues {
+ private String accountId;
+ public String getAccountId() {
+ return accountId;
+ }
+
+
+ }
+}
diff --git a/javav2/example_code/s3/template.yaml b/javav2/example_code/s3/template.yaml
new file mode 100644
index 00000000000..952db67fac1
--- /dev/null
+++ b/javav2/example_code/s3/template.yaml
@@ -0,0 +1,142 @@
+Resources:
+ S3BatchRole8238262D:
+ Type: AWS::IAM::Role
+ Properties:
+ AssumeRolePolicyDocument:
+ Statement:
+ - Action: sts:AssumeRole
+ Effect: Allow
+ Principal:
+ Service: batchoperations.s3.amazonaws.com
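+ # The S3 Batch Operations service principal assumes this role when it runs the job.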
+ Version: "2012-10-17"
+ Metadata:
+ aws:cdk:path: MyS3Stack/S3BatchRole/Resource
+ S3BatchPolicy13F783AA:
+ Type: AWS::IAM::Policy
+ Properties:
+ PolicyDocument:
+ Statement:
+ - Action:
+ - s3:DeleteObject
+ - s3:GetObject
+ - s3:ListBucket
+ - s3:PutObject
+ Effect: Allow
+ Resource: "*"
+ Version: "2012-10-17"
+ PolicyName: S3BatchPolicy13F783AA
+ Roles:
+ - Ref: S3BatchRole8238262D
+ Metadata:
+ aws:cdk:path: MyS3Stack/S3BatchPolicy/Resource
+ CDKMetadata:
+ Type: AWS::CDK::Metadata
+ Properties:
+ Analytics: v2:deflate64:H4sIAAAAAAAA/zPSMzQ21TNQTCwv1k1OydbNyUzSqw4uSUzO1kksL47PTMzVqw7Kz0nVcU7LA9MB+TmZyZUgLoRVqxOUWpxfWpQMVuJfWlJQWgJWjCTqnJ+XklmSmZ9Xq+OVWJaob2iuZ6BnaKCYVZyZqVtUmleSmZuqFwShAWIQoq+RAAAA
+ Metadata:
+ aws:cdk:path: MyS3Stack/CDKMetadata/Default
+ Condition: CDKMetadataAvailable
+Outputs:
+ S3BatchRoleArn:
+ Description: The ARN of the S3 Batch Role
+ Value:
+ Fn::GetAtt:
+ - S3BatchRole8238262D
+ - Arn
+Conditions:
+ CDKMetadataAvailable:
+ Fn::Or:
+ - Fn::Or:
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - af-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-east-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-northeast-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-northeast-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-southeast-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ap-southeast-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - ca-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - cn-north-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - cn-northwest-1
+ - Fn::Or:
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-north-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-west-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-west-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - eu-west-3
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - il-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - me-central-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - me-south-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - sa-east-1
+ - Fn::Or:
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-east-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-east-2
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-west-1
+ - Fn::Equals:
+ - Ref: AWS::Region
+ - us-west-2
+Parameters:
+ BootstrapVersion:
+ Type: AWS::SSM::Parameter::Value<String>
+ Default: /cdk-bootstrap/hnb659fds/version
+ Description: Version of the CDK Bootstrap resources in this environment, automatically retrieved from SSM Parameter Store. [cdk:skip]
+Rules:
+ CheckBootstrapVersion:
+ Assertions:
+ - Assert:
+ Fn::Not:
+ - Fn::Contains:
+ - - "1"
+ - "2"
+ - "3"
+ - "4"
+ - "5"
+ - Ref: BootstrapVersion
+ AssertDescription: CDK bootstrap stack version 6 required. Please run 'cdk bootstrap' with a recent version of the CDK CLI.
+
From ae69757d912e3f1cf4ab537e00dffa4dc2421901 Mon Sep 17 00:00:00 2001
From: Steven Meyer <108885656+meyertst-aws@users.noreply.github.com>
Date: Fri, 2 Aug 2024 09:24:20 -0400
Subject: [PATCH 15/98] Multi-language: HealthImaging - update examples (#6696)
---
.../metadata/medical-imaging_metadata.yaml | 30 ++-
.../medical-imaging/actions/copy-image-set.js | 194 +++++++++-------
.../actions/update-image-set-metadata.js | 83 ++++---
.../example_code/medical-imaging/package.json | 10 +-
.../update-image-set-metadata.unit.test.js | 111 ++++-----
javav2/example_code/medicalimaging/README.md | 4 +-
javav2/example_code/medicalimaging/pom.xml | 23 +-
.../example/medicalimaging/CopyImageSet.java | 211 ++++++++++++++----
.../medicalimaging/SearchImageSets.java | 2 +-
.../UpdateImageSetMetadata.java | 197 +++++++++-------
.../src/test/java/AWSMedicalImagingTest.java | 18 +-
python/example_code/medical-imaging/README.md | 10 +-
.../medical-imaging/medical_imaging_basics.py | 46 +++-
.../medical-imaging/requirements.txt | 4 +-
.../test/test_medical_imaging_basics.py | 20 +-
python/test_tools/medical_imaging_stubber.py | 19 +-
16 files changed, 646 insertions(+), 336 deletions(-)
diff --git a/.doc_gen/metadata/medical-imaging_metadata.yaml b/.doc_gen/metadata/medical-imaging_metadata.yaml
index 9b1b97be602..9ae05d50516 100644
--- a/.doc_gen/metadata/medical-imaging_metadata.yaml
+++ b/.doc_gen/metadata/medical-imaging_metadata.yaml
@@ -661,6 +661,7 @@ medical-imaging_UpdateImageSetMetadata:
github_note_at_bottom: true
excerpts:
- description:
+ genai: some
snippet_tags:
- python.example_code.medical-imaging.MedicalImagingWrapper
- python.example_code.medical-imaging.UpdateImageSetMetadata
@@ -677,6 +678,9 @@ medical-imaging_UpdateImageSetMetadata:
- description: 'Use case #3: Remove an instance.'
snippet_tags:
- python.example_code.medical-imaging.UpdateImageSetMetadata.remove_instance
+ - description: 'Use case #4: Revert to an earlier version.'
+ snippet_tags:
+ - python.example_code.medical-imaging.UpdateImageSetMetadata.revert
JavaScript:
versions:
- sdk_version: 3
@@ -684,9 +688,10 @@ medical-imaging_UpdateImageSetMetadata:
github_note_at_bottom: true
excerpts:
- description:
+ genai: some
snippet_tags:
- medical-imaging.JavaScript.datastore.updateImageSetMetadataV3
- - description: 'Use case #1: Insert or update an attribute.'
+ - description: 'Use case #1: Insert or update an attribute and force the update.'
snippet_tags:
- medical-imaging.JavaScript.datastore.updateImageSetMetadataV3.insert_or_update_attributes
- description: 'Use case #2: Remove an attribute.'
@@ -695,6 +700,9 @@ medical-imaging_UpdateImageSetMetadata:
- description: 'Use case #3: Remove an instance.'
snippet_tags:
- medical-imaging.JavaScript.datastore.updateImageSetMetadataV3.remove_instance
+ - description: 'Use case #4: Revert to an earlier version.'
+ snippet_tags:
+ - medical-imaging.JavaScript.datastore.updateImageSetMetadataV3.revert
Java:
versions:
- sdk_version: 2
@@ -702,6 +710,7 @@ medical-imaging_UpdateImageSetMetadata:
github_note_at_bottom: true
excerpts:
- description:
+ genai: some
snippet_tags:
- medicalimaging.java2.update_image_set_metadata.main
- description: 'Use case #1: Insert or update an attribute.'
@@ -713,6 +722,9 @@ medical-imaging_UpdateImageSetMetadata:
- description: 'Use case #3: Remove an instance.'
snippet_tags:
- medicalimaging.java2.update_image_set_metadata.remove_instance
+ - description: 'Use case #4: Revert to a previous version.'
+ snippet_tags:
+ - medicalimaging.java2.update_image_set_metadata.revert
services:
medical-imaging: {UpdateImageSetMetadata}
medical-imaging_CopyImageSet:
@@ -724,18 +736,24 @@ medical-imaging_CopyImageSet:
github_note_at_bottom: true
excerpts:
- description: Utility function to copy an image set.
+ genai: some
snippet_tags:
- python.example_code.medical-imaging.MedicalImagingWrapper
- python.example_code.medical-imaging.CopyImageSet
- description: Copy an image set without a destination.
snippet_tags:
- python.example_code.medical-imaging.CopyImageSet1
- - python.example_code.medical-imaging.CopyImageSet3
+ - python.example_code.medical-imaging.CopyImageSet4
- description: Copy an image set with a destination.
snippet_tags:
- python.example_code.medical-imaging.CopyImageSet1
- python.example_code.medical-imaging.CopyImageSet2
+ - python.example_code.medical-imaging.CopyImageSet4
+ - description: Copy a subset of an image set.
+ snippet_tags:
+ - python.example_code.medical-imaging.CopyImageSet1
- python.example_code.medical-imaging.CopyImageSet3
+ - python.example_code.medical-imaging.CopyImageSet4
- description: >
The following code instantiates the MedicalImagingWrapper object.
snippet_tags:
@@ -747,6 +765,7 @@ medical-imaging_CopyImageSet:
github_note_at_bottom: true
excerpts:
- description: Utility function to copy an image set.
+ genai: some
snippet_tags:
- medical-imaging.JavaScript.imageset.copyImageSetV3
- description: Copy an image set without a destination.
@@ -755,6 +774,9 @@ medical-imaging_CopyImageSet:
- description: Copy an image set with a destination.
snippet_tags:
- medical-imaging.JavaScript.imageset.copyImageSetV3.with_destination
+ - description: Copy a subset of an image set with a destination and force the copy.
+ snippet_tags:
+ - medical-imaging.JavaScript.imageset.copyImageSetV3.with_destination_and_subsets
Java:
versions:
- sdk_version: 2
@@ -762,8 +784,12 @@ medical-imaging_CopyImageSet:
github_note_at_bottom: true
excerpts:
- description:
+ genai: some
snippet_tags:
- medicalimaging.java2.copy_imageset.main
+ - description: Utility function to create copiable attributes.
+ snippet_tags:
+ - medicalimaging.java2.copy_imageset.copiable_attributes
services:
medical-imaging: {CopyImageSet}
medical-imaging_DeleteImageSet:
diff --git a/javascriptv3/example_code/medical-imaging/actions/copy-image-set.js b/javascriptv3/example_code/medical-imaging/actions/copy-image-set.js
index c01f14061f5..6a045332865 100644
--- a/javascriptv3/example_code/medical-imaging/actions/copy-image-set.js
+++ b/javascriptv3/example_code/medical-imaging/actions/copy-image-set.js
@@ -1,11 +1,11 @@
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
-import { fileURLToPath } from "url";
+import {fileURLToPath} from "url";
// snippet-start:[medical-imaging.JavaScript.imageset.copyImageSetV3]
-import { CopyImageSetCommand } from "@aws-sdk/client-medical-imaging";
-import { medicalImagingClient } from "../libs/medicalImagingClient.js";
+import {CopyImageSetCommand} from "@aws-sdk/client-medical-imaging";
+import {medicalImagingClient} from "../libs/medicalImagingClient.js";
/**
* @param {string} datastoreId - The ID of the data store.
@@ -13,90 +13,132 @@ import { medicalImagingClient } from "../libs/medicalImagingClient.js";
* @param {string} sourceVersionId - The source version ID.
* @param {string} destinationImageSetId - The optional ID of the destination image set.
* @param {string} destinationVersionId - The optional version ID of the destination image set.
+ * @param {boolean} force - Force the copy action.
+ * @param {string[]} copySubsets - A subset of instance IDs to copy.
*/
export const copyImageSet = async (
- datastoreId = "xxxxxxxxxxx",
- imageSetId = "xxxxxxxxxxxx",
- sourceVersionId = "1",
- destinationImageSetId = "",
- destinationVersionId = ""
+ datastoreId = "xxxxxxxxxxx",
+ imageSetId = "xxxxxxxxxxxx",
+ sourceVersionId = "1",
+ destinationImageSetId = "",
+ destinationVersionId = "",
+ force = false,
+ copySubsets = []
) => {
- const params = {
- datastoreId: datastoreId,
- sourceImageSetId: imageSetId,
- copyImageSetInformation: {
- sourceImageSet: { latestVersionId: sourceVersionId },
- },
- };
- if (destinationImageSetId !== "" && destinationVersionId !== "") {
- params.copyImageSetInformation.destinationImageSet = {
- imageSetId: destinationImageSetId,
- latestVersionId: destinationVersionId,
- };
- }
-
- const response = await medicalImagingClient.send(
- new CopyImageSetCommand(params)
- );
- console.log(response);
- // {
- // '$metadata': {
- // httpStatusCode: 200,
- // requestId: 'd9b219ce-cc48-4a44-a5b2-c5c3068f1ee8',
- // extendedRequestId: undefined,
- // cfId: undefined,
- // attempts: 1,
- // totalRetryDelay: 0
- // },
- // datastoreId: 'xxxxxxxxxxxxxx',
- // destinationImageSetProperties: {
- // createdAt: 2023-09-27T19:46:21.824Z,
- // imageSetArn: 'arn:aws:medical-imaging:us-east-1:xxxxxxxxxxx:datastore/xxxxxxxxxxxxx/imageset/xxxxxxxxxxxxxxxxxxx',
- // imageSetId: 'xxxxxxxxxxxxxxx',
- // imageSetState: 'LOCKED',
- // imageSetWorkflowStatus: 'COPYING',
- // latestVersionId: '1',
- // updatedAt: 2023-09-27T19:46:21.824Z
- // },
- // sourceImageSetProperties: {
- // createdAt: 2023-09-22T14:49:26.427Z,
- // imageSetArn: 'arn:aws:medical-imaging:us-east-1:xxxxxxxxxxx:datastore/xxxxxxxxxxxxx/imageset/xxxxxxxxxxxxxxxx',
- // imageSetId: 'xxxxxxxxxxxxxxxx',
- // imageSetState: 'LOCKED',
- // imageSetWorkflowStatus: 'COPYING_WITH_READ_ONLY_ACCESS',
- // latestVersionId: '4',
- // updatedAt: 2023-09-27T19:46:21.824Z
- // }
- // }
- return response;
+ try {
+ const params = {
+ datastoreId: datastoreId,
+ sourceImageSetId: imageSetId,
+ copyImageSetInformation: {
+ sourceImageSet: {latestVersionId: sourceVersionId},
+ },
+ force: force
+ };
+ if (destinationImageSetId !== "" && destinationVersionId !== "") {
+ params.copyImageSetInformation.destinationImageSet = {
+ imageSetId: destinationImageSetId,
+ latestVersionId: destinationVersionId,
+ };
+ }
+
+ if (copySubsets.length > 0) {
+ const copySubsetsJson = {
+ SchemaVersion: 1.1,
+ Study: {
+ Series: {
+ imageSetId: {
+ Instances: {}
+ }
+ }
+ }
+ };
+
+ for (let i = 0; i < copySubsets.length; i++) {
+ copySubsetsJson.Study.Series.imageSetId.Instances[
+ copySubsets[i]
+ ] = {};
+ }
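+ // The resulting structure maps each requested instance ID to an empty object, for example:
+ // {SchemaVersion: 1.1, Study: {Series: {imageSetId: {Instances: {"<instance id>": {}}}}}}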
+
+ params.copyImageSetInformation.dicomCopies = copySubsetsJson;
+ }
+
+ const response = await medicalImagingClient.send(
+ new CopyImageSetCommand(params)
+ );
+ console.log(response);
+ // {
+ // '$metadata': {
+ // httpStatusCode: 200,
+ // requestId: 'd9b219ce-cc48-4a44-a5b2-c5c3068f1ee8',
+ // extendedRequestId: undefined,
+ // cfId: undefined,
+ // attempts: 1,
+ // totalRetryDelay: 0
+ // },
+ // datastoreId: 'xxxxxxxxxxxxxx',
+ // destinationImageSetProperties: {
+ // createdAt: 2023-09-27T19:46:21.824Z,
+ // imageSetArn: 'arn:aws:medical-imaging:us-east-1:xxxxxxxxxxx:datastore/xxxxxxxxxxxxx/imageset/xxxxxxxxxxxxxxxxxxx',
+ // imageSetId: 'xxxxxxxxxxxxxxx',
+ // imageSetState: 'LOCKED',
+ // imageSetWorkflowStatus: 'COPYING',
+ // latestVersionId: '1',
+ // updatedAt: 2023-09-27T19:46:21.824Z
+ // },
+ // sourceImageSetProperties: {
+ // createdAt: 2023-09-22T14:49:26.427Z,
+ // imageSetArn: 'arn:aws:medical-imaging:us-east-1:xxxxxxxxxxx:datastore/xxxxxxxxxxxxx/imageset/xxxxxxxxxxxxxxxx',
+ // imageSetId: 'xxxxxxxxxxxxxxxx',
+ // imageSetState: 'LOCKED',
+ // imageSetWorkflowStatus: 'COPYING_WITH_READ_ONLY_ACCESS',
+ // latestVersionId: '4',
+ // updatedAt: 2023-09-27T19:46:21.824Z
+ // }
+ // }
+ return response;
+ } catch (err) {
+ console.error(err);
+ }
};
// snippet-end:[medical-imaging.JavaScript.imageset.copyImageSetV3]
// Invoke the following code if this file is being run directly.
if (process.argv[1] === fileURLToPath(import.meta.url)) {
- // snippet-start:[medical-imaging.JavaScript.imageset.copyImageSetV3.without_destination]
- try {
+ // snippet-start:[medical-imaging.JavaScript.imageset.copyImageSetV3.without_destination]
+
await copyImageSet(
- "12345678901234567890123456789012",
- "12345678901234567890123456789012",
- "1"
+ "12345678901234567890123456789012",
+ "12345678901234567890123456789012",
+ "1"
);
- } catch (err) {
- console.error(err);
- }
- // snippet-end:[medical-imaging.JavaScript.imageset.copyImageSetV3.without_destination]
+ // snippet-end:[medical-imaging.JavaScript.imageset.copyImageSetV3.without_destination]
+
+ // snippet-start:[medical-imaging.JavaScript.imageset.copyImageSetV3.with_destination]
- // snippet-start:[medical-imaging.JavaScript.imageset.copyImageSetV3.with_destination]
- try {
await copyImageSet(
- "12345678901234567890123456789012",
- "12345678901234567890123456789012",
- "4",
- "12345678901234567890123456789012",
- "1"
+ "12345678901234567890123456789012",
+ "12345678901234567890123456789012",
+ "1",
+ "12345678901234567890123456789012",
+ "1",
+ false,
);
- } catch (err) {
- console.error(err);
- }
- // snippet-end:[medical-imaging.JavaScript.imageset.copyImageSetV3.with_destination]
+
+ // snippet-end:[medical-imaging.JavaScript.imageset.copyImageSetV3.with_destination]
+
+ // snippet-start:[medical-imaging.JavaScript.imageset.copyImageSetV3.with_destination_and_subsets]
+
+ await copyImageSet(
+ "12345678901234567890123456789012",
+ "12345678901234567890123456789012",
+ "1",
+ "12345678901234567890123456789012",
+ "1",
+ true,
+ ["12345678901234567890123456789012", "11223344556677889900112233445566"]
+ );
+
+ // snippet-end:[medical-imaging.JavaScript.imageset.copyImageSetV3.with_destination_and_subsets]
+
}
diff --git a/javascriptv3/example_code/medical-imaging/actions/update-image-set-metadata.js b/javascriptv3/example_code/medical-imaging/actions/update-image-set-metadata.js
index 65a073b7ab0..c2133008006 100644
--- a/javascriptv3/example_code/medical-imaging/actions/update-image-set-metadata.js
+++ b/javascriptv3/example_code/medical-imaging/actions/update-image-set-metadata.js
@@ -16,38 +16,45 @@ import {medicalImagingClient} from "../libs/medicalImagingClient.js";
* @param {string} imageSetId - The ID of the HealthImaging image set.
* @param {string} latestVersionId - The ID of the HealthImaging image set version.
* @param {{}} updateMetadata - The metadata to update.
+ * @param {boolean} force - Force the update.
*/
export const updateImageSetMetadata = async (datastoreId = "xxxxxxxxxx",
imageSetId = "xxxxxxxxxx",
latestVersionId = "1",
- updateMetadata = '{}') => {
- const response = await medicalImagingClient.send(
- new UpdateImageSetMetadataCommand({
- datastoreId: datastoreId,
- imageSetId: imageSetId,
- latestVersionId: latestVersionId,
- updateImageSetMetadataUpdates: updateMetadata
- })
- );
- console.log(response);
- // {
- // '$metadata': {
- // httpStatusCode: 200,
- // requestId: '7966e869-e311-4bff-92ec-56a61d3003ea',
- // extendedRequestId: undefined,
- // cfId: undefined,
- // attempts: 1,
- // totalRetryDelay: 0
- // },
- // createdAt: 2023-09-22T14:49:26.427Z,
- // datastoreId: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
- // imageSetId: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
- // imageSetState: 'LOCKED',
- // imageSetWorkflowStatus: 'UPDATING',
- // latestVersionId: '4',
- // updatedAt: 2023-09-27T19:41:43.494Z
- // }
- return response;
+ updateMetadata = '{}',
+ force = false) => {
+ try {
+ const response = await medicalImagingClient.send(
+ new UpdateImageSetMetadataCommand({
+ datastoreId: datastoreId,
+ imageSetId: imageSetId,
+ latestVersionId: latestVersionId,
+ updateImageSetMetadataUpdates: updateMetadata,
+ force: force,
+ })
+ );
+ console.log(response);
+ // {
+ // '$metadata': {
+ // httpStatusCode: 200,
+ // requestId: '7966e869-e311-4bff-92ec-56a61d3003ea',
+ // extendedRequestId: undefined,
+ // cfId: undefined,
+ // attempts: 1,
+ // totalRetryDelay: 0
+ // },
+ // createdAt: 2023-09-22T14:49:26.427Z,
+ // datastoreId: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
+ // imageSetId: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
+ // imageSetState: 'LOCKED',
+ // imageSetWorkflowStatus: 'UPDATING',
+ // latestVersionId: '4',
+ // updatedAt: 2023-09-27T19:41:43.494Z
+ // }
+ return response;
+ } catch (err) {
+ console.error(err);
+ }
};
// snippet-end:[medical-imaging.JavaScript.datastore.updateImageSetMetadataV3]
@@ -58,7 +65,7 @@ if (process.argv[1] === fileURLToPath(import.meta.url)) {
const imageSetID = "12345678901234567890123456789012";
const versionID = "1";
const updateType = "insert"; // or "remove-attribute" or "remove_instance".
- if (updateType == "insert") {
+ if (updateType === "insert") {
// Insert or update an attribute.
// snippet-start:[medical-imaging.JavaScript.datastore.updateImageSetMetadataV3.insert_or_update_attributes]
const insertAttributes =
@@ -79,9 +86,9 @@ if (process.argv[1] === fileURLToPath(import.meta.url)) {
};
await updateImageSetMetadata(datastoreID, imageSetID,
- versionID, updateMetadata);
+ versionID, updateMetadata, true);
// snippet-end:[medical-imaging.JavaScript.datastore.updateImageSetMetadataV3.insert_or_update_attributes]
- } else if (updateType == "remove_attribute") {
+ } else if (updateType === "remove_attribute") {
// Remove an existing attribute.
// snippet-start:[medical-imaging.JavaScript.datastore.updateImageSetMetadataV3.remove_attributes]
// Attribute key and value must match the existing attribute.
@@ -105,7 +112,7 @@ if (process.argv[1] === fileURLToPath(import.meta.url)) {
await updateImageSetMetadata(datastoreID, imageSetID,
versionID, updateMetadata);
// snippet-end:[medical-imaging.JavaScript.datastore.updateImageSetMetadataV3.remove_attributes]
- } else if (updateType == "remove_instance") {
+ } else if (updateType === "remove_instance") {
// Remove an existing instance.
// snippet-start:[medical-imaging.JavaScript.datastore.updateImageSetMetadataV3.remove_instance]
const remove_instance =
@@ -132,6 +139,16 @@ if (process.argv[1] === fileURLToPath(import.meta.url)) {
await updateImageSetMetadata(datastoreID, imageSetID,
versionID, updateMetadata);
// snippet-end:[medical-imaging.JavaScript.datastore.updateImageSetMetadataV3.remove_instance]
- }
+ } else if (updateType === "revert") {
+ // Revert to an earlier version.
+// snippet-start:[medical-imaging.JavaScript.datastore.updateImageSetMetadataV3.revert]
+ const updateMetadata = {
+ "revertToVersionId": "1"
+ };
+
+ await updateImageSetMetadata(datastoreID, imageSetID,
+ versionID, updateMetadata);
+// snippet-end:[medical-imaging.JavaScript.datastore.updateImageSetMetadataV3.revert]
+}
}
diff --git a/javascriptv3/example_code/medical-imaging/package.json b/javascriptv3/example_code/medical-imaging/package.json
index 45bc8f62c23..72e664b221f 100644
--- a/javascriptv3/example_code/medical-imaging/package.json
+++ b/javascriptv3/example_code/medical-imaging/package.json
@@ -4,10 +4,10 @@
"author": "Steven Meyer {
- const actual = await vi.importActual("@aws-sdk/client-medical-imaging");
- return {
- ...actual,
- MedicalImagingClient: class {
- send = send;
- },
- };
+ const actual = await vi.importActual("@aws-sdk/client-medical-imaging");
+ return {
+ ...actual,
+ MedicalImagingClient: class {
+ send = send;
+ },
+ };
});
-const { updateImageSetMetadata } = await import(
- "../actions/update-image-set-metadata.js"
-);
+const {updateImageSetMetadata} = await import(
+ "../actions/update-image-set-metadata.js"
+ );
describe("update-image-set-metadata", () => {
- it("should log the response", async () => {
- const logSpy = vi.spyOn(console, "log");
- const datastoreId = "12345678901234567890123456789012";
- const imageSetId = "12345678901234567890123456789012";
- const versionID = "1";
- const updatableAttributes = JSON.stringify({
- SchemaVersion: 1.1,
- Patient: {
- DICOM: {
- PatientName: "Garcia^Gloria",
- },
- },
- });
+ it("should log the response", async () => {
+ const logSpy = vi.spyOn(console, "log");
+ const datastoreId = "12345678901234567890123456789012";
+ const imageSetId = "12345678901234567890123456789012";
+ const versionID = "1";
+ const updatableAttributes = JSON.stringify({
+ SchemaVersion: 1.1,
+ Patient: {
+ DICOM: {
+ PatientName: "Garcia^Gloria",
+ },
+ },
+ });
- const updateMetadata = {
- DICOMUpdates: {
- updatableAttributes: new TextEncoder().encode(updatableAttributes),
- },
- };
- const response = {
- $metadata: {
- httpStatusCode: 200,
- requestId: "7966e869-e311-4bff-92ec-56a61d3003ea",
- extendedRequestId: undefined,
- cfId: undefined,
- attempts: 1,
- totalRetryDelay: 0,
- },
- createdAt: "2023-09-22T14:49:26.427Z",
- datastoreId: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
- imageSetId: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
- imageSetState: "LOCKED",
- imageSetWorkflowStatus: "UPDATING",
- latestVersionId: "4",
- updatedAt: "2023-09-27T19:41:43.494Z",
- };
+ const updateMetadata = {
+ DICOMUpdates: {
+ updatableAttributes: new TextEncoder().encode(updatableAttributes),
+ },
+ };
+ const response = {
+ $metadata: {
+ httpStatusCode: 200,
+ requestId: "7966e869-e311-4bff-92ec-56a61d3003ea",
+ extendedRequestId: undefined,
+ cfId: undefined,
+ attempts: 1,
+ totalRetryDelay: 0,
+ },
+ createdAt: "2023-09-22T14:49:26.427Z",
+ datastoreId: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ imageSetId: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ imageSetState: "LOCKED",
+ imageSetWorkflowStatus: "UPDATING",
+ latestVersionId: "4",
+ updatedAt: "2023-09-27T19:41:43.494Z",
+ };
- send.mockResolvedValueOnce(response);
+ send.mockResolvedValueOnce(response);
- await updateImageSetMetadata(
- datastoreId,
- imageSetId,
- versionID,
- updateMetadata
- );
+ await updateImageSetMetadata(
+ datastoreId,
+ imageSetId,
+ versionID,
+ updateMetadata,
+ true
+ );
- expect(logSpy).toHaveBeenCalledWith(response);
- });
+ expect(logSpy).toHaveBeenCalledWith(response);
+ });
});
diff --git a/javav2/example_code/medicalimaging/README.md b/javav2/example_code/medicalimaging/README.md
index 339cc4a7781..2e2a4e64cf2 100644
--- a/javav2/example_code/medicalimaging/README.md
+++ b/javav2/example_code/medicalimaging/README.md
@@ -33,7 +33,7 @@ For prerequisites, see the [README](../../README.md#Prerequisites) in the `javav
Code excerpts that show you how to call individual service functions.
-- [CopyImageSet](src/main/java/com/example/medicalimaging/CopyImageSet.java#L65)
+- [CopyImageSet](src/main/java/com/example/medicalimaging/CopyImageSet.java#L132)
- [CreateDatastore](src/main/java/com/example/medicalimaging/CreateDatastore.java#L52)
- [DeleteDatastore](src/main/java/com/example/medicalimaging/DeleteDatastore.java#L50)
- [DeleteImageSet](src/main/java/com/example/medicalimaging/DeleteImageSet.java#L53)
@@ -50,7 +50,7 @@ Code excerpts that show you how to call individual service functions.
- [StartDICOMImportJob](src/main/java/com/example/medicalimaging/StartDicomImportJob.java#L65)
- [TagResource](src/main/java/com/example/medicalimaging/TagResource.java#L54)
- [UntagResource](src/main/java/com/example/medicalimaging/UntagResource.java#L54)
-- [UpdateImageSetMetadata](src/main/java/com/example/medicalimaging/UpdateImageSetMetadata.java#L144)
+- [UpdateImageSetMetadata](src/main/java/com/example/medicalimaging/UpdateImageSetMetadata.java#L163)
### Scenarios
diff --git a/javav2/example_code/medicalimaging/pom.xml b/javav2/example_code/medicalimaging/pom.xml
index bf1a283a6e0..ef3b9b979e6 100644
--- a/javav2/example_code/medicalimaging/pom.xml
+++ b/javav2/example_code/medicalimaging/pom.xml
@@ -8,8 +8,8 @@
1.0-SNAPSHOTUTF-8
- 1.8
- 2.25.25
+ 17
+ 2.26.24
@@ -18,8 +18,8 @@
maven-compiler-plugin3.1
-
- ${java.version}
+
+ 15
@@ -50,10 +50,6 @@
5.9.2test
-
- software.amazon.awssdk
- secretsmanager
- com.google.code.gsongson
@@ -65,7 +61,7 @@
5.9.2test
-
+ org.junit.platformjunit-platform-commons1.9.2
@@ -90,5 +86,14 @@
software.amazon.awssdks3
+ <dependency>
+ <groupId>software.amazon.awssdk</groupId>
+ <artifactId>secretsmanager</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>commons-cli</groupId>
+ <artifactId>commons-cli</artifactId>
+ <version>1.8.0</version>
+ </dependency>
\ No newline at end of file
diff --git a/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/CopyImageSet.java b/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/CopyImageSet.java
index c8aad5f2626..537e4c4528d 100644
--- a/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/CopyImageSet.java
+++ b/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/CopyImageSet.java
@@ -2,13 +2,23 @@
// SPDX-License-Identifier: Apache-2.0
package com.example.medicalimaging;
+import org.apache.commons.cli.*;
// snippet-start:[medicalimaging.java2.copy_imageset.import]
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.medicalimaging.MedicalImagingClient;
-import software.amazon.awssdk.services.medicalimaging.model.*;
+import software.amazon.awssdk.services.medicalimaging.model.CopyDestinationImageSet;
+import software.amazon.awssdk.services.medicalimaging.model.CopyImageSetInformation;
+import software.amazon.awssdk.services.medicalimaging.model.CopyImageSetRequest;
+import software.amazon.awssdk.services.medicalimaging.model.CopyImageSetResponse;
+import software.amazon.awssdk.services.medicalimaging.model.CopySourceImageSetInformation;
+import software.amazon.awssdk.services.medicalimaging.model.MedicalImagingException;
+import software.amazon.awssdk.services.medicalimaging.model.MetadataCopies;
+
+import java.util.Collections;
+import java.util.Vector;
// snippet-end:[medicalimaging.java2.copy_imageset.import]
@@ -20,33 +30,87 @@
*
* https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/get-started.html
*/
+
public class CopyImageSet {
+ /**
+ * The main function.
+ *
+ * usage: Copy Image Set
+ * -c One or more comma separated optional subsets to copy
+ * -d The ID of the data store
+ * -di The optional destination image set ID
+ * -dv The optional destination version ID
+ * -f The optional force copy flag
+ * -h Prints this help message
+ * -i The ID of the image set
+ * -v The latest version ID of the image set
+ */
public static void main(String[] args) {
- final String usage = "\n" +
- "Usage:\n" +
- " \n\n" +
- "Where:\n" +
- " datastoreId - The ID of the data store.\n" +
- " imageSetId - The ID of the image set.\n" +
- " latestVersionId - The latest version ID of the image set.\n" +
- " destinationImageSetId - The optional destination image set ID.\n" +
- " destinationVersionId - The optional destination version ID.\n";
-
- if ((args.length != 3) && (args.length != 5)) {
- System.out.println(usage);
- System.exit(1);
- }
-
- String datastoreId = args[0];
- String imageSetId = args[1];
- String versionId = args[2];
+ Options options = new Options();
+ Option datastoreIdOption = Option.builder("d").hasArg().desc("The ID of the data store").build();
+ Option imageSetIdOption = Option.builder("i").hasArg().desc("The ID of the image set").build();
+ Option latestVersionIdOption = Option.builder("v").hasArg().desc("The latest version ID of the image set").build();
+ Option destinationImageSetIdOption = Option.builder("di").hasArg().desc("The optional destination image set ID").build();
+ Option destinationVersionIdOption = Option.builder("dv").hasArg().desc("The optional destination version ID").build();
+ Option forceOption = Option.builder("f").desc("The optional force copy flag").build();
+ Option copySubset = Option.builder("c").hasArg().desc("One or more comma separated optional subsets to copy").build();
+ Option help = Option.builder("h").desc("Prints this help message").build();
+ options.addOption(datastoreIdOption);
+ options.addOption(imageSetIdOption);
+ options.addOption(latestVersionIdOption);
+ options.addOption(destinationImageSetIdOption);
+ options.addOption(destinationVersionIdOption);
+ options.addOption(forceOption);
+ options.addOption(copySubset);
+ options.addOption(help);
+ CommandLineParser parser = new DefaultParser();
+
+ String datastoreId = null;
+ String imageSetId = null;
+ String versionId = null;
String destinationImageSetId = null;
String destinationVersionId = null;
+ boolean force = false;
+ Vector<String> subsets = null;
+ try {
+ CommandLine cmd = parser.parse(options, args);
+
+ if (cmd.hasOption(help)) {
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("Copy Image Set", options);
+ System.exit(0);
+ }
- if (args.length == 5) {
- destinationImageSetId = args[3];
- destinationVersionId = args[4];
+ datastoreId = cmd.getOptionValue(datastoreIdOption);
+ imageSetId = cmd.getOptionValue(imageSetIdOption);
+ versionId = cmd.getOptionValue(latestVersionIdOption);
+ destinationImageSetId = cmd.getOptionValue(destinationImageSetIdOption);
+ destinationVersionId = cmd.getOptionValue(destinationVersionIdOption);
+ force = cmd.hasOption(forceOption);
+
+ if (cmd.hasOption(copySubset)) {
+ subsets = new Vector<>();
+ String commaSeparatedSubsets = cmd.getOptionValue(copySubset);
+ String[] subsetsArray = commaSeparatedSubsets.split(",");
+ Collections.addAll(subsets, subsetsArray);
+ }
+
+ } catch (UnrecognizedOptionException ex) {
+ System.out.println(ex.getMessage());
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("Copy Image Set", options);
+ System.exit(1);
+ } catch (ParseException e) {
+ System.err.println(e.getLocalizedMessage());
+ System.exit(1);
+ }
+
+ if (datastoreId == null || imageSetId == null || versionId == null) {
+ System.err.println("Data store ID, image set ID, and version ID are required");
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("Copy Image Set", options);
+ System.exit(1);
}
Region region = Region.US_WEST_2;
@@ -54,30 +118,58 @@ public static void main(String[] args) {
.region(region)
.credentialsProvider(ProfileCredentialsProvider.create())
.build();
+ try {
+ String copiedImageSetId = copyMedicalImageSet(medicalImagingClient, datastoreId, imageSetId,
+ versionId, destinationImageSetId, destinationVersionId, force, subsets);
- String copiedImageSetId = copyMedicalImageSet(medicalImagingClient, datastoreId, imageSetId,
- versionId, destinationImageSetId, destinationVersionId);
-
- System.out.println("The copied image set ID is " + copiedImageSetId);
+ System.out.println("The copied image set ID is " + copiedImageSetId);
+ } catch (MedicalImagingException e) {
+ System.err.println(e.awsErrorDetails().errorMessage());
+ }
medicalImagingClient.close();
}
// snippet-start:[medicalimaging.java2.copy_imageset.main]
+
+ /**
+ * Copy an AWS HealthImaging image set.
+ *
+ * @param medicalImagingClient - The AWS HealthImaging client object.
+ * @param datastoreId - The datastore ID.
+ * @param imageSetId - The image set ID.
+ * @param latestVersionId - The version ID.
+ * @param destinationImageSetId - The optional destination image set ID, ignored if null.
+ * @param destinationVersionId - The optional destination version ID, ignored if null.
+ * @param force - The force flag.
+ * @param subsets - The optional subsets to copy, ignored if null.
+ * @return - The image set ID of the copy.
+ * @throws MedicalImagingException - Base exception for all service exceptions thrown by AWS HealthImaging.
+ */
public static String copyMedicalImageSet(MedicalImagingClient medicalImagingClient,
- String datastoreId,
- String imageSetId,
- String latestVersionId,
- String destinationImageSetId,
- String destinationVersionId) {
+ String datastoreId,
+ String imageSetId,
+ String latestVersionId,
+ String destinationImageSetId,
+ String destinationVersionId,
+ boolean force,
+                                                 Vector<String> subsets) {
try {
- CopySourceImageSetInformation copySourceImageSetInformation = CopySourceImageSetInformation.builder()
- .latestVersionId(latestVersionId)
- .build();
+ CopySourceImageSetInformation.Builder copySourceImageSetInformation = CopySourceImageSetInformation.builder()
+ .latestVersionId(latestVersionId);
+
+ // Optionally copy a subset of image instances.
+ if (subsets != null) {
+ String subsetInstanceToCopy = getCopiableAttributesJSON(imageSetId, subsets);
+ copySourceImageSetInformation.dicomCopies(MetadataCopies.builder()
+ .copiableAttributes(subsetInstanceToCopy)
+ .build());
+ }
CopyImageSetInformation.Builder copyImageSetBuilder = CopyImageSetInformation.builder()
- .sourceImageSet(copySourceImageSetInformation);
+ .sourceImageSet(copySourceImageSetInformation.build());
+ // Optionally designate a destination image set.
if (destinationImageSetId != null) {
copyImageSetBuilder = copyImageSetBuilder.destinationImageSet(CopyDestinationImageSet.builder()
.imageSetId(destinationImageSetId)
@@ -89,6 +181,7 @@ public static String copyMedicalImageSet(MedicalImagingClient medicalImagingClie
.datastoreId(datastoreId)
.sourceImageSetId(imageSetId)
.copyImageSetInformation(copyImageSetBuilder.build())
+ .force(force)
.build();
CopyImageSetResponse response = medicalImagingClient.copyImageSet(copyImageSetRequest);
@@ -96,10 +189,52 @@ public static String copyMedicalImageSet(MedicalImagingClient medicalImagingClie
return response.destinationImageSetProperties().imageSetId();
} catch (MedicalImagingException e) {
System.err.println(e.awsErrorDetails().errorMessage());
- System.exit(1);
+ throw e;
}
-
- return "";
}
// snippet-end:[medicalimaging.java2.copy_imageset.main]
+
+ // snippet-start:[medicalimaging.java2.copy_imageset.copiable_attributes]
+
+ /**
+ * Create a JSON string of copiable image instances.
+ *
+ * @param imageSetId - The image set ID.
+ * @param subsets - The subsets to copy.
+ * @return A JSON string of copiable image instances.
+ */
+    private static String getCopiableAttributesJSON(String imageSetId, Vector<String> subsets) {
+ StringBuilder subsetInstanceToCopy = new StringBuilder(
+ """
+ {
+ "SchemaVersion": 1.1,
+ "Study": {
+ "Series": {
+ "
+ """
+ );
+
+ subsetInstanceToCopy.append(imageSetId);
+
+ subsetInstanceToCopy.append(
+ """
+ ": {
+ "Instances": {
+ """
+ );
+
+ for (String subset : subsets) {
+ subsetInstanceToCopy.append('"' + subset + "\": {},");
+ }
+ subsetInstanceToCopy.deleteCharAt(subsetInstanceToCopy.length() - 1);
+ subsetInstanceToCopy.append("""
+ }
+ }
+ }
+ }
+ }
+ """);
+ return subsetInstanceToCopy.toString();
+ }
+ // snippet-end:[medicalimaging.java2.copy_imageset.copiable_attributes]
}
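For reference, here is a minimal sketch (not part of the patch) of calling the copyMedicalImageSet method above to copy a single image instance. It assumes it is compiled alongside the CopyImageSet class; the datastore, image set, and instance IDs are placeholders. For an image set ID I and one subset ID S, getCopiableAttributesJSON produces a copiableAttributes document of the form {"SchemaVersion": 1.1, "Study": {"Series": {"I": {"Instances": {"S": {}}}}}}.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.medicalimaging.MedicalImagingClient;
import software.amazon.awssdk.services.medicalimaging.model.MedicalImagingException;

import java.util.Vector;

public class CopyImageSetSubsetSketch {
    public static void main(String[] args) {
        MedicalImagingClient client = MedicalImagingClient.builder()
                .region(Region.US_WEST_2)
                .build();

        // Copy only one instance (SOP Instance UID) from the source image set.
        Vector<String> subsets = new Vector<>();
        subsets.add("1.1.1.1.1.1.12345.123456789012.123.12345678901234.1");

        try {
            String copiedImageSetId = CopyImageSet.copyMedicalImageSet(
                    client,
                    "12345678901234567890123456789012", // datastoreId (placeholder)
                    "12345678901234567890123456789012", // imageSetId (placeholder)
                    "1",                                // latestVersionId
                    null,                               // no destination image set
                    null,                               // no destination version
                    false,                              // force
                    subsets);
            System.out.println("The copied image set ID is " + copiedImageSetId);
        } catch (MedicalImagingException e) {
            System.err.println(e.awsErrorDetails().errorMessage());
        } finally {
            client.close();
        }
    }
}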
diff --git a/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/SearchImageSets.java b/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/SearchImageSets.java
index f5e88eb4a03..0cd11ca7e28 100644
--- a/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/SearchImageSets.java
+++ b/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/SearchImageSets.java
@@ -50,7 +50,7 @@ public static void main(String[] args) {
String patientId = args[1];
String seriesInstanceUID = args[2];
- Region region = Region.US_EAST_1;
+ Region region = Region.US_WEST_2;
MedicalImagingClient medicalImagingClient = MedicalImagingClient.builder()
.region(region)
.credentialsProvider(ProfileCredentialsProvider.create())
diff --git a/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/UpdateImageSetMetadata.java b/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/UpdateImageSetMetadata.java
index 0e13b77b1f2..a3b2a4b6582 100644
--- a/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/UpdateImageSetMetadata.java
+++ b/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/UpdateImageSetMetadata.java
@@ -29,14 +29,15 @@ public class UpdateImageSetMetadata {
public static void main(String[] args) {
final String usage = "\n" +
"Usage:\n" +
- " n\n" +
+                "    <datastoreId> <imagesetId> <versionId> <updateType> [force]\n\n" +
"Where:\n" +
" datastoreId - The ID of the data store.\n" +
" imagesetId - The ID of the image set.\n" +
" versionId - The latest version ID of the image set.\n" +
- " updateType - Choice of (insert | remove_attribute | remove_instance) for update type.\n";
+ " updateType - Choice of (insert | remove_attribute | remove_instance | revert) for update type.\n" +
+ " force - Optional force (true | false) when updating.\n";
- if (args.length != 4) {
+ if (args.length != 4 && args.length != 5) {
System.out.println(usage);
System.exit(1);
}
@@ -45,115 +46,147 @@ public static void main(String[] args) {
String imagesetId = args[1];
String versionid = args[2];
String updateType = args[3];
+ boolean force = false;
+ if (args.length == 5) {
+ force = args[4].equals("true");
+ }
- if (!updateType.equals("insert") && !updateType.equals("remove_attribute") && !updateType.equals("remove_instance")) {
+ if (!updateType.equals("insert") && !updateType.equals("remove_attribute") && !updateType.equals("remove_instance")
+ && !updateType.equals("revert")) {
System.out.println("Invalid update type, '" + updateType + "'.");
System.out.println(usage);
System.exit(1);
}
- Region region = Region.US_EAST_1;
+ Region region = Region.US_WEST_2;
MedicalImagingClient medicalImagingClient = MedicalImagingClient.builder()
.region(region)
.credentialsProvider(ProfileCredentialsProvider.create())
.build();
- if (updateType.equals("insert")) {
- // Add a new attribute or update an existing attribute.
- // snippet-start:[medicalimaging.java2.update_image_set_metadata.insert_or_update_attributes]
- final String insertAttributes = """
- {
- "SchemaVersion": 1.1,
- "Study": {
- "DICOM": {
- "StudyDescription": "CT CHEST"
- }
- }
- }
- """;
- MetadataUpdates metadataInsertUpdates = MetadataUpdates.builder()
- .dicomUpdates(DICOMUpdates.builder()
- .updatableAttributes(SdkBytes.fromByteBuffer(
- ByteBuffer.wrap(insertAttributes
- .getBytes(StandardCharsets.UTF_8))))
- .build())
- .build();
-
- updateMedicalImageSetMetadata(medicalImagingClient, datastoreId, imagesetId,
- versionid, metadataInsertUpdates);
- // snippet-end:[medicalimaging.java2.update_image_set_metadata.insert_or_update_attributes]
- } else if (updateType.equals("remove_attribute")) {
- // Remove an attribute.
- // snippet-start:[medicalimaging.java2.update_image_set_metadata.remove_attributes]
- final String removeAttributes = """
- {
- "SchemaVersion": 1.1,
- "Study": {
- "DICOM": {
- "StudyDescription": "CT CHEST"
+ try {
+ if (updateType.equals("insert")) {
+ // Add a new attribute or update an existing attribute.
+ // snippet-start:[medicalimaging.java2.update_image_set_metadata.insert_or_update_attributes]
+ final String insertAttributes = """
+ {
+ "SchemaVersion": 1.1,
+ "Study": {
+ "DICOM": {
+ "StudyDescription": "CT CHEST"
+ }
+ }
}
- }
- }
- """;
- MetadataUpdates metadataRemoveUpdates = MetadataUpdates.builder()
- .dicomUpdates(DICOMUpdates.builder()
- .removableAttributes(SdkBytes.fromByteBuffer(
- ByteBuffer.wrap(removeAttributes
- .getBytes(StandardCharsets.UTF_8))))
- .build())
- .build();
-
- updateMedicalImageSetMetadata(medicalImagingClient, datastoreId, imagesetId,
- versionid, metadataRemoveUpdates);
- // snippet-end:[medicalimaging.java2.update_image_set_metadata.remove_attributes]
- } else if (updateType.equals("remove_instance")) {
- // Remove an instance.
- // snippet-start:[medicalimaging.java2.update_image_set_metadata.remove_instance]
- final String removeInstance = """
- {
- "SchemaVersion": 1.1,
- "Study": {
- "Series": {
- "1.1.1.1.1.1.12345.123456789012.123.12345678901234.1": {
- "Instances": {
- "1.1.1.1.1.1.12345.123456789012.123.12345678901234.1": {}
+ """;
+ MetadataUpdates metadataInsertUpdates = MetadataUpdates.builder()
+ .dicomUpdates(DICOMUpdates.builder()
+ .updatableAttributes(SdkBytes.fromByteBuffer(
+ ByteBuffer.wrap(insertAttributes
+ .getBytes(StandardCharsets.UTF_8))))
+ .build())
+ .build();
+
+ updateMedicalImageSetMetadata(medicalImagingClient, datastoreId, imagesetId,
+ versionid, metadataInsertUpdates, force);
+ // snippet-end:[medicalimaging.java2.update_image_set_metadata.insert_or_update_attributes]
+ } else if (updateType.equals("remove_attribute")) {
+ // Remove an attribute.
+ // snippet-start:[medicalimaging.java2.update_image_set_metadata.remove_attributes]
+ final String removeAttributes = """
+ {
+ "SchemaVersion": 1.1,
+ "Study": {
+ "DICOM": {
+ "StudyDescription": "CT CHEST"
}
}
}
- }
- }
- """;
- MetadataUpdates metadataRemoveUpdates = MetadataUpdates.builder()
- .dicomUpdates(DICOMUpdates.builder()
- .removableAttributes(SdkBytes.fromByteBuffer(
- ByteBuffer.wrap(removeInstance
- .getBytes(StandardCharsets.UTF_8))))
- .build())
- .build();
-
- updateMedicalImageSetMetadata(medicalImagingClient, datastoreId, imagesetId,
- versionid, metadataRemoveUpdates);
- // snippet-end:[medicalimaging.java2.update_image_set_metadata.remove_instance]
+ """;
+ MetadataUpdates metadataRemoveUpdates = MetadataUpdates.builder()
+ .dicomUpdates(DICOMUpdates.builder()
+ .removableAttributes(SdkBytes.fromByteBuffer(
+ ByteBuffer.wrap(removeAttributes
+ .getBytes(StandardCharsets.UTF_8))))
+ .build())
+ .build();
+
+ updateMedicalImageSetMetadata(medicalImagingClient, datastoreId, imagesetId,
+ versionid, metadataRemoveUpdates, force);
+ // snippet-end:[medicalimaging.java2.update_image_set_metadata.remove_attributes]
+ } else if (updateType.equals("remove_instance")) {
+ // Remove an instance.
+ // snippet-start:[medicalimaging.java2.update_image_set_metadata.remove_instance]
+ final String removeInstance = """
+ {
+ "SchemaVersion": 1.1,
+ "Study": {
+ "Series": {
+ "1.1.1.1.1.1.12345.123456789012.123.12345678901234.1": {
+ "Instances": {
+ "1.1.1.1.1.1.12345.123456789012.123.12345678901234.1": {}
+ }
+ }
+ }
+ }
+ }
+ """;
+ MetadataUpdates metadataRemoveUpdates = MetadataUpdates.builder()
+ .dicomUpdates(DICOMUpdates.builder()
+ .removableAttributes(SdkBytes.fromByteBuffer(
+ ByteBuffer.wrap(removeInstance
+ .getBytes(StandardCharsets.UTF_8))))
+ .build())
+ .build();
+
+ updateMedicalImageSetMetadata(medicalImagingClient, datastoreId, imagesetId,
+ versionid, metadataRemoveUpdates, force);
+ // snippet-end:[medicalimaging.java2.update_image_set_metadata.remove_instance]
+ } else if (updateType.equals("revert")) {
+ // snippet-start:[medicalimaging.java2.update_image_set_metadata.revert]
+ // In this case, revert to previous version.
+ String revertVersionId = Integer.toString(Integer.parseInt(versionid) - 1);
+ MetadataUpdates metadataRemoveUpdates = MetadataUpdates.builder()
+ .revertToVersionId(revertVersionId)
+ .build();
+ updateMedicalImageSetMetadata(medicalImagingClient, datastoreId, imagesetId,
+ versionid, metadataRemoveUpdates, force);
+ // snippet-end:[medicalimaging.java2.update_image_set_metadata.revert]
+ }
+ } catch (MedicalImagingException e) {
+ System.err.println(e.awsErrorDetails().errorMessage());
}
-
medicalImagingClient.close();
}
// snippet-start:[medicalimaging.java2.update_image_set_metadata.main]
+
+ /**
+ * Update the metadata of an AWS HealthImaging image set.
+ *
+ * @param medicalImagingClient - The AWS HealthImaging client object.
+ * @param datastoreId - The datastore ID.
+ * @param imageSetId - The image set ID.
+ * @param versionId - The version ID.
+ * @param metadataUpdates - A MetadataUpdates object containing the updates.
+ * @param force - The force flag.
+ * @throws MedicalImagingException - Base exception for all service exceptions thrown by AWS HealthImaging.
+ */
public static void updateMedicalImageSetMetadata(MedicalImagingClient medicalImagingClient,
String datastoreId,
- String imagesetId,
+ String imageSetId,
String versionId,
- MetadataUpdates metadataUpdates) {
+ MetadataUpdates metadataUpdates,
+ boolean force) {
try {
UpdateImageSetMetadataRequest updateImageSetMetadataRequest = UpdateImageSetMetadataRequest
.builder()
.datastoreId(datastoreId)
- .imageSetId(imagesetId)
+ .imageSetId(imageSetId)
.latestVersionId(versionId)
.updateImageSetMetadataUpdates(metadataUpdates)
+ .force(force)
.build();
UpdateImageSetMetadataResponse response = medicalImagingClient.updateImageSetMetadata(updateImageSetMetadataRequest);
@@ -161,7 +194,7 @@ public static void updateMedicalImageSetMetadata(MedicalImagingClient medicalIma
System.out.println("The image set metadata was updated" + response);
} catch (MedicalImagingException e) {
System.err.println(e.awsErrorDetails().errorMessage());
- System.exit(1);
+ throw e;
}
}
// snippet-end:[medicalimaging.java2.update_image_set_metadata.main]
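Similarly, a minimal sketch (not part of the patch) of the new revert path: build a MetadataUpdates with revertToVersionId and pass it to the updateMedicalImageSetMetadata method above. The IDs are placeholders, and the sketch assumes it is compiled alongside the UpdateImageSetMetadata class.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.medicalimaging.MedicalImagingClient;
import software.amazon.awssdk.services.medicalimaging.model.MedicalImagingException;
import software.amazon.awssdk.services.medicalimaging.model.MetadataUpdates;

public class RevertImageSetMetadataSketch {
    public static void main(String[] args) {
        MedicalImagingClient client = MedicalImagingClient.builder()
                .region(Region.US_WEST_2)
                .build();

        String datastoreId = "12345678901234567890123456789012"; // placeholder
        String imageSetId = "12345678901234567890123456789012";  // placeholder
        String latestVersionId = "2";

        // Revert the metadata to the version before the latest one.
        String revertVersionId = Integer.toString(Integer.parseInt(latestVersionId) - 1);
        MetadataUpdates revertUpdates = MetadataUpdates.builder()
                .revertToVersionId(revertVersionId)
                .build();

        try {
            UpdateImageSetMetadata.updateMedicalImageSetMetadata(
                    client, datastoreId, imageSetId, latestVersionId, revertUpdates, false);
        } catch (MedicalImagingException e) {
            System.err.println(e.awsErrorDetails().errorMessage());
        } finally {
            client.close();
        }
    }
}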
diff --git a/javav2/example_code/medicalimaging/src/test/java/AWSMedicalImagingTest.java b/javav2/example_code/medicalimaging/src/test/java/AWSMedicalImagingTest.java
index 64b0bc18670..09b63ccc5a2 100644
--- a/javav2/example_code/medicalimaging/src/test/java/AWSMedicalImagingTest.java
+++ b/javav2/example_code/medicalimaging/src/test/java/AWSMedicalImagingTest.java
@@ -142,18 +142,7 @@ public void listDatastoresTest() {
}
- @Test
- @Tag("IntegrationTest")
- @Order(4)
- public void getDicomImportJobTest() {
- final DICOMImportJobProperties[] dicomImportJobSummaries = { null };
- assertDoesNotThrow(() -> dicomImportJobSummaries[0] = GetDicomImportJob.getDicomImportJob(medicalImagingClient,
- workingDatastoreId, importJobId));
- assertNotNull(dicomImportJobSummaries[0]);
-
- System.out.println("Test 4 passed");
- }
-
+
@Test
@Tag("IntegrationTest")
@Order(5)
@@ -182,10 +171,13 @@ public void searchImageSetsTest() {
.build())
.build());
+ SearchCriteria searchCriteria = SearchCriteria.builder()
+ .filters(searchFilters)
+ .build();
@SuppressWarnings("rawtypes")
final List[] searchResults = { null };
assertDoesNotThrow(() -> searchResults[0] = SearchImageSets.searchMedicalImagingImageSets(medicalImagingClient,
- workingDatastoreId, searchFilters));
+ workingDatastoreId, searchCriteria));
assertNotNull(searchResults[0]);
System.out.println("Test 6 passed");
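The test above now wraps its filters in a SearchCriteria object before calling SearchImageSets.searchMedicalImagingImageSets. A hedged sketch of building such a criteria object for a single EQUAL filter on the DICOM patient ID follows; the model class and builder method names are taken from the HealthImaging SearchImageSets request model and should be verified against the SDK version in use.

import software.amazon.awssdk.services.medicalimaging.model.Operator;
import software.amazon.awssdk.services.medicalimaging.model.SearchByAttributeValue;
import software.amazon.awssdk.services.medicalimaging.model.SearchCriteria;
import software.amazon.awssdk.services.medicalimaging.model.SearchFilter;

import java.util.Collections;

public class SearchCriteriaSketch {
    // Build a SearchCriteria that matches image sets for one patient ID.
    static SearchCriteria forPatientId(String patientId) {
        SearchFilter filter = SearchFilter.builder()
                .values(SearchByAttributeValue.builder()
                        .dicomPatientId(patientId)
                        .build())
                .operator(Operator.EQUAL)
                .build();

        return SearchCriteria.builder()
                .filters(Collections.singletonList(filter))
                .build();
    }
}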
diff --git a/python/example_code/medical-imaging/README.md b/python/example_code/medical-imaging/README.md
index 1d91c6c05bf..90b913b7dc5 100644
--- a/python/example_code/medical-imaging/README.md
+++ b/python/example_code/medical-imaging/README.md
@@ -43,10 +43,10 @@ python -m pip install -r requirements.txt
Code excerpts that show you how to call individual service functions.
-- [CopyImageSet](medical_imaging_basics.py#L415)
+- [CopyImageSet](medical_imaging_basics.py#L417)
- [CreateDatastore](medical_imaging_basics.py#L31)
- [DeleteDatastore](medical_imaging_basics.py#L104)
-- [DeleteImageSet](medical_imaging_basics.py#L466)
+- [DeleteImageSet](medical_imaging_basics.py#L489)
- [GetDICOMImportJob](medical_imaging_basics.py#L158)
- [GetDatastore](medical_imaging_basics.py#L54)
- [GetImageFrame](medical_imaging_basics.py#L318)
@@ -55,11 +55,11 @@ Code excerpts that show you how to call individual service functions.
- [ListDICOMImportJobs](medical_imaging_basics.py#L183)
- [ListDatastores](medical_imaging_basics.py#L79)
- [ListImageSetVersions](medical_imaging_basics.py#L350)
-- [ListTagsForResource](medical_imaging_basics.py#L533)
+- [ListTagsForResource](medical_imaging_basics.py#L556)
- [SearchImageSets](medical_imaging_basics.py#L211)
- [StartDICOMImportJob](medical_imaging_basics.py#L124)
-- [TagResource](medical_imaging_basics.py#L491)
-- [UntagResource](medical_imaging_basics.py#L511)
+- [TagResource](medical_imaging_basics.py#L514)
+- [UntagResource](medical_imaging_basics.py#L534)
- [UpdateImageSetMetadata](medical_imaging_basics.py#L381)
### Scenarios
diff --git a/python/example_code/medical-imaging/medical_imaging_basics.py b/python/example_code/medical-imaging/medical_imaging_basics.py
index 1555bc81f9a..b870ca12ced 100644
--- a/python/example_code/medical-imaging/medical_imaging_basics.py
+++ b/python/example_code/medical-imaging/medical_imaging_basics.py
@@ -380,7 +380,7 @@ def list_image_set_versions(self, datastore_id, image_set_id):
# snippet-start:[python.example_code.medical-imaging.UpdateImageSetMetadata]
def update_image_set_metadata(
- self, datastore_id, image_set_id, version_id, metadata
+ self, datastore_id, image_set_id, version_id, metadata, force=False
):
"""
Update the metadata of an image set.
@@ -391,6 +391,7 @@ def update_image_set_metadata(
:param metadata: The image set metadata as a dictionary.
For example {"DICOMUpdates": {"updatableAttributes":
"{\"SchemaVersion\":1.1,\"Patient\":{\"DICOM\":{\"PatientName\":\"Garcia^Gloria\"}}}"}}
+        :param force: Force the update.
:return: The updated image set metadata.
"""
try:
@@ -399,6 +400,7 @@ def update_image_set_metadata(
datastoreId=datastore_id,
latestVersionId=version_id,
updateImageSetMetadataUpdates=metadata,
+ force=force,
)
except ClientError as err:
logger.error(
@@ -420,6 +422,8 @@ def copy_image_set(
version_id,
destination_image_set_id=None,
destination_version_id=None,
+ force=False,
+ subsets=[],
):
"""
Copy an image set.
@@ -429,6 +433,8 @@ def copy_image_set(
:param version_id: The ID of the image set version.
:param destination_image_set_id: The ID of the optional destination image set.
:param destination_version_id: The ID of the optional destination image set version.
+ :param force: Force the copy.
+ :param subsets: The optional subsets to copy. For example: ["12345678901234567890123456789012"].
:return: The copied image set ID.
"""
try:
@@ -445,12 +451,29 @@ def copy_image_set(
}
# snippet-end:[python.example_code.medical-imaging.CopyImageSet2]
# snippet-start:[python.example_code.medical-imaging.CopyImageSet3]
+ if len(subsets) > 0:
+ copySubsetsJson = {
+ "SchemaVersion": "1.1",
+ "Study": {"Series": {"imageSetId": {"Instances": {}}}},
+ }
+
+ for subset in subsets:
+ copySubsetsJson["Study"]["Series"]["imageSetId"]["Instances"][
+ subset
+ ] = {}
+
+ copy_image_set_information["sourceImageSet"]["DICOMCopies"] = {
+ "copiableAttributes": json.dumps(copySubsetsJson)
+ }
+ # snippet-end:[python.example_code.medical-imaging.CopyImageSet3]
+ # snippet-start:[python.example_code.medical-imaging.CopyImageSet4]
copy_results = self.health_imaging_client.copy_image_set(
datastoreId=datastore_id,
sourceImageSetId=image_set_id,
copyImageSetInformation=copy_image_set_information,
+ force=force,
)
- # snippet-end:[python.example_code.medical-imaging.CopyImageSet3]
+ # snippet-end:[python.example_code.medical-imaging.CopyImageSet4]
except ClientError as err:
logger.error(
"Couldn't copy image set. Here's why: %s: %s",
@@ -833,7 +856,8 @@ def update_image_set_metadata_demo(self):
data_store_id = "12345678901234567890123456789012"
image_set_id = "12345678901234567890123456789012"
version_id = "1"
- update_type = "insert" # or "remove-attribute" or "remove_instance"
+ force = False
+ update_type = "insert" # or "remove-attribute" or "remove_instance" or "revert"
if update_type == "insert":
# Insert or update an attribute.
# snippet-start:[python.example_code.medical-imaging.UpdateImageSetMetadata.insert_or_update_attributes]
@@ -848,7 +872,7 @@ def update_image_set_metadata_demo(self):
metadata = {"DICOMUpdates": {"updatableAttributes": attributes}}
self.update_image_set_metadata(
- data_store_id, image_set_id, version_id, metadata
+ data_store_id, image_set_id, version_id, metadata, force
)
# snippet-end:[python.example_code.medical-imaging.UpdateImageSetMetadata.insert_or_update_attributes]
elif update_type == "remove-attribute":
@@ -866,7 +890,7 @@ def update_image_set_metadata_demo(self):
metadata = {"DICOMUpdates": {"removableAttributes": attributes}}
self.update_image_set_metadata(
- data_store_id, image_set_id, version_id, metadata
+ data_store_id, image_set_id, version_id, metadata, force
)
# snippet-end:[python.example_code.medical-imaging.UpdateImageSetMetadata.remove_attributes]
elif update_type == "remove_instance":
@@ -887,10 +911,20 @@ def update_image_set_metadata_demo(self):
metadata = {"DICOMUpdates": {"removableAttributes": attributes}}
self.update_image_set_metadata(
- data_store_id, image_set_id, version_id, metadata
+ data_store_id, image_set_id, version_id, metadata, force
)
# snippet-end:[python.example_code.medical-imaging.UpdateImageSetMetadata.remove_instance]
+ elif update_type == "revert":
+ # Revert to a previous version.
+ # snippet-start:[python.example_code.medical-imaging.UpdateImageSetMetadata.revert]
+ metadata = {"revertToVersionId": "1"}
+
+ self.update_image_set_metadata(
+ data_store_id, image_set_id, version_id, metadata, force
+ )
+
+ # snippet-end:[python.example_code.medical-imaging.UpdateImageSetMetadata.revert]
print(f"Updated with update type {update_type}")
diff --git a/python/example_code/medical-imaging/requirements.txt b/python/example_code/medical-imaging/requirements.txt
index 3ef6d7037a9..0fb8ea1f7d5 100644
--- a/python/example_code/medical-imaging/requirements.txt
+++ b/python/example_code/medical-imaging/requirements.txt
@@ -1,5 +1,5 @@
-boto3>=1.34.78
+boto3>=1.34.149
pytest>=7.2.1
requests>=2.28.2
openjphpy>=0.1.0
-botocore>=1.34.78
\ No newline at end of file
+botocore>=1.34.149
\ No newline at end of file
diff --git a/python/example_code/medical-imaging/test/test_medical_imaging_basics.py b/python/example_code/medical-imaging/test/test_medical_imaging_basics.py
index 1dc069f1322..3f6a25e8303 100644
--- a/python/example_code/medical-imaging/test/test_medical_imaging_basics.py
+++ b/python/example_code/medical-imaging/test/test_medical_imaging_basics.py
@@ -5,10 +5,11 @@
Unit tests for medical_imaging_basics functions.
"""
+import os
+
import boto3
-from botocore.exceptions import ClientError
import pytest
-import os
+from botocore.exceptions import ClientError
from medical_imaging_basics import MedicalImagingWrapper
@@ -295,6 +296,7 @@ def test_update_image_set_metadata(make_stubber, error_code):
datastore_id = "abcdedf1234567890abcdef123456789"
image_set_id = "cccccc1234567890abcdef123456789"
version_id = "1"
+ force = False
metadata = {
"DICOMUpdates": {
"updatableAttributes": '{"SchemaVersion":1.1,"Patient":{"DICOM":{"PatientName":"Garcia^Gloria"}}}'
@@ -302,18 +304,18 @@ def test_update_image_set_metadata(make_stubber, error_code):
}
medical_imaging_stubber.stub_update_image_set_metadata(
- datastore_id, image_set_id, version_id, metadata, error_code=error_code
+ datastore_id, image_set_id, version_id, metadata, force, error_code=error_code
)
if error_code is None:
wrapper.update_image_set_metadata(
- datastore_id, image_set_id, version_id, metadata
+ datastore_id, image_set_id, version_id, metadata, force
)
else:
with pytest.raises(ClientError) as exc_info:
wrapper.update_image_set_metadata(
- datastore_id, image_set_id, version_id, metadata
+ datastore_id, image_set_id, version_id, metadata, force
)
assert exc_info.value.response["Error"]["Code"] == error_code
@@ -351,6 +353,8 @@ def test_copy_image_set_with_destination(make_stubber, error_code):
version_id = "1"
destination_image_set_id = "cccccc1234567890abcdef123456789"
destination_version_id = "1"
+ force = True
+ subset_Id = "cccccc1234567890abcdef123456789"
medical_imaging_stubber.stub_copy_image_set_with_destination(
datastore_id,
@@ -358,6 +362,8 @@ def test_copy_image_set_with_destination(make_stubber, error_code):
version_id,
destination_image_set_id,
destination_version_id,
+ force,
+ subset_Id,
error_code=error_code,
)
@@ -368,6 +374,8 @@ def test_copy_image_set_with_destination(make_stubber, error_code):
version_id,
destination_image_set_id,
destination_version_id,
+ force,
+ [subset_Id],
)
else:
@@ -378,6 +386,8 @@ def test_copy_image_set_with_destination(make_stubber, error_code):
version_id,
destination_image_set_id,
destination_version_id,
+ force,
+ [subset_Id],
)
assert exc_info.value.response["Error"]["Code"] == error_code
diff --git a/python/test_tools/medical_imaging_stubber.py b/python/test_tools/medical_imaging_stubber.py
index 031b2423d8e..29ca5887bab 100644
--- a/python/test_tools/medical_imaging_stubber.py
+++ b/python/test_tools/medical_imaging_stubber.py
@@ -9,6 +9,7 @@
import botocore
import io
import gzip
+import json
class MedicalImagingStubber(ExampleStubber):
@@ -241,13 +242,14 @@ def stub_list_image_set_versions(self, datastore_id, image_set_id, error_code=No
)
def stub_update_image_set_metadata(
- self, datastore_id, image_set_id, version_id, metadata, error_code=None
+ self, datastore_id, image_set_id, version_id, metadata, force, error_code=None
):
expected_params = {
"datastoreId": datastore_id,
"imageSetId": image_set_id,
"latestVersionId": version_id,
"updateImageSetMetadataUpdates": metadata,
+ "force": force,
}
response = {
@@ -278,6 +280,7 @@ def stub_copy_image_set_without_destination(
"copyImageSetInformation": {
"sourceImageSet": {"latestVersionId": version_id}
},
+ "force": False,
}
response = {
@@ -303,8 +306,14 @@ def stub_copy_image_set_with_destination(
version_id,
destination_image_set_id,
destination_version_id,
+ force,
+ subset,
error_code=None,
):
+ copiable_attributes = {
+ "SchemaVersion": "1.1",
+ "Study": {"Series": {"imageSetId": {"Instances": {subset: {}}}}},
+ }
expected_params = {
"datastoreId": datastore_id,
"sourceImageSetId": image_set_id,
@@ -313,8 +322,14 @@ def stub_copy_image_set_with_destination(
"imageSetId": destination_image_set_id,
"latestVersionId": destination_version_id,
},
- "sourceImageSet": {"latestVersionId": version_id},
+ "sourceImageSet": {
+ "latestVersionId": version_id,
+ "DICOMCopies": {
+ "copiableAttributes": json.dumps(copiable_attributes)
+ },
+ },
},
+ "force": force,
}
response = {
From 344dd498d22b50a063e1b760abb8e78cd0b5f558 Mon Sep 17 00:00:00 2001
From: Steven Meyer <108885656+meyertst-aws@users.noreply.github.com>
Date: Fri, 2 Aug 2024 10:24:50 -0400
Subject: [PATCH 16/98] Cpp: code improvements EC2 (#6698)
---
.doc_gen/metadata/ec2_metadata.yaml | 45 +++----
.doc_gen/validation.yaml | 1 +
cpp/example_code/ec2/CMakeLists.txt | 12 +-
cpp/example_code/ec2/README.md | 44 +++----
cpp/example_code/ec2/allocate_address.cpp | 31 +++--
cpp/example_code/ec2/associate_address.cpp | 87 +++++++++++++
.../ec2/authorize_security_group_ingress.cpp | 120 ++++++++++++++++++
cpp/example_code/ec2/create_key_pair.cpp | 35 +++--
.../ec2/create_security_group.cpp | 85 +------------
cpp/example_code/ec2/create_tags.cpp | 90 +++++++++++++
cpp/example_code/ec2/delete_key_pair.cpp | 12 +-
.../ec2/delete_security_group.cpp | 13 +-
cpp/example_code/ec2/describe_addresses.cpp | 15 +--
.../ec2/describe_availability_zones.cpp | 91 +++++++++++++
cpp/example_code/ec2/describe_instances.cpp | 18 ++-
cpp/example_code/ec2/describe_key_pairs.cpp | 13 +-
.../ec2/describe_regions_and_zones.cpp | 60 ++-------
.../ec2/describe_security_groups.cpp | 13 +-
cpp/example_code/ec2/ec2_samples.h | 106 ++++++++++------
cpp/example_code/ec2/hello_ec2/hello_ec2.cpp | 5 +-
cpp/example_code/ec2/monitor_instance.cpp | 50 ++++----
cpp/example_code/ec2/reboot_instance.cpp | 20 ++-
cpp/example_code/ec2/release_address.cpp | 13 +-
...{create_instance.cpp => run_instances.cpp} | 43 ++-----
cpp/example_code/ec2/start_stop_instance.cpp | 64 +++++-----
cpp/example_code/ec2/terminate_instances.cpp | 13 +-
cpp/example_code/ec2/tests/CMakeLists.txt | 28 +---
cpp/example_code/ec2/tests/ec2_gtests.cpp | 58 ++++++---
cpp/example_code/ec2/tests/ec2_gtests.h | 7 +-
.../ec2/tests/gtest_allocate_address.cpp | 5 +-
.../ec2/tests/gtest_associate_address.cpp | 38 ++++++
...gtest_authorize_security_group_ingress.cpp | 25 ++++
.../ec2/tests/gtest_create_key_pair.cpp | 2 +-
.../ec2/tests/gtest_create_security_group.cpp | 2 +-
.../ec2/tests/gtest_create_tags.cpp | 29 +++++
.../ec2/tests/gtest_delete_key_pair.cpp | 2 +-
.../ec2/tests/gtest_delete_security_group.cpp | 2 +-
.../ec2/tests/gtest_describe_addresses.cpp | 2 +-
.../gtest_describe_availability_zones.cpp | 24 ++++
.../ec2/tests/gtest_describe_instances.cpp | 2 +-
.../ec2/tests/gtest_describe_key_pairs.cpp | 2 +-
.../gtest_describe_regions_and_zones.cpp | 2 +-
.../tests/gtest_describe_security_groups.cpp | 2 +-
.../ec2/tests/gtest_monitor_instance.cpp | 4 +-
.../ec2/tests/gtest_reboot_instance.cpp | 2 +-
.../ec2/tests/gtest_release_address.cpp | 2 +-
...e_instance.cpp => gtest_run_instances.cpp} | 2 +-
.../ec2/tests/gtest_start_stop_instance.cpp | 4 +-
.../ec2/tests/gtest_terminate_instances.cpp | 2 +-
49 files changed, 863 insertions(+), 484 deletions(-)
create mode 100644 cpp/example_code/ec2/associate_address.cpp
create mode 100644 cpp/example_code/ec2/authorize_security_group_ingress.cpp
create mode 100644 cpp/example_code/ec2/create_tags.cpp
create mode 100644 cpp/example_code/ec2/describe_availability_zones.cpp
rename cpp/example_code/ec2/{create_instance.cpp => run_instances.cpp} (65%)
create mode 100644 cpp/example_code/ec2/tests/gtest_associate_address.cpp
create mode 100644 cpp/example_code/ec2/tests/gtest_authorize_security_group_ingress.cpp
create mode 100644 cpp/example_code/ec2/tests/gtest_create_tags.cpp
create mode 100644 cpp/example_code/ec2/tests/gtest_describe_availability_zones.cpp
rename cpp/example_code/ec2/tests/{gtest_create_instance.cpp => gtest_run_instances.cpp} (93%)
diff --git a/.doc_gen/metadata/ec2_metadata.yaml b/.doc_gen/metadata/ec2_metadata.yaml
index e1568abd7ba..3b8880fda8b 100644
--- a/.doc_gen/metadata/ec2_metadata.yaml
+++ b/.doc_gen/metadata/ec2_metadata.yaml
@@ -155,7 +155,7 @@ ec2_CreateKeyPair:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.create_key_pair.code
+          - cpp.example_code.ec2.CreateKeyPair
Bash:
versions:
- sdk_version: 2
@@ -233,7 +233,7 @@ ec2_DescribeKeyPairs:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.describe_key_pairs.code
+ - cpp.example_code.ec2.DescribeKeyPairs
Bash:
versions:
- sdk_version: 2
@@ -322,7 +322,6 @@ ec2_CreateSecurityGroup:
excerpts:
- description:
snippet_tags:
- - cpp.example_code.ec2.create_security_group.client
- cpp.example_code.ec2.CreateSecurityGroup
Bash:
versions:
@@ -398,11 +397,9 @@ ec2_RunInstances:
versions:
- sdk_version: 1
github: cpp/example_code/ec2
- sdkguide:
excerpts:
- description:
snippet_tags:
- - cpp.example_code.ec2.create_instance.client
- cpp.example_code.ec2.RunInstances
Bash:
versions:
@@ -498,7 +495,7 @@ ec2_StartInstances:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.start_instance.code
+ - cpp.example_code.ec2.StartInstances
Bash:
versions:
- sdk_version: 2
@@ -593,7 +590,7 @@ ec2_StopInstances:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.stop_instance.code
+ - cpp.example_code.ec2.StopInstances
Bash:
versions:
- sdk_version: 2
@@ -680,8 +677,8 @@ ec2_AllocateAddress:
excerpts:
- description:
snippet_tags:
- - cpp.example_code.ec2.allocate_address.client
- cpp.example_code.ec2.AllocateAddress
+ - cpp.example_code.ec2.AllocateAddress2
Bash:
versions:
- sdk_version: 2
@@ -768,7 +765,6 @@ ec2_AssociateAddress:
excerpts:
- description:
snippet_tags:
- - cpp.example_code.ec2.allocate_address.client
- cpp.example_code.ec2.AssociateAddress
Bash:
versions:
@@ -918,7 +914,7 @@ ec2_ReleaseAddress:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.release_address.code
+ - cpp.example_code.ec2.ReleaseAddress
Bash:
versions:
- sdk_version: 2
@@ -979,9 +975,10 @@ ec2_AuthorizeSecurityGroupIngress:
excerpts:
- description:
snippet_tags:
- - cpp.example_code.ec2.allocate_address.client
- - ec2.cpp.configure_security_group02.code
- - ec2.cpp.configure_security_group03.code
+ - cpp.example_code.ec2.AuthorizeSecurityGroupIngress
+ - description: Utility function to build an ingress rule.
+ snippet_tags:
+ - cpp.example_code.ec2.BuildSampleIngressRule
JavaScript:
versions:
- sdk_version: 3
@@ -1068,7 +1065,7 @@ ec2_DeleteKeyPair:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.delete_key_pair.code
+ - cpp.example_code.ec2.DeleteKeyPair
Bash:
versions:
- sdk_version: 2
@@ -1146,7 +1143,7 @@ ec2_DescribeSecurityGroups:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.describe_security_groups.code
+ - cpp.example_code.ec2.DescribeSecurityGroups
Bash:
versions:
- sdk_version: 2
@@ -1224,7 +1221,7 @@ ec2_DeleteSecurityGroup:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.delete_security_group.code
+ - cpp.example_code.ec2.DeleteSecurityGroup
Bash:
versions:
- sdk_version: 2
@@ -1422,7 +1419,7 @@ ec2_DescribeInstances:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.describe_instances.code
+ - cpp.example_code.ec2.DescribeInstances
Bash:
versions:
- sdk_version: 2
@@ -1479,7 +1476,6 @@ ec2_DescribeRegions:
excerpts:
- description:
snippet_tags:
- - cpp.example_code.ec2.describe_regions.client
- cpp.example_code.ec2.DescribeRegions
services:
ec2: {DescribeRegions}
@@ -1521,7 +1517,7 @@ ec2_MonitorInstances:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.enable_monitor_instance.code
+ - cpp.example_code.ec2.MonitorInstances
services:
ec2: {MonitorInstances}
ec2_UnmonitorInstances:
@@ -1533,7 +1529,7 @@ ec2_UnmonitorInstances:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.disable_monitor_instance.code
+ - cpp.example_code.ec2.UnmonitorInstances
JavaScript:
versions:
- sdk_version: 3
@@ -1591,7 +1587,7 @@ ec2_RebootInstances:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.reboot_instance.code
+ - cpp.example_code.ec2.RebootInstances
Python:
versions:
- sdk_version: 3
@@ -1734,7 +1730,7 @@ ec2_DescribeAddresses:
excerpts:
- description:
snippet_tags:
- - ec2.cpp.describe_addresses.code
+ - cpp.example_code.ec2.DescribeAddresses
services:
ec2: {DescribeAddresses}
ec2_DescribeAvailabilityZones:
@@ -1762,8 +1758,7 @@ ec2_DescribeAvailabilityZones:
excerpts:
- description:
snippet_tags:
- - cpp.example_code.ec2.describe_regions.client
- - ec2.cpp.describe_zones.code
+ - cpp.example_code.ec2.DescribeAvailabilityZones
Python:
versions:
- sdk_version: 3
@@ -1781,11 +1776,9 @@ ec2_CreateTags:
versions:
- sdk_version: 1
github: cpp/example_code/ec2
- sdkguide:
excerpts:
- description:
snippet_tags:
- - cpp.example_code.ec2.create_instance.client
- cpp.example_code.ec2.CreateTags
services:
ec2: {CreateTags}
diff --git a/.doc_gen/validation.yaml b/.doc_gen/validation.yaml
index c368e7914a1..bbf7223f678 100644
--- a/.doc_gen/validation.yaml
+++ b/.doc_gen/validation.yaml
@@ -206,6 +206,7 @@ allow_list:
- "com/pinterest/ktlint/releases/download/1"
- "aws/s3/model/AbortMultipartUploadRequest"
- "src/main/kotlin/com/example/ecr/HelloECR"
+ - "aws/ec2/model/DisassociateAddressRequest"
sample_files:
- "README.md"
- "chat_sfn_state_machine.json"
diff --git a/cpp/example_code/ec2/CMakeLists.txt b/cpp/example_code/ec2/CMakeLists.txt
index d09da3d7317..d04e7f746f8 100644
--- a/cpp/example_code/ec2/CMakeLists.txt
+++ b/cpp/example_code/ec2/CMakeLists.txt
@@ -13,14 +13,6 @@ project("${SERVICE_NAME}-examples")
# Set the C++ standard to use to build this target.
set(CMAKE_CXX_STANDARD 11)
-# Build shared libraries by default.
-set(BUILD_SHARED_LIBS ON)
-
-# Enable CTest for testing these code examples.
-if (BUILD_TESTS)
- include(CTest)
-endif ()
-
# Use the MSVC variable to determine if this is a Windows build.
set(WINDOWS_BUILD ${MSVC})
@@ -37,7 +29,7 @@ if (WINDOWS_BUILD AND AWSSDK_INSTALL_AS_SHARED_LIBS)
# Copy relevant AWS SDK for C++ libraries into the current binary directory for running and debugging.
# set(BIN_SUB_DIR "/Debug") # If you are building from the command line, you may need to uncomment this
- # and set the proper subdirectory to the executables' location.
+ # and set the proper subdirectory to the executable's location.
AWSSDK_CPY_DYN_LIBS(SERVICE_COMPONENTS "" ${CMAKE_CURRENT_BINARY_DIR}${BIN_SUB_DIR})
endif ()
@@ -47,7 +39,7 @@ endif ()
if (NOT DEFINED AWSDOC_SOURCE)
file(GLOB AWSDOC_SOURCE
"*.cpp"
- )
+ )
endif ()
foreach (file ${AWSDOC_SOURCE})
diff --git a/cpp/example_code/ec2/README.md b/cpp/example_code/ec2/README.md
index 958699dfd2c..ca7d4b97265 100644
--- a/cpp/example_code/ec2/README.md
+++ b/cpp/example_code/ec2/README.md
@@ -46,28 +46,28 @@ Next, for information on code example structures and how to build and run the ex
Code excerpts that show you how to call individual service functions.
-- [AllocateAddress](allocate_address.cpp#L43)
-- [AssociateAddress](allocate_address.cpp#L58)
-- [AuthorizeSecurityGroupIngress](allocate_address.cpp#L39)
-- [CreateKeyPair](create_key_pair.cpp#L33)
-- [CreateSecurityGroup](create_security_group.cpp#L59)
-- [CreateTags](create_instance.cpp#L72)
-- [DeleteKeyPair](delete_key_pair.cpp#L34)
-- [DeleteSecurityGroup](delete_security_group.cpp#L32)
-- [DescribeAddresses](describe_addresses.cpp#L33)
-- [DescribeAvailabilityZones](describe_regions_and_zones.cpp#L38)
-- [DescribeInstances](describe_instances.cpp#L33)
-- [DescribeKeyPairs](describe_key_pairs.cpp#L33)
-- [DescribeRegions](describe_regions_and_zones.cpp#L41)
-- [DescribeSecurityGroups](describe_security_groups.cpp#L34)
-- [MonitorInstances](monitor_instance.cpp#L35)
-- [RebootInstances](reboot_instance.cpp#L32)
-- [ReleaseAddress](release_address.cpp#L31)
-- [RunInstances](create_instance.cpp#L44)
-- [StartInstances](start_stop_instance.cpp#L38)
-- [StopInstances](start_stop_instance.cpp#L84)
-- [TerminateInstances](terminate_instances.cpp#L30)
-- [UnmonitorInstances](monitor_instance.cpp#L82)
+- [AllocateAddress](allocate_address.cpp#L73)
+- [AssociateAddress](associate_address.cpp#L20)
+- [AuthorizeSecurityGroupIngress](authorize_security_group_ingress.cpp#L26)
+- [CreateKeyPair](create_key_pair.cpp#L23)
+- [CreateSecurityGroup](create_security_group.cpp#L22)
+- [CreateTags](create_tags.cpp#L21)
+- [DeleteKeyPair](delete_key_pair.cpp#L22)
+- [DeleteSecurityGroup](delete_security_group.cpp#L22)
+- [DescribeAddresses](describe_addresses.cpp#L24)
+- [DescribeAvailabilityZones](describe_availability_zones.cpp#L23)
+- [DescribeInstances](describe_instances.cpp#L24)
+- [DescribeKeyPairs](describe_key_pairs.cpp#L24)
+- [DescribeRegions](describe_regions_and_zones.cpp#L24)
+- [DescribeSecurityGroups](describe_security_groups.cpp#L24)
+- [MonitorInstances](monitor_instance.cpp#L23)
+- [RebootInstances](reboot_instance.cpp#L22)
+- [ReleaseAddress](release_address.cpp#L22)
+- [RunInstances](run_instances.cpp#L23)
+- [StartInstances](start_stop_instance.cpp#L27)
+- [StopInstances](start_stop_instance.cpp#L72)
+- [TerminateInstances](terminate_instances.cpp#L20)
+- [UnmonitorInstances](monitor_instance.cpp#L70)
diff --git a/cpp/example_code/ec2/allocate_address.cpp b/cpp/example_code/ec2/allocate_address.cpp
index aca4a330986..2c4a2fa189b 100644
--- a/cpp/example_code/ec2/allocate_address.cpp
+++ b/cpp/example_code/ec2/allocate_address.cpp
@@ -13,34 +13,31 @@
**/
// snippet-start:[ec2.cpp.allocate_address.inc]
-#include
#include
#include
-#include
#include
-#include
#include
// snippet-end:[ec2.cpp.allocate_address.inc]
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.AllocateAddress]
//! Allocate an Elastic IP address and associate it with an Amazon Elastic Compute Cloud
//! (Amazon EC2) instance.
/*!
- \sa AllocateAndAssociateAddress()
\param instanceID: An EC2 instance ID.
- \param allocationId: String to return the allocation ID of the address.
+ \param[out] publicIPAddress: String to return the public IP address.
+ \param[out] allocationID: String to return the allocation ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::AllocateAndAssociateAddress(const Aws::String &instanceId,
- Aws::String &allocationId,
+bool AwsDoc::EC2::allocateAndAssociateAddress(const Aws::String &instanceId, Aws::String &publicIPAddress,
+ Aws::String &allocationID,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.allocate_address.code]
// snippet-start:[cpp.example_code.ec2.allocate_address.client]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
// snippet-end:[cpp.example_code.ec2.allocate_address.client]
- // snippet-start:[cpp.example_code.ec2.AllocateAddress]
Aws::EC2::Model::AllocateAddressRequest request;
request.SetDomain(Aws::EC2::Model::DomainType::vpc);
@@ -51,31 +48,32 @@ bool AwsDoc::EC2::AllocateAndAssociateAddress(const Aws::String &instanceId,
outcome.GetError().GetMessage() << std::endl;
return false;
}
+ const Aws::EC2::Model::AllocateAddressResponse &response = outcome.GetResult();
+ allocationID = response.GetAllocationId();
+ publicIPAddress = response.GetPublicIp();
- allocationId = outcome.GetResult().GetAllocationId();
// snippet-end:[cpp.example_code.ec2.AllocateAddress]
- // snippet-start:[cpp.example_code.ec2.AssociateAddress]
Aws::EC2::Model::AssociateAddressRequest associate_request;
associate_request.SetInstanceId(instanceId);
- associate_request.SetAllocationId(allocationId);
+ associate_request.SetAllocationId(allocationID);
const Aws::EC2::Model::AssociateAddressOutcome associate_outcome =
ec2Client.AssociateAddress(associate_request);
if (!associate_outcome.IsSuccess()) {
- std::cerr << "Failed to associate Elastic IP address " << allocationId
+ std::cerr << "Failed to associate Elastic IP address " << allocationID
<< " with instance " << instanceId << ":" <<
associate_outcome.GetError().GetMessage() << std::endl;
return false;
}
- std::cout << "Successfully associated Elastic IP address " << allocationId
+ std::cout << "Successfully associated Elastic IP address " << allocationID
<< " with instance " << instanceId << std::endl;
- // snippet-end:[cpp.example_code.ec2.AssociateAddress]
// snippet-end:[ec2.cpp.allocate_address.code]
-
+// snippet-start:[cpp.example_code.ec2.AllocateAddress2]
return true;
}
+// snippet-end:[cpp.example_code.ec2.AllocateAddress2]
/*
* main function
@@ -102,8 +100,9 @@ int main(int argc, char **argv) {
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
Aws::String instanceID = argv[1];
+ Aws::String publicIPAddress;
Aws::String allocationID;
- AwsDoc::EC2::AllocateAndAssociateAddress(instanceID, allocationID,
+ AwsDoc::EC2::allocateAndAssociateAddress(instanceID, publicIPAddress, allocationID,
clientConfig);
}
Aws::ShutdownAPI(options);
diff --git a/cpp/example_code/ec2/associate_address.cpp b/cpp/example_code/ec2/associate_address.cpp
new file mode 100644
index 00000000000..9006eb64a4b
--- /dev/null
+++ b/cpp/example_code/ec2/associate_address.cpp
@@ -0,0 +1,87 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Before running this C++ code example, set up your development environment, including your credentials.
+ *
+ * For more information, see the following documentation topic:
+ *
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started.html
+ *
+ * For information on the structure of the code examples and how to build and run the examples, see
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started-code-examples.html.
+ *
+ **/
+
+#include
+#include
+#include
+#include "ec2_samples.h"
+
+// snippet-start:[cpp.example_code.ec2.AssociateAddress]
+//! Associate an Elastic IP address with an EC2 instance.
+/*!
+ \param instanceId: An EC2 instance ID.
+ \param allocationId: An Elastic IP allocation ID.
+ \param[out] associationID: String to receive the association ID.
+ \param clientConfiguration: AWS client configuration.
+ \return bool: True if the address was associated with the instance; otherwise, false.
+ */
+bool AwsDoc::EC2::associateAddress(const Aws::String &instanceId, const Aws::String &allocationId,
+ Aws::String &associationID,
+ const Aws::Client::ClientConfiguration &clientConfiguration) {
+ Aws::EC2::EC2Client ec2Client(clientConfiguration);
+
+ Aws::EC2::Model::AssociateAddressRequest request;
+ request.SetInstanceId(instanceId);
+ request.SetAllocationId(allocationId);
+
+ Aws::EC2::Model::AssociateAddressOutcome outcome = ec2Client.AssociateAddress(request);
+
+ if (!outcome.IsSuccess()) {
+ std::cerr << "Failed to associate address " << allocationId <<
+ " with instance " << instanceId << ": " <<
+ outcome.GetError().GetMessage() << std::endl;
+ } else {
+ std::cout << "Successfully associated address " << allocationId <<
+ " with instance " << instanceId << std::endl;
+ associationID = outcome.GetResult().GetAssociationId();
+ }
+
+ return outcome.IsSuccess();
+}
+// snippet-end:[cpp.example_code.ec2.AssociateAddress]
+
+/*
+ * main function
+ *
+ * Usage:
+ * run_associate_address <instance_id> <allocation_id>
+ *
+ */
+
+#ifndef TESTING_BUILD
+
+int main(int argc, char **argv) {
+ if (argc != 3) {
+        std::cout << "Usage: run_associate_address <instance_id> <allocation_id>" << std::endl;
+ return 1;
+ }
+
+ Aws::SDKOptions options;
+ Aws::InitAPI(options);
+ {
+ Aws::String instanceId = argv[1];
+ Aws::String allocationId = argv[2];
+
+ Aws::Client::ClientConfiguration clientConfig;
+ // Optional: Set to the AWS Region (overrides config file).
+ // clientConfig.region = "us-east-1";
+
+ Aws::String associationID;
+ AwsDoc::EC2::associateAddress(instanceId, allocationId, associationID, clientConfig);
+ }
+ Aws::ShutdownAPI(options);
+ return 0;
+}
+
+#endif // TESTING_BUILD
\ No newline at end of file
diff --git a/cpp/example_code/ec2/authorize_security_group_ingress.cpp b/cpp/example_code/ec2/authorize_security_group_ingress.cpp
new file mode 100644
index 00000000000..a5a594915dc
--- /dev/null
+++ b/cpp/example_code/ec2/authorize_security_group_ingress.cpp
@@ -0,0 +1,120 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Before running this C++ code example, set up your development environment, including your credentials.
+ *
+ * For more information, see the following documentation topic:
+ *
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started.html
+ *
+ * For information on the structure of the code examples and how to build and run the examples, see
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started-code-examples.html.
+ *
+ **/
+
+#include
+#include
+// snippet-start:[ec2.cpp.configure_security_group.inc]
+#include <aws/ec2/model/AuthorizeSecurityGroupIngressRequest.h>
+// snippet-end:[ec2.cpp.configure_security_group.inc]
+#include
+#include "ec2_samples.h"
+
+static void buildSampleIngressRule(
+ Aws::EC2::Model::AuthorizeSecurityGroupIngressRequest &authorize_request);
+
+// snippet-start:[cpp.example_code.ec2.AuthorizeSecurityGroupIngress]
+//! Authorize ingress to an Amazon Elastic Compute Cloud (Amazon EC2) group.
+/*!
+ \param groupID: The EC2 group ID.
+ \param clientConfiguration: The ClientConfiguration object.
+ \return bool: True if the operation was successful, false otherwise.
+ */
+bool
+AwsDoc::EC2::authorizeSecurityGroupIngress(const Aws::String &groupID,
+ const Aws::Client::ClientConfiguration &clientConfiguration) {
+ Aws::EC2::EC2Client ec2Client(clientConfiguration);
+    // snippet-start:[ec2.cpp.configure_security_group01.code]
+ Aws::EC2::Model::AuthorizeSecurityGroupIngressRequest authorizeSecurityGroupIngressRequest;
+ authorizeSecurityGroupIngressRequest.SetGroupId(groupID);
+ // snippet-end:[ec2.cpp.configure_security_group01.code]
+ buildSampleIngressRule(authorizeSecurityGroupIngressRequest);
+
+ // snippet-start:[ec2.cpp.configure_security_group03.code]
+ Aws::EC2::Model::AuthorizeSecurityGroupIngressOutcome authorizeSecurityGroupIngressOutcome =
+ ec2Client.AuthorizeSecurityGroupIngress(authorizeSecurityGroupIngressRequest);
+
+ if (authorizeSecurityGroupIngressOutcome.IsSuccess()) {
+ std::cout << "Successfully authorized security group ingress." << std::endl;
+ } else {
+ std::cerr << "Error authorizing security group ingress: "
+ << authorizeSecurityGroupIngressOutcome.GetError().GetMessage() << std::endl;
+ }
+ // snippet-end:[ec2.cpp.configure_security_group03.code]
+
+ return authorizeSecurityGroupIngressOutcome.IsSuccess();
+}
+// snippet-end:[cpp.example_code.ec2.AuthorizeSecurityGroupIngress]
+// snippet-start:[cpp.example_code.ec2.BuildSampleIngressRule]
+//! Build a sample ingress rule.
+/*!
+ \param authorize_request: An 'AuthorizeSecurityGroupIngressRequest' instance.
+ \return void:
+ */
+void buildSampleIngressRule(
+ Aws::EC2::Model::AuthorizeSecurityGroupIngressRequest &authorize_request) {
+ // snippet-start:[ec2.cpp.configure_security_group02.code]
+ Aws::String ingressIPRange = "203.0.113.0/24"; // Configure this for your allowed IP range.
+ Aws::EC2::Model::IpRange ip_range;
+ ip_range.SetCidrIp(ingressIPRange);
+
+ Aws::EC2::Model::IpPermission permission1;
+ permission1.SetIpProtocol("tcp");
+ permission1.SetToPort(80);
+ permission1.SetFromPort(80);
+ permission1.AddIpRanges(ip_range);
+
+ authorize_request.AddIpPermissions(permission1);
+
+ Aws::EC2::Model::IpPermission permission2;
+ permission2.SetIpProtocol("tcp");
+ permission2.SetToPort(22);
+ permission2.SetFromPort(22);
+ permission2.AddIpRanges(ip_range);
+
+ authorize_request.AddIpPermissions(permission2);
+ // snippet-end:[ec2.cpp.configure_security_group02.code]
+}
+// snippet-end:[cpp.example_code.ec2.BuildSampleIngressRule]
+
+/*
+ *
+ * main function
+ *
+ * Usage: run_authorize_security_group_ingress <group_id>
+ *
+ */
+
+#ifndef TESTING_BUILD
+
+int main(int argc, char **argv) {
+ if (argc != 2) {
+        std::cout << "Usage: run_authorize_security_group_ingress <group_id>" << std::endl;
+ return 1;
+ }
+
+ Aws::SDKOptions options;
+ Aws::InitAPI(options);
+ {
+ Aws::String groupID = argv[1];
+ Aws::Client::ClientConfiguration clientConfig;
+ // Optional: Set to the region where your resources reside.
+ clientConfig.region = "us-east-1";
+
+ AwsDoc::EC2::authorizeSecurityGroupIngress(groupID, clientConfig);
+ }
+ Aws::ShutdownAPI(options);
+ return 0;
+}
+
+#endif // TESTING_BUILD
\ No newline at end of file
diff --git a/cpp/example_code/ec2/create_key_pair.cpp b/cpp/example_code/ec2/create_key_pair.cpp
index cb0017eb464..9fb05c3d571 100644
--- a/cpp/example_code/ec2/create_key_pair.cpp
+++ b/cpp/example_code/ec2/create_key_pair.cpp
@@ -13,22 +13,22 @@
**/
// snippet-start:[ec2.cpp.create_key_pair.inc]
-#include
#include
#include
-#include
#include
+#include <fstream>
// snippet-end:[ec2.cpp.create_key_pair.inc]
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.CreateKeyPair]
//! Create an Amazon Elastic Compute Cloud (Amazon EC2) instance key pair.
/*!
- \sa CreateKeyPair()
\param keyPairName: A name for a key pair.
+ \param keyFilePath: File path where the key material is saved. Ignored if it is an empty string.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::CreateKeyPair(const Aws::String &keyPairName,
+bool AwsDoc::EC2::createKeyPair(const Aws::String &keyPairName, const Aws::String &keyFilePath,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.create_key_pair.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
@@ -37,31 +37,39 @@ bool AwsDoc::EC2::CreateKeyPair(const Aws::String &keyPairName,
Aws::EC2::Model::CreateKeyPairOutcome outcome = ec2Client.CreateKeyPair(request);
if (!outcome.IsSuccess()) {
- std::cerr << "Failed to create key pair:" <<
+ std::cerr << "Failed to create key pair - " << keyPairName << ". " <<
outcome.GetError().GetMessage() << std::endl;
- }
- else {
+ } else {
std::cout << "Successfully created key pair named " <<
keyPairName << std::endl;
+ if (!keyFilePath.empty()) {
+ std::ofstream keyFile(keyFilePath.c_str());
+ keyFile << outcome.GetResult().GetKeyMaterial();
+ keyFile.close();
+ std::cout << "Keys written to the file " <<
+ keyFilePath << std::endl;
+ }
+
}
// snippet-end:[ec2.cpp.create_key_pair.code]
return outcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.CreateKeyPair]
/*
* main function
*
-* Usage: 'run_create_key_pair '
+* Usage: 'run_create_key_pair <key_pair_name> [key_file_path]'
*
*/
#ifndef TESTING_BUILD
int main(int argc, char **argv) {
- if (argc != 2) {
- std::cout << "run_create_key_pair "
+ if (argc < 2) {
+        std::cout << "Usage: 'run_create_key_pair <key_pair_name> [key_file_path]'"
<< std::endl;
return 1;
}
@@ -73,7 +81,12 @@ int main(int argc, char **argv) {
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
Aws::String keyPairName = argv[1];
- AwsDoc::EC2::CreateKeyPair(keyPairName, clientConfig);
+ Aws::String keyFilePath;
+ if (argc > 2) {
+ keyFilePath = argv[2];
+ }
+
+ AwsDoc::EC2::createKeyPair(keyPairName, keyFilePath, clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
diff --git a/cpp/example_code/ec2/create_security_group.cpp b/cpp/example_code/ec2/create_security_group.cpp
index 20d487e6f48..6a8f787fffc 100644
--- a/cpp/example_code/ec2/create_security_group.cpp
+++ b/cpp/example_code/ec2/create_security_group.cpp
@@ -13,50 +13,30 @@
**/
// snippet-start:[ec2.cpp.create_security_group.inc]
-#include
#include
#include
-#include
// snippet-end:[ec2.cpp.create_security_group.inc]
-// snippet-start:[ec2.cpp.configure_security_group.inc]
-#include
-// snippet-end:[ec2.cpp.configure_security_group.inc]
#include
#include "ec2_samples.h"
-namespace AwsDoc {
- namespace EC2 {
- //! Build a sample ingress rule.
- /*!
- \sa BuildSampleIngressRule()
- \param authorize_request: An 'AuthorizeSecurityGroupIngressRequest' instance.
- \return void:
- */
- void BuildSampleIngressRule(
- Aws::EC2::Model::AuthorizeSecurityGroupIngressRequest &authorize_request);
- } // EC2 {
-} // AwsDoc
-
+// snippet-start:[cpp.example_code.ec2.CreateSecurityGroup]
//! Create a security group.
/*!
- \sa CreateSecurityGroup()
\param groupName: A security group name.
\param description: A description.
\param vpcID: A virtual private cloud (VPC) ID.
- \param groupIDResult: A string to receive the group ID.
+ \param[out] groupIDResult: A string to receive the group ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::CreateSecurityGroup(const Aws::String &groupName,
+bool AwsDoc::EC2::createSecurityGroup(const Aws::String &groupName,
const Aws::String &description,
const Aws::String &vpcID,
Aws::String &groupIDResult,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.create_security_group.code]
- // snippet-start:[cpp.example_code.ec2.create_security_group.client]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
- // snippet-end:[cpp.example_code.ec2.create_security_group.client]
- // snippet-start:[cpp.example_code.ec2.CreateSecurityGroup]
+
Aws::EC2::Model::CreateSecurityGroupRequest request;
request.SetGroupName(groupName);
@@ -75,65 +55,14 @@ bool AwsDoc::EC2::CreateSecurityGroup(const Aws::String &groupName,
std::cout << "Successfully created security group named " << groupName <<
std::endl;
// snippet-end:[ec2.cpp.create_security_group.code]
- // snippet-end:[cpp.example_code.ec2.CreateSecurityGroup]
-
- groupIDResult = outcome.GetResult().GetGroupId();
-
- // snippet-start:[ec2.cpp.configure_security_group01.code]
- Aws::EC2::Model::AuthorizeSecurityGroupIngressRequest authorizeRequest;
-
- authorizeRequest.SetGroupId(groupIDResult);
- // snippet-end:[ec2.cpp.configure_security_group01.code]
-
- BuildSampleIngressRule(authorizeRequest);
- // snippet-start:[ec2.cpp.configure_security_group03.code]
- const Aws::EC2::Model::AuthorizeSecurityGroupIngressOutcome authorizeOutcome =
- ec2Client.AuthorizeSecurityGroupIngress(authorizeRequest);
- if (!authorizeOutcome.IsSuccess()) {
- std::cerr << "Failed to set ingress policy for security group " <<
- groupName << ":" << authorizeOutcome.GetError().GetMessage() <<
- std::endl;
- return false;
- }
-
- std::cout << "Successfully added ingress policy to security group " <<
- groupName << std::endl;
- // snippet-end:[ec2.cpp.configure_security_group03.code]
+ groupIDResult = outcome.GetResult().GetGroupId();
return true;
}
+// snippet-end:[cpp.example_code.ec2.CreateSecurityGroup]
-//! Build a sample ingress rule.
-/*!
- \sa BuildSampleIngressRule()
- \param authorize_request: An 'AuthorizeSecurityGroupIngressRequest' instance.
- \return void:
- */
-void AwsDoc::EC2::BuildSampleIngressRule(
- Aws::EC2::Model::AuthorizeSecurityGroupIngressRequest &authorize_request) {
- // snippet-start:[ec2.cpp.configure_security_group02.code]
- Aws::EC2::Model::IpRange ip_range;
- ip_range.SetCidrIp("0.0.0.0/0");
-
- Aws::EC2::Model::IpPermission permission1;
- permission1.SetIpProtocol("tcp");
- permission1.SetToPort(80);
- permission1.SetFromPort(80);
- permission1.AddIpRanges(ip_range);
-
- authorize_request.AddIpPermissions(permission1);
-
- Aws::EC2::Model::IpPermission permission2;
- permission2.SetIpProtocol("tcp");
- permission2.SetToPort(22);
- permission2.SetFromPort(22);
- permission2.AddIpRanges(ip_range);
-
- authorize_request.AddIpPermissions(permission2);
- // snippet-end:[ec2.cpp.configure_security_group02.code]
-}
/*
@@ -165,7 +94,7 @@ int main(int argc, char **argv) {
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
Aws::String groupIDResult;
- AwsDoc::EC2::CreateSecurityGroup(group_name, group_desc, vpc_id,
+ AwsDoc::EC2::createSecurityGroup(group_name, group_desc, vpc_id,
groupIDResult, clientConfig);
}
Aws::ShutdownAPI(options);
diff --git a/cpp/example_code/ec2/create_tags.cpp b/cpp/example_code/ec2/create_tags.cpp
new file mode 100644
index 00000000000..84d120ac150
--- /dev/null
+++ b/cpp/example_code/ec2/create_tags.cpp
@@ -0,0 +1,90 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Before running this C++ code example, set up your development environment, including your credentials.
+ *
+ * For more information, see the following documentation topic:
+ *
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started.html
+ *
+ * For information on the structure of the code examples and how to build and run the examples, see
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started-code-examples.html.
+ *
+ **/
+
+#include
+#include
+#include
+#include
+#include "ec2_samples.h"
+
+// snippet-start:[cpp.example_code.ec2.CreateTags]
+//! Add or overwrite only the specified tags for the specified Amazon Elastic Compute Cloud (Amazon EC2) resource or resources.
+/*!
+ \param resources: The resources for the tags.
+ \param tags: Vector of tags.
+ \param clientConfiguration: AWS client configuration.
+ \return bool: Function succeeded.
+ */
+bool AwsDoc::EC2::createTags(const Aws::Vector<Aws::String> &resources,
+ const Aws::Vector<Aws::EC2::Model::Tag> &tags,
+ const Aws::Client::ClientConfiguration &clientConfiguration) {
+ Aws::EC2::EC2Client ec2Client(clientConfiguration);
+
+ Aws::EC2::Model::CreateTagsRequest createTagsRequest;
+ createTagsRequest.SetResources(resources);
+ createTagsRequest.SetTags(tags);
+
+ Aws::EC2::Model::CreateTagsOutcome outcome = ec2Client.CreateTags(createTagsRequest);
+
+ if (outcome.IsSuccess()) {
+ std::cout << "Successfully created tags for resources" << std::endl;
+ } else {
+ std::cerr << "Failed to create tags for resources, " << outcome.GetError().GetMessage() << std::endl;
+ }
+
+ return outcome.IsSuccess();
+}
+// snippet-end:[cpp.example_code.ec2.CreateTags]
+
+/*
+ *
+ * main function
+ *
+ * Usage: 'run_create_tags <instance_id> <name_tag>'
+ *
+ */
+
+#ifndef TESTING_BUILD
+
+int main(int argc, char **argv) {
+ if (argc != 3) {
+ std::cout << "Usage: run_create_tags " << std::endl;
+ return 1;
+ }
+
+ Aws::SDKOptions options;
+ Aws::InitAPI(options);
+
+ {
+ Aws::String instanceId = argv[1];
+ Aws::String nameTag = argv[2];
+
+ Aws::Client::ClientConfiguration clientConfig;
+ // Optional: Set to the AWS Region (overrides config file).
+ // clientConfig.region = "us-east-1";
+
+ Aws::Vector<Aws::String> resources;
+ resources.push_back(instanceId);
+
+ Aws::Vector<Aws::EC2::Model::Tag> tags;
+ tags.push_back(Aws::EC2::Model::Tag().WithKey("Name").WithValue(nameTag));
+
+ AwsDoc::EC2::createTags(resources, tags, clientConfig);
+ }
+
+ Aws::ShutdownAPI(options);
+ return 0;
+}
+
+#endif // TESTING_BUILD
\ No newline at end of file
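A short sketch of applying several tags in one createTags call; the instance ID and tag values are hypothetical.

#include <aws/core/Aws.h>
#include <aws/ec2/model/Tag.h>
#include "ec2_samples.h"

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::Client::ClientConfiguration clientConfig;
        // Hypothetical instance ID, used only for illustration.
        Aws::Vector<Aws::String> resources{"i-0123456789abcdef0"};

        Aws::Vector<Aws::EC2::Model::Tag> tags;
        tags.push_back(Aws::EC2::Model::Tag().WithKey("Name").WithValue("web-server"));
        tags.push_back(Aws::EC2::Model::Tag().WithKey("Environment").WithValue("test"));

        AwsDoc::EC2::createTags(resources, tags, clientConfig);
    }
    Aws::ShutdownAPI(options);
    return 0;
}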
diff --git a/cpp/example_code/ec2/delete_key_pair.cpp b/cpp/example_code/ec2/delete_key_pair.cpp
index 06b3c6f8518..2e1494b1887 100644
--- a/cpp/example_code/ec2/delete_key_pair.cpp
+++ b/cpp/example_code/ec2/delete_key_pair.cpp
@@ -13,23 +13,21 @@
*
**/
// snippet-start:[ec2.cpp.delete_key_pair.inc]
-#include
#include
#include
#include
// snippet-end:[ec2.cpp.delete_key_pair.inc]
#include "ec2_samples.h"
-
+// snippet-start:[cpp.example_code.ec2.DeleteKeyPair]
//! Delete an Amazon Elastic Compute Cloud (Amazon EC2) instance key pair.
/*!
- \sa DeleteKeyPair()
\param keyPairName: A name for a key pair.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::DeleteKeyPair(const Aws::String &keyPairName,
+bool AwsDoc::EC2::deleteKeyPair(const Aws::String &keyPairName,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.delete_key_pair.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
@@ -42,8 +40,7 @@ bool AwsDoc::EC2::DeleteKeyPair(const Aws::String &keyPairName,
if (!outcome.IsSuccess()) {
std::cerr << "Failed to delete key pair " << keyPairName <<
":" << outcome.GetError().GetMessage() << std::endl;
- }
- else {
+ } else {
std::cout << "Successfully deleted key pair named " << keyPairName <<
std::endl;
}
@@ -51,6 +48,7 @@ bool AwsDoc::EC2::DeleteKeyPair(const Aws::String &keyPairName,
return outcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.DeleteKeyPair]
/*
* main function
@@ -77,7 +75,7 @@ int main(int argc, char **argv) {
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
Aws::String keyPairName = argv[1];
- AwsDoc::EC2::DeleteKeyPair(keyPairName, clientConfig);
+ AwsDoc::EC2::deleteKeyPair(keyPairName, clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
diff --git a/cpp/example_code/ec2/delete_security_group.cpp b/cpp/example_code/ec2/delete_security_group.cpp
index c3bdd1d49c0..9988efa8b1b 100644
--- a/cpp/example_code/ec2/delete_security_group.cpp
+++ b/cpp/example_code/ec2/delete_security_group.cpp
@@ -13,34 +13,32 @@
**/
// snippet-start:[ec2.cpp.delete_security_group.inc]
-#include
#include
#include
#include
// snippet-end:[ec2.cpp.delete_security_group.inc]
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.DeleteSecurityGroup]
//! Delete a security group.
/*!
- \sa DeleteSecurityGroup()
\param securityGroupID: A security group ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::DeleteSecurityGroup(const Aws::String &securityGroupID,
+bool AwsDoc::EC2::deleteSecurityGroup(const Aws::String &securityGroupID,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.delete_security_group.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
Aws::EC2::Model::DeleteSecurityGroupRequest request;
request.SetGroupId(securityGroupID);
- auto outcome = ec2Client.DeleteSecurityGroup(request);
+ Aws::EC2::Model::DeleteSecurityGroupOutcome outcome = ec2Client.DeleteSecurityGroup(request);
if (!outcome.IsSuccess()) {
std::cerr << "Failed to delete security group " << securityGroupID <<
":" << outcome.GetError().GetMessage() << std::endl;
- }
- else {
+ } else {
std::cout << "Successfully deleted security group " << securityGroupID <<
std::endl;
}
@@ -48,6 +46,7 @@ bool AwsDoc::EC2::DeleteSecurityGroup(const Aws::String &securityGroupID,
return outcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.DeleteSecurityGroup]
/*
* main function
@@ -74,7 +73,7 @@ int main(int argc, char **argv) {
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
Aws::String groupID = argv[1];
- AwsDoc::EC2::DeleteSecurityGroup(groupID, clientConfig);
+ AwsDoc::EC2::deleteSecurityGroup(groupID, clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
diff --git a/cpp/example_code/ec2/describe_addresses.cpp b/cpp/example_code/ec2/describe_addresses.cpp
index f4955a4f953..6c75cc11ebd 100644
--- a/cpp/example_code/ec2/describe_addresses.cpp
+++ b/cpp/example_code/ec2/describe_addresses.cpp
@@ -13,7 +13,6 @@
**/
// snippet-start:[ec2.cpp.describe_addresses.inc]
-#include
#include
#include
#include
@@ -22,25 +21,25 @@
// snippet-end:[ec2.cpp.describe_addresses.inc]
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.DescribeAddresses]
//! Describe all Elastic IP addresses.
/*!
- \sa DescribeAddresses()
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::DescribeAddresses(
+bool AwsDoc::EC2::describeAddresses(
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.describe_addresses.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
Aws::EC2::Model::DescribeAddressesRequest request;
- auto outcome = ec2Client.DescribeAddresses(request);
+ Aws::EC2::Model::DescribeAddressesOutcome outcome = ec2Client.DescribeAddresses(request);
if (outcome.IsSuccess()) {
std::cout << std::left << std::setw(20) << "InstanceId" <<
std::setw(15) << "Public IP" << std::setw(10) << "Domain" <<
std::setw(30) << "Allocation ID" << std::setw(25) <<
"NIC ID" << std::endl;
- const auto &addresses = outcome.GetResult().GetAddresses();
+ const Aws::Vector<Aws::EC2::Model::Address> &addresses = outcome.GetResult().GetAddresses();
for (const auto &address: addresses) {
Aws::String domainString =
Aws::EC2::Model::DomainTypeMapper::GetNameForDomainType(
@@ -52,8 +51,7 @@ bool AwsDoc::EC2::DescribeAddresses(
std::setw(30) << address.GetAllocationId() << std::setw(25)
<< address.GetNetworkInterfaceId() << std::endl;
}
- }
- else {
+ } else {
std::cerr << "Failed to describe Elastic IP addresses:" <<
outcome.GetError().GetMessage() << std::endl;
}
@@ -61,6 +59,7 @@ bool AwsDoc::EC2::DescribeAddresses(
return outcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.DescribeAddresses]
/*
*
@@ -82,7 +81,7 @@ int main(int argc, char **argv) {
Aws::Client::ClientConfiguration clientConfig;
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
- AwsDoc::EC2::DescribeAddresses(clientConfig);
+ AwsDoc::EC2::describeAddresses(clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
diff --git a/cpp/example_code/ec2/describe_availability_zones.cpp b/cpp/example_code/ec2/describe_availability_zones.cpp
new file mode 100644
index 00000000000..0583a739f9e
--- /dev/null
+++ b/cpp/example_code/ec2/describe_availability_zones.cpp
@@ -0,0 +1,91 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Before running this C++ code example, set up your development environment, including your credentials.
+ *
+ * For more information, see the following documentation topic:
+ *
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started.html
+ *
+ * For information on the structure of the code examples and how to build and run the examples, see
+ * https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/getting-started-code-examples.html.
+ *
+ **/
+
+#include
+// snippet-start:[ec2.cpp.describe_zones.inc]
+#include
+// snippet-end:[ec2.cpp.describe_zones.inc]
+#include
+#include
+#include "ec2_samples.h"
+
+// snippet-start:[cpp.example_code.ec2.DescribeAvailabilityZones]
+
+//! Describe the Availability Zones in the current AWS Region.
+/*!
+ \param clientConfiguration: AWS client configuration.
+ \return bool: Function succeeded.
+*/
+bool AwsDoc::EC2::describeAvailabilityZones(const Aws::Client::ClientConfiguration &clientConfiguration) {
+ Aws::EC2::EC2Client ec2Client(clientConfiguration);
+ // snippet-start:[ec2.cpp.describe_zones.code]
+ Aws::EC2::Model::DescribeAvailabilityZonesRequest request;
+ Aws::EC2::Model::DescribeAvailabilityZonesOutcome outcome = ec2Client.DescribeAvailabilityZones(request);
+
+ if (outcome.IsSuccess()) {
+ std::cout << std::left <<
+ std::setw(32) << "ZoneName" <<
+ std::setw(20) << "State" <<
+ std::setw(32) << "Region" << std::endl;
+
+ const auto &zones =
+ outcome.GetResult().GetAvailabilityZones();
+
+ for (const auto &zone: zones) {
+ Aws::String stateString =
+ Aws::EC2::Model::AvailabilityZoneStateMapper::GetNameForAvailabilityZoneState(
+ zone.GetState());
+ std::cout << std::left <<
+ std::setw(32) << zone.GetZoneName() <<
+ std::setw(20) << stateString <<
+ std::setw(32) << zone.GetRegionName() << std::endl;
+ }
+ } else {
+ std::cerr << "Failed to describe availability zones:" <<
+ outcome.GetError().GetMessage() << std::endl;
+
+ }
+ // snippet-end:[ec2.cpp.describe_zones.code]
+
+ return outcome.IsSuccess();
+}
+// snippet-end:[cpp.example_code.ec2.DescribeAvailabilityZones]
+
+/*
+ * main function
+ *
+ * Usage:
+ * describe_availability_zones
+ */
+
+#ifndef TESTING_BUILD
+
+int main() {
+ Aws::SDKOptions options;
+ InitAPI(options);
+
+ {
+ Aws::Client::ClientConfiguration clientConfig;
+ // Optional: Set to the AWS Region (overrides config file).
+ // clientConfig.region = "us-east-1";
+ AwsDoc::EC2::describeAvailabilityZones(clientConfig);
+ }
+
+ ShutdownAPI(options);
+ return 0;
+}
+
+#endif // TESTING_BUILD
+
+
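A related sketch that narrows the same request with a region-name filter; the helper name, the filter, and the Region value are assumptions.

#include <iostream>
#include <aws/ec2/EC2Client.h>
#include <aws/ec2/model/DescribeAvailabilityZonesRequest.h>
#include <aws/ec2/model/Filter.h>

// List only the zones of a single Region by adding a filter to the request.
bool describeZonesForRegion(const Aws::Client::ClientConfiguration &clientConfiguration) {
    Aws::EC2::EC2Client ec2Client(clientConfiguration);

    Aws::EC2::Model::Filter regionFilter;
    regionFilter.SetName("region-name");
    regionFilter.AddValues("us-east-1");  // Illustrative Region.

    Aws::EC2::Model::DescribeAvailabilityZonesRequest request;
    request.AddFilters(regionFilter);

    Aws::EC2::Model::DescribeAvailabilityZonesOutcome outcome = ec2Client.DescribeAvailabilityZones(request);
    if (outcome.IsSuccess()) {
        for (const auto &zone : outcome.GetResult().GetAvailabilityZones()) {
            std::cout << zone.GetZoneName() << std::endl;
        }
    } else {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
    }
    return outcome.IsSuccess();
}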
diff --git a/cpp/example_code/ec2/describe_instances.cpp b/cpp/example_code/ec2/describe_instances.cpp
index 23929659be6..053e23ceb43 100644
--- a/cpp/example_code/ec2/describe_instances.cpp
+++ b/cpp/example_code/ec2/describe_instances.cpp
@@ -13,7 +13,6 @@
**/
// snippet-start:[ec2.cpp.describe_instances.inc]
-#include
#include
#include
#include
@@ -22,13 +21,13 @@
// snippet-end:[ec2.cpp.describe_instances.inc]
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.DescribeInstances]
//! Describe all Amazon Elastic Compute Cloud (Amazon EC2) instances associated with an account.
/*!
- \sa DescribeInstances()
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::DescribeInstances(
+bool AwsDoc::EC2::describeInstances(
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.describe_instances.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
@@ -36,7 +35,7 @@ bool AwsDoc::EC2::DescribeInstances(
bool header = false;
bool done = false;
while (!done) {
- auto outcome = ec2Client.DescribeInstances(request);
+ Aws::EC2::Model::DescribeInstancesOutcome outcome = ec2Client.DescribeInstances(request);
if (outcome.IsSuccess()) {
if (!header) {
std::cout << std::left <<
@@ -72,7 +71,7 @@ bool AwsDoc::EC2::DescribeInstances(
const std::vector &tags = instance.GetTags();
auto nameIter = std::find_if(tags.cbegin(), tags.cend(),
[](const Aws::EC2::Model::Tag &tag) {
- return tag.GetKey() == "Name";
+ return tag.GetKey() == "Name";
});
if (nameIter != tags.cend()) {
name = nameIter->GetValue();
@@ -89,12 +88,10 @@ bool AwsDoc::EC2::DescribeInstances(
if (!outcome.GetResult().GetNextToken().empty()) {
request.SetNextToken(outcome.GetResult().GetNextToken());
- }
- else {
+ } else {
done = true;
}
- }
- else {
+ } else {
std::cerr << "Failed to describe EC2 instances:" <<
outcome.GetError().GetMessage() << std::endl;
return false;
@@ -104,6 +101,7 @@ bool AwsDoc::EC2::DescribeInstances(
return true;
}
+// snippet-end:[cpp.example_code.ec2.DescribeInstances]
/*
*
@@ -125,7 +123,7 @@ int main(int argc, char **argv) {
Aws::Client::ClientConfiguration clientConfig;
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
- AwsDoc::EC2::DescribeInstances(clientConfig);
+ AwsDoc::EC2::describeInstances(clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
diff --git a/cpp/example_code/ec2/describe_key_pairs.cpp b/cpp/example_code/ec2/describe_key_pairs.cpp
index 86b57104987..5a00a8cfd40 100644
--- a/cpp/example_code/ec2/describe_key_pairs.cpp
+++ b/cpp/example_code/ec2/describe_key_pairs.cpp
@@ -13,7 +13,6 @@
**/
// snippet-start:[ec2.cpp.describe_key_pairs.inc]
-#include
#include
#include
#include
@@ -22,19 +21,19 @@
// snippet-end:[ec2.cpp.describe_key_pairs.inc]
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.DescribeKeyPairs]
//! Describe all Amazon Elastic Compute Cloud (Amazon EC2) instance key pairs.
/*!
- \sa DescribeKeyPairs()
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::DescribeKeyPairs(
+bool AwsDoc::EC2::describeKeyPairs(
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.describe_key_pairs.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
Aws::EC2::Model::DescribeKeyPairsRequest request;
- auto outcome = ec2Client.DescribeKeyPairs(request);
+ Aws::EC2::Model::DescribeKeyPairsOutcome outcome = ec2Client.DescribeKeyPairs(request);
if (outcome.IsSuccess()) {
std::cout << std::left <<
std::setw(32) << "Name" <<
@@ -47,8 +46,7 @@ bool AwsDoc::EC2::DescribeKeyPairs(
std::setw(32) << key_pair.GetKeyName() <<
std::setw(64) << key_pair.GetKeyFingerprint() << std::endl;
}
- }
- else {
+ } else {
std::cerr << "Failed to describe key pairs:" <<
outcome.GetError().GetMessage() << std::endl;
}
@@ -56,6 +54,7 @@ bool AwsDoc::EC2::DescribeKeyPairs(
return outcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.DescribeKeyPairs]
/*
*
@@ -74,7 +73,7 @@ int main(int argc, char **argv) {
Aws::Client::ClientConfiguration clientConfig;
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
- AwsDoc::EC2::DescribeKeyPairs(clientConfig);
+ AwsDoc::EC2::describeKeyPairs(clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
diff --git a/cpp/example_code/ec2/describe_regions_and_zones.cpp b/cpp/example_code/ec2/describe_regions_and_zones.cpp
index 7eddebfc5b6..f0c38aedcfc 100644
--- a/cpp/example_code/ec2/describe_regions_and_zones.cpp
+++ b/cpp/example_code/ec2/describe_regions_and_zones.cpp
@@ -13,35 +13,27 @@
**/
// snippet-start:[ec2.cpp.describe_regions.inc]
-#include
#include
#include
-#include
// snippet-end:[ec2.cpp.describe_regions.inc]
-// snippet-start:[ec2.cpp.describe_zones.inc]
-#include
-#include
-// snippet-end:[ec2.cpp.describe_zones.inc]
#include
#include
#include "ec2_samples.h"
-//! Describe all Amazon Elastic Compute Cloud (Amazon EC2) Regions and Availability Zones.
+
+// snippet-start:[cpp.example_code.ec2.DescribeRegions]
+//! Describe all Amazon Elastic Compute Cloud (Amazon EC2) Regions.
/*!
- \sa DescribeRegionsAndZones()
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::DescribeRegionsAndZones(
+bool AwsDoc::EC2::describeRegions(
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.describe_regions.code]
- // snippet-start:[cpp.example_code.ec2.describe_regions.client]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
- // snippet-end:[cpp.example_code.ec2.describe_regions.client]
- // snippet-start:[cpp.example_code.ec2.DescribeRegions]
+
Aws::EC2::Model::DescribeRegionsRequest request;
- auto outcome = ec2Client.DescribeRegions(request);
- bool result = true;
+ Aws::EC2::Model::DescribeRegionsOutcome outcome = ec2Client.DescribeRegions(request);
if (outcome.IsSuccess()) {
std::cout << std::left <<
std::setw(32) << "RegionName" <<
@@ -53,50 +45,18 @@ bool AwsDoc::EC2::DescribeRegionsAndZones(
std::setw(32) << region.GetRegionName() <<
std::setw(64) << region.GetEndpoint() << std::endl;
}
- }
- else {
+ } else {
std::cerr << "Failed to describe regions:" <<
outcome.GetError().GetMessage() << std::endl;
- result = false;
}
- // snippet-end:[cpp.example_code.ec2.DescribeRegions]
// snippet-end:[ec2.cpp.describe_regions.code]
std::cout << std::endl;
- // snippet-start:[ec2.cpp.describe_zones.code]
- Aws::EC2::Model::DescribeAvailabilityZonesRequest describe_request;
- auto describe_outcome = ec2Client.DescribeAvailabilityZones(describe_request);
-
- if (describe_outcome.IsSuccess()) {
- std::cout << std::left <<
- std::setw(32) << "ZoneName" <<
- std::setw(20) << "State" <<
- std::setw(32) << "Region" << std::endl;
-
- const auto &zones =
- describe_outcome.GetResult().GetAvailabilityZones();
-
- for (const auto &zone: zones) {
- Aws::String stateString =
- Aws::EC2::Model::AvailabilityZoneStateMapper::GetNameForAvailabilityZoneState(
- zone.GetState());
- std::cout << std::left <<
- std::setw(32) << zone.GetZoneName() <<
- std::setw(20) << stateString <<
- std::setw(32) << zone.GetRegionName() << std::endl;
- }
- }
- else {
- std::cerr << "Failed to describe availability zones:" <<
- describe_outcome.GetError().GetMessage() << std::endl;
- result = false;
- }
- // snippet-end:[ec2.cpp.describe_zones.code]
-
- return result;
+ return outcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.DescribeRegions]
/*
*
@@ -118,7 +78,7 @@ int main(int argc, char **argv) {
Aws::Client::ClientConfiguration clientConfig;
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
- AwsDoc::EC2::DescribeRegionsAndZones(clientConfig);
+ AwsDoc::EC2::describeRegions(clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
diff --git a/cpp/example_code/ec2/describe_security_groups.cpp b/cpp/example_code/ec2/describe_security_groups.cpp
index 1ab08d5bb9f..440f5855103 100644
--- a/cpp/example_code/ec2/describe_security_groups.cpp
+++ b/cpp/example_code/ec2/describe_security_groups.cpp
@@ -13,7 +13,6 @@
**/
// snippet-start:[ec2.cpp.describe_security_groups.inc]
-#include
#include
#include
#include
@@ -22,14 +21,14 @@
// snippet-end:[ec2.cpp.describe_security_groups.inc]
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.DescribeSecurityGroups]
//! Describe all Amazon Elastic Compute Cloud (Amazon EC2) security groups, or a specific group.
/*!
- \sa DescribeSecurityGroups()
\param groupID: A group ID, ignored if empty.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::DescribeSecurityGroups(const Aws::String &groupID,
+bool AwsDoc::EC2::describeSecurityGroups(const Aws::String &groupID,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.describe_security_groups.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
@@ -45,7 +44,7 @@ bool AwsDoc::EC2::DescribeSecurityGroups(const Aws::String &groupID,
request.SetNextToken(nextToken);
}
- auto outcome = ec2Client.DescribeSecurityGroups(request);
+ Aws::EC2::Model::DescribeSecurityGroupsOutcome outcome = ec2Client.DescribeSecurityGroups(request);
if (outcome.IsSuccess()) {
std::cout << std::left <<
std::setw(32) << "Name" <<
@@ -64,8 +63,7 @@ bool AwsDoc::EC2::DescribeSecurityGroups(const Aws::String &groupID,
std::setw(64) << securityGroup.GetDescription() <<
std::endl;
}
- }
- else {
+ } else {
std::cerr << "Failed to describe security groups:" <<
outcome.GetError().GetMessage() << std::endl;
return false;
@@ -77,6 +75,7 @@ bool AwsDoc::EC2::DescribeSecurityGroups(const Aws::String &groupID,
return true;
}
+// snippet-end:[cpp.example_code.ec2.DescribeSecurityGroups]
/*
*
@@ -105,7 +104,7 @@ int main(int argc, char **argv) {
Aws::Client::ClientConfiguration clientConfig;
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
- AwsDoc::EC2::DescribeSecurityGroups(groupID, clientConfig);
+ AwsDoc::EC2::describeSecurityGroups(groupID, clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
diff --git a/cpp/example_code/ec2/ec2_samples.h b/cpp/example_code/ec2/ec2_samples.h
index 15b8a47b4b3..dd39d716c81 100644
--- a/cpp/example_code/ec2/ec2_samples.h
+++ b/cpp/example_code/ec2/ec2_samples.h
@@ -6,6 +6,7 @@
#define EC2_EXAMPLES_EC2_SAMPLES_H
#include
+#include
namespace AwsDoc {
@@ -13,190 +14,213 @@ namespace AwsDoc {
//! Allocate an Elastic IP address and associate it with an Amazon Elastic Compute Cloud
//! (Amazon EC2) instance.
/*!
- \sa AllocateAndAssociateAddress()
\param instanceID: An EC2 instance ID.
- \param allocationId: String to return the allocation ID of the address.
+ \param[out] publicIPAddress: String to return the public IP address.
+ \param[out] allocationID: String to return the allocation ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool AllocateAndAssociateAddress(const Aws::String &instanceId,
- Aws::String &allocationId,
+ bool allocateAndAssociateAddress(const Aws::String &instanceId, Aws::String &publicIPAddress,
+ Aws::String &allocationID,
const Aws::Client::ClientConfiguration &clientConfiguration);
+ //! Associate an Elastic IP address with an EC2 instance.
+ /*!
+ \param instanceId: An EC2 instance ID.
+ \param allocationId: An Elastic IP allocation ID.
+ \param[out] associationID: String to receive the association ID.
+ \param clientConfiguration: AWS client configuration.
+ \return bool: True if the address was associated with the instance; otherwise, false.
+ */
+ bool
+ associateAddress(const Aws::String &instanceId, const Aws::String &allocationId, Aws::String &associationID,
+ const Aws::Client::ClientConfiguration &clientConfiguration);
+
+ //! Authorize ingress to an Amazon Elastic Compute Cloud (Amazon EC2) group.
+ /*!
+ \param groupID: The EC2 group ID.
+ \param clientConfiguration: The ClientConfiguration object.
+ \return bool: True if the operation was successful, false otherwise.
+ */
+ bool
+ authorizeSecurityGroupIngress(const Aws::String &groupID,
+ const Aws::Client::ClientConfiguration &clientConfiguration);
+
//! Create an EC2 instance key pair.
/*!
- \sa CreateKeyPair()
\param keyPairName: A name for a key pair.
+ \param keyFilePath: File path where the key material is saved. Ignored if it is an empty string.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool CreateKeyPair(const Aws::String &keyPairName,
+ bool createKeyPair(const Aws::String &keyPairName, const Aws::String &keyFilePath,
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Create a security group.
/*!
- \sa CreateSecurityGroup()
\param groupName: A security group name.
\param description: A description.
\param vpcID: A virtual private cloud (VPC) ID.
- \param groupIDResult: A string to receive the group ID.
+ \param[out] groupIDResult: A string to receive the group ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool CreateSecurityGroup(const Aws::String &groupName,
+ bool createSecurityGroup(const Aws::String &groupName,
const Aws::String &description,
const Aws::String &vpcID,
Aws::String &groupIDResult,
const Aws::Client::ClientConfiguration &clientConfiguration);
+ //! Add or overwrite only the specified tags for the specified Amazon Elastic Compute Cloud (Amazon EC2) resource or resources.
+ /*!
+ \param resources: The resources for the tags.
+ \param tags: Vector of tags.
+ \param clientConfiguration: AWS client configuration.
+ \return bool: Function succeeded.
+ */
+ bool createTags(const Aws::Vector<Aws::String> &resources,
+ const Aws::Vector<Aws::EC2::Model::Tag> &tags,
+ const Aws::Client::ClientConfiguration &clientConfiguration);
+
//! Delete an Amazon EC2 key pair.
/*!
- \sa DeleteKeyPair()
\param keyPairName: A name for a key pair.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool DeleteKeyPair(const Aws::String &keyPairName,
+ bool deleteKeyPair(const Aws::String &keyPairName,
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Delete a security group.
/*!
- \sa DeleteSecurityGroup()
\param securityGroupID: A security group ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool DeleteSecurityGroup(const Aws::String &securityGroupID,
+ bool deleteSecurityGroup(const Aws::String &securityGroupID,
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Describe all Elastic IP addresses.
/*!
- \sa DescribeAddresses()
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
bool
- DescribeAddresses(const Aws::Client::ClientConfiguration &clientConfiguration);
+ describeAddresses(const Aws::Client::ClientConfiguration &clientConfiguration);
+
+ //! Describe the Availability Zones in the current AWS Region.
+ /*!
+ \param clientConfiguration: AWS client configuration.
+ \return bool: Function succeeded.
+ */
+ bool describeAvailabilityZones(const Aws::Client::ClientConfiguration &clientConfiguration);
//! Describe all EC2 instances associated with an account.
/*!
- \sa DescribeInstances()
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
bool
- DescribeInstances(const Aws::Client::ClientConfiguration &clientConfiguration);
+ describeInstances(const Aws::Client::ClientConfiguration &clientConfiguration);
//! Describe all EC2 instance key pairs.
/*!
- \sa DescribeKeyPairs()
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
bool
- DescribeKeyPairs(const Aws::Client::ClientConfiguration &clientConfiguration);
+ describeKeyPairs(const Aws::Client::ClientConfiguration &clientConfiguration);
//! Describe all EC2 Regions.
/*!
- \sa DescribeRegionsAndZones()
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool DescribeRegionsAndZones(
+ bool describeRegions(
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Describe all EC2 security groups, or a specific group.
/*!
- \sa DescribeSecurityGroups()
\param groupID: A group ID, ignored if empty.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool DescribeSecurityGroups(const Aws::String &groupID,
+ bool describeSecurityGroups(const Aws::String &groupID,
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Enable detailed monitoring for an EC2 instance.
/*!
- \sa EnableMonitoring()
\param instanceId: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool EnableMonitoring(const Aws::String &instanceId,
+ bool enableMonitoring(const Aws::String &instanceId,
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Disable monitoring for an EC2 instance.
/*!
- \sa DisableMonitoring()
\param instanceId: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool DisableMonitoring(const Aws::String &instanceId,
+ bool disableMonitoring(const Aws::String &instanceId,
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Reboot an EC2 instance.
/*!
- \sa RebootInstance()
\param instanceID: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool RebootInstance(const Aws::String &instanceId,
+ bool rebootInstance(const Aws::String &instanceId,
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Release an Elastic IP address.
/*!
- \sa ReleaseAddress()
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool ReleaseAddress(const Aws::String &allocationID,
+ bool releaseAddress(const Aws::String &allocationID,
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Launch an EC2 instance.
/*!
- \sa RunInstance()
\param instanceName: A name for the EC2 instance.
\param amiId: An Amazon Machine Image (AMI) identifier.
- \param instanceID: String to return the instance ID.
+ \param[out] instanceID: String to return the instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool RunInstance(const Aws::String &instanceName,
+ bool runInstance(const Aws::String &instanceName,
const Aws::String &amiId,
Aws::String &instanceID,
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Start an EC2 instance.
/*!
- \sa StartInstance()
\param instanceID: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool StartInstance(const Aws::String &instanceId,
+ bool startInstance(const Aws::String &instanceId,
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Stop an EC2 instance.
/*!
- \sa StopInstance()
\param instanceID: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
- bool StopInstance(const Aws::String &instanceId,
+ bool stopInstance(const Aws::String &instanceId,
const Aws::Client::ClientConfiguration &clientConfiguration);
//! Terminate an EC2 instance.
/*!
- \sa TerminateInstances()
\param instanceID: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-
- bool TerminateInstances(const Aws::String &instanceID,
+ bool terminateInstances(const Aws::String &instanceID,
const Aws::Client::ClientConfiguration &clientConfiguration);
} // EC2
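Taken together, the renamed helpers compose into the usual launch, tag, and clean-up flow; a condensed sketch follows, in which the helper name, instance name, and AMI are placeholders and a real run would wait for state transitions between calls.

#include <aws/core/Aws.h>
#include <aws/ec2/model/Tag.h>
#include "ec2_samples.h"

bool launchTagAndCleanUpSketch(const Aws::Client::ClientConfiguration &clientConfig) {
    Aws::String instanceID;
    // Placeholder name and AMI; choose an AMI that is valid in your Region.
    if (!AwsDoc::EC2::runInstance("sample-instance", "ami-0dfcb1ef8550277af", instanceID, clientConfig)) {
        return false;
    }

    Aws::Vector<Aws::String> resources{instanceID};
    Aws::Vector<Aws::EC2::Model::Tag> tags{
            Aws::EC2::Model::Tag().WithKey("Name").WithValue("sample-instance")};
    AwsDoc::EC2::createTags(resources, tags, clientConfig);

    // A real scenario would wait for the instance to reach "running" or "stopped" between these calls.
    AwsDoc::EC2::stopInstance(instanceID, clientConfig);
    AwsDoc::EC2::startInstance(instanceID, clientConfig);
    return AwsDoc::EC2::terminateInstances(instanceID, clientConfig);
}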
diff --git a/cpp/example_code/ec2/hello_ec2/hello_ec2.cpp b/cpp/example_code/ec2/hello_ec2/hello_ec2.cpp
index bf0bf48200a..d1684faa6d0 100644
--- a/cpp/example_code/ec2/hello_ec2/hello_ec2.cpp
+++ b/cpp/example_code/ec2/hello_ec2/hello_ec2.cpp
@@ -27,6 +27,9 @@
*/
int main(int argc, char **argv) {
+ (void)argc;
+ (void)argv;
+
Aws::SDKOptions options;
// Optionally change the log level for debugging.
// options.loggingOptions.logLevel = Utils::Logging::LogLevel::Debug;
@@ -42,7 +45,7 @@ int main(int argc, char **argv) {
bool header = false;
bool done = false;
while (!done) {
- auto outcome = ec2Client.DescribeInstances(request);
+ Aws::EC2::Model::DescribeInstancesOutcome outcome = ec2Client.DescribeInstances(request);
if (outcome.IsSuccess()) {
if (!header) {
std::cout << std::left <<
diff --git a/cpp/example_code/ec2/monitor_instance.cpp b/cpp/example_code/ec2/monitor_instance.cpp
index f0e06773aab..f63afe49c81 100644
--- a/cpp/example_code/ec2/monitor_instance.cpp
+++ b/cpp/example_code/ec2/monitor_instance.cpp
@@ -13,24 +13,21 @@
**/
// snippet-start:[ec2.cpp.monitor_instance.inc]
-#include
#include
#include
#include
-#include
#include
// snippet-end:[ec2.cpp.monitor_instance.inc]
#include "ec2_samples.h"
-
+// snippet-start:[cpp.example_code.ec2.MonitorInstances]
//! Enable detailed monitoring for an Amazon Elastic Compute Cloud (Amazon EC2) instance.
/*!
- \sa EnableMonitoring()
\param instanceId: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::EnableMonitoring(const Aws::String &instanceId,
+bool AwsDoc::EC2::enableMonitoring(const Aws::String &instanceId,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.enable_monitor_instance.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
@@ -38,30 +35,28 @@ bool AwsDoc::EC2::EnableMonitoring(const Aws::String &instanceId,
request.AddInstanceIds(instanceId);
request.SetDryRun(true);
- auto dry_run_outcome = ec2Client.MonitorInstances(request);
- if (dry_run_outcome.IsSuccess()) {
+ Aws::EC2::Model::MonitorInstancesOutcome dryRunOutcome = ec2Client.MonitorInstances(request);
+ if (dryRunOutcome.IsSuccess()) {
std::cerr
<< "Failed dry run to enable monitoring on instance. A dry run should trigger an error."
<<
std::endl;
return false;
- }
- else if (dry_run_outcome.GetError().GetErrorType()
- != Aws::EC2::EC2Errors::DRY_RUN_OPERATION) {
+ } else if (dryRunOutcome.GetError().GetErrorType()
+ != Aws::EC2::EC2Errors::DRY_RUN_OPERATION) {
std::cerr << "Failed dry run to enable monitoring on instance " <<
- instanceId << ": " << dry_run_outcome.GetError().GetMessage() <<
+ instanceId << ": " << dryRunOutcome.GetError().GetMessage() <<
std::endl;
return false;
}
request.SetDryRun(false);
- auto monitorInstancesOutcome = ec2Client.MonitorInstances(request);
+ Aws::EC2::Model::MonitorInstancesOutcome monitorInstancesOutcome = ec2Client.MonitorInstances(request);
if (!monitorInstancesOutcome.IsSuccess()) {
std::cerr << "Failed to enable monitoring on instance " <<
instanceId << ": " <<
monitorInstancesOutcome.GetError().GetMessage() << std::endl;
- }
- else {
+ } else {
std::cout << "Successfully enabled monitoring on instance " <<
instanceId << std::endl;
}
@@ -69,15 +64,17 @@ bool AwsDoc::EC2::EnableMonitoring(const Aws::String &instanceId,
return monitorInstancesOutcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.MonitorInstances]
+
+// snippet-start:[cpp.example_code.ec2.UnmonitorInstances]
//! Disable monitoring for an EC2 instance.
/*!
- \sa DisableMonitoring()
\param instanceId: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::DisableMonitoring(const Aws::String &instanceId,
+bool AwsDoc::EC2::disableMonitoring(const Aws::String &instanceId,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.disable_monitor_instance.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
@@ -85,30 +82,28 @@ bool AwsDoc::EC2::DisableMonitoring(const Aws::String &instanceId,
unrequest.AddInstanceIds(instanceId);
unrequest.SetDryRun(true);
- auto undryRunOutcome = ec2Client.UnmonitorInstances(unrequest);
- if (undryRunOutcome.IsSuccess()) {
+ Aws::EC2::Model::UnmonitorInstancesOutcome dryRunOutcome = ec2Client.UnmonitorInstances(unrequest);
+ if (dryRunOutcome.IsSuccess()) {
std::cerr
<< "Failed dry run to disable monitoring on instance. A dry run should trigger an error."
<<
std::endl;
return false;
- }
- else if (undryRunOutcome.GetError().GetErrorType() !=
- Aws::EC2::EC2Errors::DRY_RUN_OPERATION) {
+ } else if (dryRunOutcome.GetError().GetErrorType() !=
+ Aws::EC2::EC2Errors::DRY_RUN_OPERATION) {
std::cout << "Failed dry run to disable monitoring on instance " <<
- instanceId << ": " << undryRunOutcome.GetError().GetMessage() <<
+ instanceId << ": " << dryRunOutcome.GetError().GetMessage() <<
std::endl;
return false;
}
unrequest.SetDryRun(false);
- auto unmonitorInstancesOutcome = ec2Client.UnmonitorInstances(unrequest);
+ Aws::EC2::Model::UnmonitorInstancesOutcome unmonitorInstancesOutcome = ec2Client.UnmonitorInstances(unrequest);
if (!unmonitorInstancesOutcome.IsSuccess()) {
std::cout << "Failed to disable monitoring on instance " << instanceId
<< ": " << unmonitorInstancesOutcome.GetError().GetMessage() <<
std::endl;
- }
- else {
+ } else {
std::cout << "Successfully disable monitoring on instance " <<
instanceId << std::endl;
}
@@ -116,6 +111,7 @@ bool AwsDoc::EC2::DisableMonitoring(const Aws::String &instanceId,
return unmonitorInstancesOutcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.UnmonitorInstances]
/*
*
@@ -150,10 +146,10 @@ int main(int argc, char **argv) {
// clientConfig.region = "us-east-1";
if (enableMonitoring) {
- AwsDoc::EC2::EnableMonitoring(instance_id, clientConfig);
+ AwsDoc::EC2::enableMonitoring(instance_id, clientConfig);
}
else {
- AwsDoc::EC2::DisableMonitoring(instance_id, clientConfig);
+ AwsDoc::EC2::disableMonitoring(instance_id, clientConfig);
}
}
Aws::ShutdownAPI(options);
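The dry-run check used in both functions above could be factored out for any EC2 request type that supports SetDryRun; a generic sketch follows, with the helper name being an assumption.

#include <aws/ec2/EC2Errors.h>

// Sketch of the dry-run pattern: the dry run is expected to fail with DRY_RUN_OPERATION;
// any other error means the real call would also fail, so it is not attempted.
template<typename RequestT, typename CallT>
bool dryRunThenCall(RequestT request, const CallT &call) {
    request.SetDryRun(true);
    auto dryRunOutcome = call(request);
    if (dryRunOutcome.IsSuccess()) {
        return false;  // Unexpected: a dry run should not succeed.
    }
    if (dryRunOutcome.GetError().GetErrorType() != Aws::EC2::EC2Errors::DRY_RUN_OPERATION) {
        return false;  // Missing permissions or an invalid request.
    }
    request.SetDryRun(false);
    return call(request).IsSuccess();
}

// Possible use with the monitoring request shown above (C++14 generic lambda):
//   Aws::EC2::Model::MonitorInstancesRequest request;
//   request.AddInstanceIds(instanceId);
//   bool ok = dryRunThenCall(request, [&ec2Client](const auto &r) { return ec2Client.MonitorInstances(r); });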
diff --git a/cpp/example_code/ec2/reboot_instance.cpp b/cpp/example_code/ec2/reboot_instance.cpp
index cd1ab5db3c1..1080a892dfd 100644
--- a/cpp/example_code/ec2/reboot_instance.cpp
+++ b/cpp/example_code/ec2/reboot_instance.cpp
@@ -13,21 +13,20 @@
**/
// snippet-start:[ec2.cpp.reboot_instance.inc]
-#include
#include
#include
#include
// snippet-end:[ec2.cpp.reboot_instance.inc]
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.RebootInstances]
//! Reboot an Amazon Elastic Compute Cloud (Amazon EC2) instance.
/*!
- \sa RebootInstance()
\param instanceID: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::RebootInstance(const Aws::String &instanceId,
+bool AwsDoc::EC2::rebootInstance(const Aws::String &instanceId,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.reboot_instance.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
@@ -36,28 +35,26 @@ bool AwsDoc::EC2::RebootInstance(const Aws::String &instanceId,
request.AddInstanceIds(instanceId);
request.SetDryRun(true);
- auto dry_run_outcome = ec2Client.RebootInstances(request);
+ Aws::EC2::Model::RebootInstancesOutcome dry_run_outcome = ec2Client.RebootInstances(request);
if (dry_run_outcome.IsSuccess()) {
std::cerr
<< "Failed dry run to reboot on instance. A dry run should trigger an error."
<<
std::endl;
return false;
- }
- else if (dry_run_outcome.GetError().GetErrorType()
- != Aws::EC2::EC2Errors::DRY_RUN_OPERATION) {
+ } else if (dry_run_outcome.GetError().GetErrorType()
+ != Aws::EC2::EC2Errors::DRY_RUN_OPERATION) {
std::cout << "Failed dry run to reboot instance " << instanceId << ": "
<< dry_run_outcome.GetError().GetMessage() << std::endl;
return false;
}
request.SetDryRun(false);
- auto outcome = ec2Client.RebootInstances(request);
+ Aws::EC2::Model::RebootInstancesOutcome outcome = ec2Client.RebootInstances(request);
if (!outcome.IsSuccess()) {
std::cout << "Failed to reboot instance " << instanceId << ": " <<
outcome.GetError().GetMessage() << std::endl;
- }
- else {
+ } else {
std::cout << "Successfully rebooted instance " << instanceId <<
std::endl;
}
@@ -65,6 +62,7 @@ bool AwsDoc::EC2::RebootInstance(const Aws::String &instanceId,
return outcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.RebootInstances]
/*
*
@@ -92,7 +90,7 @@ int main(int argc, char **argv) {
Aws::Client::ClientConfiguration clientConfig;
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
- AwsDoc::EC2::RebootInstance(instanceId, clientConfig);
+ AwsDoc::EC2::rebootInstance(instanceId, clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
diff --git a/cpp/example_code/ec2/release_address.cpp b/cpp/example_code/ec2/release_address.cpp
index 2c41ce318fe..b510980fd43 100644
--- a/cpp/example_code/ec2/release_address.cpp
+++ b/cpp/example_code/ec2/release_address.cpp
@@ -13,20 +13,19 @@
**/
// snippet-start:[ec2.cpp.release_address.inc]
-#include
#include
#include
#include
// snippet-end:[ec2.cpp.release_address.inc]
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.ReleaseAddress]
//! Release an Elastic IP address.
/*!
- \sa ReleaseAddress()
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::ReleaseAddress(const Aws::String &allocationID,
+bool AwsDoc::EC2::releaseAddress(const Aws::String &allocationID,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.release_address.code]
Aws::EC2::EC2Client ec2(clientConfiguration);
@@ -34,13 +33,12 @@ bool AwsDoc::EC2::ReleaseAddress(const Aws::String &allocationID,
Aws::EC2::Model::ReleaseAddressRequest request;
request.SetAllocationId(allocationID);
- auto outcome = ec2.ReleaseAddress(request);
+ Aws::EC2::Model::ReleaseAddressOutcome outcome = ec2.ReleaseAddress(request);
if (!outcome.IsSuccess()) {
std::cerr << "Failed to release Elastic IP address " <<
allocationID << ":" << outcome.GetError().GetMessage() <<
std::endl;
- }
- else {
+ } else {
std::cout << "Successfully released Elastic IP address " <<
allocationID << std::endl;
}
@@ -48,6 +46,7 @@ bool AwsDoc::EC2::ReleaseAddress(const Aws::String &allocationID,
return outcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.ReleaseAddress]
/*
*
@@ -75,7 +74,7 @@ int main(int argc, char **argv) {
Aws::Client::ClientConfiguration clientConfig;
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
- AwsDoc::EC2::ReleaseAddress(allocationID, clientConfig);
+ AwsDoc::EC2::releaseAddress(allocationID, clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
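A compact sketch of the Elastic IP lifecycle using the helpers declared in ec2_samples.h; the helper name and instance ID are placeholders, and a real flow disassociates the address, or terminates the instance, before releasing it.

#include <iostream>
#include "ec2_samples.h"

bool elasticIpLifecycleSketch(const Aws::Client::ClientConfiguration &clientConfig) {
    Aws::String publicIPAddress;
    Aws::String allocationID;
    // Placeholder ID for a running instance in your account.
    if (!AwsDoc::EC2::allocateAndAssociateAddress("i-0123456789abcdef0", publicIPAddress,
                                                  allocationID, clientConfig)) {
        return false;
    }
    std::cout << "Associated " << publicIPAddress << " (allocation " << allocationID << ")" << std::endl;

    // Disassociate the address (or terminate the instance) before releasing it.
    return AwsDoc::EC2::releaseAddress(allocationID, clientConfig);
}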
diff --git a/cpp/example_code/ec2/create_instance.cpp b/cpp/example_code/ec2/run_instances.cpp
similarity index 65%
rename from cpp/example_code/ec2/create_instance.cpp
rename to cpp/example_code/ec2/run_instances.cpp
index 2ae4c9dc4c1..13fe5a7e535 100644
--- a/cpp/example_code/ec2/create_instance.cpp
+++ b/cpp/example_code/ec2/run_instances.cpp
@@ -15,33 +15,27 @@
// snippet-start:[ec2.cpp.create_instance.inc]
#include
#include
-#include
#include
-#include
#include
// snippet-end:[ec2.cpp.create_instance.inc]
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.RunInstances]
//! Launch an Amazon Elastic Compute Cloud (Amazon EC2) instance.
/*!
- \sa RunInstance()
\param instanceName: A name for the EC2 instance.
\param amiId: An Amazon Machine Image (AMI) identifier.
- \param instanceID: String to return the instance ID.
+ \param[out] instanceID: String to return the instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-
-bool AwsDoc::EC2::RunInstance(const Aws::String &instanceName,
+bool AwsDoc::EC2::runInstance(const Aws::String &instanceName,
const Aws::String &amiId,
Aws::String &instanceID,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.create_instance.code]
- // snippet-start:[cpp.example_code.ec2.create_instance.client]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
- // snippet-end:[cpp.example_code.ec2.create_instance.client]
- // snippet-start:[cpp.example_code.ec2.RunInstances]
Aws::EC2::Model::RunInstancesRequest runRequest;
runRequest.SetImageId(amiId);
runRequest.SetInstanceType(Aws::EC2::Model::InstanceType::t1_micro);
@@ -64,41 +58,20 @@ bool AwsDoc::EC2::RunInstance(const Aws::String &instanceName,
runOutcome.GetError().GetMessage() << std::endl;
return false;
}
- // snippet-end:[ec2.cpp.create_instance.code]
+ // snippet-end:[ec2.cpp.create_instance.code]
instanceID = instances[0].GetInstanceId();
- // snippet-end:[cpp.example_code.ec2.RunInstances]
-
- // snippet-start:[cpp.example_code.ec2.CreateTags]
- Aws::EC2::Model::Tag nameTag;
- nameTag.SetKey("Name");
- nameTag.SetValue(instanceName);
-
- Aws::EC2::Model::CreateTagsRequest createRequest;
- createRequest.AddResources(instanceID);
- createRequest.AddTags(nameTag);
-
- Aws::EC2::Model::CreateTagsOutcome createOutcome = ec2Client.CreateTags(
- createRequest);
- if (!createOutcome.IsSuccess()) {
- std::cerr << "Failed to tag ec2 instance " << instanceID <<
- " with name " << instanceName << ":" <<
- createOutcome.GetError().GetMessage() << std::endl;
- return false;
- }
- // snippet-end:[cpp.example_code.ec2.CreateTags]
- std::cout << "Successfully launched ec2 instance " << instanceName <<
- " based on ami " << amiId << std::endl;
return true;
}
+// snippet-end:[cpp.example_code.ec2.RunInstances]
/*
*
* main function
*
- * Usage: 'run_create_instance '
+ * Usage: 'run_run_instances <instance_name> <ami_id>'
*
*/
@@ -106,7 +79,7 @@ bool AwsDoc::EC2::RunInstance(const Aws::String &instanceName,
int main(int argc, char **argv) {
if (argc != 3) {
- std::cout << "Usage: run_create_instance "
+ std::cout << "Usage: run_run_instances "
<< std::endl;
return 1;
}
@@ -120,7 +93,7 @@ int main(int argc, char **argv) {
Aws::String instanceName = argv[1];
Aws::String amiId = argv[2];
Aws::String instanceID;
- AwsDoc::EC2::RunInstance(instanceName, amiId, instanceID, clientConfig);
+ AwsDoc::EC2::runInstance(instanceName, amiId, instanceID, clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
diff --git a/cpp/example_code/ec2/start_stop_instance.cpp b/cpp/example_code/ec2/start_stop_instance.cpp
index 2a3aeb07b7e..761fb1f4221 100644
--- a/cpp/example_code/ec2/start_stop_instance.cpp
+++ b/cpp/example_code/ec2/start_stop_instance.cpp
@@ -14,72 +14,69 @@
**/
// snippet-start:[ec2.cpp.start_instance.inc]
-#include
+
#include
#include
-#include
// snippet-end:[ec2.cpp.start_instance.inc]
// snippet-start:[ec2.cpp.stop_instance.inc]
#include
-#include
// snippet-end:[ec2.cpp.stop_instance.inc]
#include
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.StartInstances]
//! Start an Amazon Elastic Compute Cloud (Amazon EC2) instance.
/*!
- \sa StartInstance()
\param instanceID: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::StartInstance(const Aws::String &instanceId,
+bool AwsDoc::EC2::startInstance(const Aws::String &instanceId,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.start_instance.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
- Aws::EC2::Model::StartInstancesRequest start_request;
- start_request.AddInstanceIds(instanceId);
- start_request.SetDryRun(true);
+ Aws::EC2::Model::StartInstancesRequest startRequest;
+ startRequest.AddInstanceIds(instanceId);
+ startRequest.SetDryRun(true);
- auto dry_run_outcome = ec2Client.StartInstances(start_request);
- if (dry_run_outcome.IsSuccess()) {
+ Aws::EC2::Model::StartInstancesOutcome dryRunOutcome = ec2Client.StartInstances(startRequest);
+ if (dryRunOutcome.IsSuccess()) {
std::cerr
<< "Failed dry run to start instance. A dry run should trigger an error."
<< std::endl;
return false;
- }
- else if (dry_run_outcome.GetError().GetErrorType() !=
- Aws::EC2::EC2Errors::DRY_RUN_OPERATION) {
+ } else if (dryRunOutcome.GetError().GetErrorType() !=
+ Aws::EC2::EC2Errors::DRY_RUN_OPERATION) {
std::cout << "Failed dry run to start instance " << instanceId << ": "
- << dry_run_outcome.GetError().GetMessage() << std::endl;
+ << dryRunOutcome.GetError().GetMessage() << std::endl;
return false;
}
- start_request.SetDryRun(false);
- auto start_instancesOutcome = ec2Client.StartInstances(start_request);
+ startRequest.SetDryRun(false);
+ Aws::EC2::Model::StartInstancesOutcome startInstancesOutcome = ec2Client.StartInstances(startRequest);
- if (!start_instancesOutcome.IsSuccess()) {
+ if (!startInstancesOutcome.IsSuccess()) {
std::cout << "Failed to start instance " << instanceId << ": " <<
- start_instancesOutcome.GetError().GetMessage() << std::endl;
- }
- else {
+ startInstancesOutcome.GetError().GetMessage() << std::endl;
+ } else {
std::cout << "Successfully started instance " << instanceId <<
std::endl;
}
// snippet-end:[ec2.cpp.start_instance.code]
- return start_instancesOutcome.IsSuccess();
+ return startInstancesOutcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.StartInstances]
+// snippet-start:[cpp.example_code.ec2.StopInstances]
//! Stop an EC2 instance.
/*!
- \sa StopInstance()
\param instanceID: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::StopInstance(const Aws::String &instanceId,
+bool AwsDoc::EC2::stopInstance(const Aws::String &instanceId,
const Aws::Client::ClientConfiguration &clientConfiguration) {
// snippet-start:[ec2.cpp.stop_instance.code]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
@@ -87,27 +84,25 @@ bool AwsDoc::EC2::StopInstance(const Aws::String &instanceId,
request.AddInstanceIds(instanceId);
request.SetDryRun(true);
- auto dry_run_outcome = ec2Client.StopInstances(request);
- if (dry_run_outcome.IsSuccess()) {
+ Aws::EC2::Model::StopInstancesOutcome dryRunOutcome = ec2Client.StopInstances(request);
+ if (dryRunOutcome.IsSuccess()) {
std::cerr
<< "Failed dry run to stop instance. A dry run should trigger an error."
<< std::endl;
return false;
- }
- else if (dry_run_outcome.GetError().GetErrorType() !=
- Aws::EC2::EC2Errors::DRY_RUN_OPERATION) {
+ } else if (dryRunOutcome.GetError().GetErrorType() !=
+ Aws::EC2::EC2Errors::DRY_RUN_OPERATION) {
std::cout << "Failed dry run to stop instance " << instanceId << ": "
- << dry_run_outcome.GetError().GetMessage() << std::endl;
+ << dryRunOutcome.GetError().GetMessage() << std::endl;
return false;
}
request.SetDryRun(false);
- auto outcome = ec2Client.StopInstances(request);
+ Aws::EC2::Model::StopInstancesOutcome outcome = ec2Client.StopInstances(request);
if (!outcome.IsSuccess()) {
std::cout << "Failed to stop instance " << instanceId << ": " <<
outcome.GetError().GetMessage() << std::endl;
- }
- else {
+ } else {
std::cout << "Successfully stopped instance " << instanceId <<
std::endl;
}
@@ -120,6 +115,7 @@ void PrintUsage() {
std::cout << "Usage: run_start_stop_instance " <<
std::endl;
}
+// snippet-end:[cpp.example_code.ec2.StopInstances]
/*
*
@@ -160,10 +156,10 @@ int main(int argc, char **argv) {
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
if (start_instance) {
- AwsDoc::EC2::StartInstance(instance_id, clientConfig);
+ AwsDoc::EC2::startInstance(instance_id, clientConfig);
}
else {
- AwsDoc::EC2::StopInstance(instance_id, clientConfig);
+ AwsDoc::EC2::stopInstance(instance_id, clientConfig);
}
}
Aws::ShutdownAPI(options);
diff --git a/cpp/example_code/ec2/terminate_instances.cpp b/cpp/example_code/ec2/terminate_instances.cpp
index eeccf54d55a..d594681f65e 100644
--- a/cpp/example_code/ec2/terminate_instances.cpp
+++ b/cpp/example_code/ec2/terminate_instances.cpp
@@ -12,22 +12,20 @@
*
**/
-#include
#include
#include
#include
#include "ec2_samples.h"
+// snippet-start:[cpp.example_code.ec2.TerminateInstances]
//! Terminate an Amazon Elastic Compute Cloud (Amazon EC2) instance.
/*!
- \sa TerminateInstances()
\param instanceID: An EC2 instance ID.
\param clientConfiguration: AWS client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::EC2::TerminateInstances(const Aws::String &instanceID,
+bool AwsDoc::EC2::terminateInstances(const Aws::String &instanceID,
const Aws::Client::ClientConfiguration &clientConfiguration) {
-// snippet-start:[cpp.example_code.ec2.TerminateInstances]
Aws::EC2::EC2Client ec2Client(clientConfiguration);
Aws::EC2::Model::TerminateInstancesRequest request;
@@ -38,17 +36,16 @@ bool AwsDoc::EC2::TerminateInstances(const Aws::String &instanceID,
if (outcome.IsSuccess()) {
std::cout << "Ec2 instance '" << instanceID <<
"' was terminated." << std::endl;
- }
- else {
+ } else {
std::cerr << "Failed to terminate ec2 instance " << instanceID <<
", " <<
outcome.GetError().GetMessage() << std::endl;
return false;
}
-// snippet-end:[cpp.example_code.ec2.TerminateInstances]
return outcome.IsSuccess();
}
+// snippet-end:[cpp.example_code.ec2.TerminateInstances]
/*
*
@@ -76,7 +73,7 @@ int main(int argc, char **argv) {
// Optional: Set to the AWS Region (overrides config file).
// clientConfig.region = "us-east-1";
Aws::String instanceID = argv[1];
- AwsDoc::EC2::TerminateInstances(instanceID, clientConfig);
+ AwsDoc::EC2::terminateInstances(instanceID, clientConfig);
}
Aws::ShutdownAPI(options);
return 0;
diff --git a/cpp/example_code/ec2/tests/CMakeLists.txt b/cpp/example_code/ec2/tests/CMakeLists.txt
index e93b0060196..bd88a655680 100644
--- a/cpp/example_code/ec2/tests/CMakeLists.txt
+++ b/cpp/example_code/ec2/tests/CMakeLists.txt
@@ -14,10 +14,6 @@ project("${EXAMPLE_SERVICE_NAME}-examples-gtests" )
# Set the C++ standard to use to build this target.
set(CMAKE_CXX_STANDARD 14)
-# Build shared libraries by default.
-set(BUILD_SHARED_LIBS ON)
-
-
if(NOT GTest_FOUND)
include(FetchContent)
FetchContent_Declare(
@@ -47,25 +43,13 @@ add_executable(
${CURRENT_TARGET}
)
-if (WINDOWS_BUILD)
+if (WINDOWS_BUILD AND AWSSDK_INSTALL_AS_SHARED_LIBS)
# set(BIN_SUB_DIR "/Debug") # If you are building from the command line, you may need to uncomment this
- # and set the proper subdirectory to the executables' location.
-
- if (AWSSDK_INSTALL_AS_SHARED_LIBS)
- # Copy relevant AWS SDK for C++ libraries into the current binary directory for running and debugging.
- AWSSDK_CPY_DYN_LIBS(
- CURRENT_TARGET_AWS_DEPENDENCIES
- ""
- ${CMAKE_CURRENT_BINARY_DIR}${BIN_SUB_DIR}
- )
- endif ()
-
- add_custom_command(
- TARGET
- ${CURRENT_TARGET}
- POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy
- ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}${BIN_SUB_DIR}/gtest.dll
+ # and set the proper subdirectory to the executable's location.
+ # Copy relevant AWS SDK for C++ libraries into the current binary directory for running and debugging.
+ AWSSDK_CPY_DYN_LIBS(
+ CURRENT_TARGET_AWS_DEPENDENCIES
+ ""
${CMAKE_CURRENT_BINARY_DIR}${BIN_SUB_DIR}
)
endif ()
diff --git a/cpp/example_code/ec2/tests/ec2_gtests.cpp b/cpp/example_code/ec2/tests/ec2_gtests.cpp
index d2dab9633e8..25ae6288afb 100644
--- a/cpp/example_code/ec2/tests/ec2_gtests.cpp
+++ b/cpp/example_code/ec2/tests/ec2_gtests.cpp
@@ -14,6 +14,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -23,6 +24,7 @@ Aws::SDKOptions AwsDocTest::EC2_GTests::s_options;
std::unique_ptr<Aws::Client::ClientConfiguration> AwsDocTest::EC2_GTests::s_clientConfig;
Aws::String AwsDocTest::EC2_GTests::s_instanceID;
Aws::String AwsDocTest::EC2_GTests::s_vpcID;
+Aws::String AwsDocTest::EC2_GTests::s_securityGroupID;
void AwsDocTest::EC2_GTests::SetUpTestSuite() {
@@ -39,6 +41,11 @@ void AwsDocTest::EC2_GTests::TearDownTestSuite() {
s_instanceID.clear();
}
+ if (!s_securityGroupID.empty()) {
+ deleteSecurityGroup(s_securityGroupID);
+ s_securityGroupID.clear();
+ }
+
ShutdownAPI(s_options);
}
@@ -101,7 +108,7 @@ bool AwsDocTest::EC2_GTests::releaseIPAddress(const Aws::String &allocationID) {
auto outcome = ec2Client.ReleaseAddress(request);
if (!outcome.IsSuccess()) {
- std::cerr << "Failed to release Elastic IP address " <<
+ std::cerr << "AwsDocTest::EC2_GTests Failed to release Elastic IP address " <<
allocationID << ":" << outcome.GetError().GetMessage() <<
std::endl;
}
@@ -131,8 +138,7 @@ Aws::String AwsDocTest::EC2_GTests::getAmiID() {
if (s_clientConfig->region == "us-east-1") {
result = "ami-0dfcb1ef8550277af";
- }
- else {
+ } else {
std::cerr << "EC2_GTests::getAmiID no amiID specified for the region "
<< s_clientConfig->region << std::endl;
}
@@ -155,15 +161,13 @@ Aws::String AwsDocTest::EC2_GTests::createInstance() {
if (!runOutcome.IsSuccess()) {
std::cerr << "Failed to launch ec2 instance based on ami " << amiID
<< ":" << runOutcome.GetError().GetMessage() << std::endl;
- }
- else {
+ } else {
const Aws::Vector<Aws::EC2::Model::Instance> &instances = runOutcome.GetResult().GetInstances();
if (instances.empty()) {
std::cerr << "Failed to launch ec2 instance based on ami " <<
amiID << ":" <<
runOutcome.GetError().GetMessage() << std::endl;
- }
- else {
+ } else {
instanceID = instances[0].GetInstanceId();
}
}
@@ -181,8 +185,7 @@ Aws::String AwsDocTest::EC2_GTests::getCachedInstanceID() {
if (instanceStateName == Aws::EC2::Model::InstanceStateName::running) {
s_instanceID = instanceID;
- }
- else {
+ } else {
std::cerr << "Error starting instance, instanceStateName '"
<< Aws::EC2::Model::InstanceStateNameMapper::GetNameForInstanceStateName(
instanceStateName)
@@ -222,13 +225,11 @@ AwsDocTest::EC2_GTests::getInstanceState(const Aws::String &instanceID) {
if (!outcome.GetResult().GetReservations().empty() &&
!outcome.GetResult().GetReservations()[0].GetInstances().empty()) {
instanceState = outcome.GetResult().GetReservations()[0].GetInstances()[0].GetState().GetName();
- }
- else {
+ } else {
std::cerr << "EC2_GTests::getInstanceState no instance returned."
<< std::endl;
}
- }
- else {
+ } else {
std::cerr << "EC2_GTests::getInstanceState error "
<< outcome.GetError().GetMessage()
<< std::endl;
@@ -283,8 +284,7 @@ Aws::String AwsDocTest::EC2_GTests::createSecurityGroup(const Aws::String &group
if (!outcome.IsSuccess()) {
std::cerr << "Failed to create security group:" <<
outcome.GetError().GetMessage() << std::endl;
- }
- else {
+ } else {
groupID = outcome.GetResult().GetGroupId();
}
@@ -333,8 +333,7 @@ Aws::String AwsDocTest::EC2_GTests::getVpcID() {
break;
}
}
- }
- else {
+ } else {
std::cerr << "Failed to describe security groups:" <<
outcome.GetError().GetMessage() << std::endl;
done = true;
@@ -362,14 +361,35 @@ Aws::String AwsDocTest::EC2_GTests::allocateIPAddress() {
std::cerr
<< "EC2_GTests::allocateIPAddress: failed to allocate Elastic IP address:"
<< outcome.GetError().GetMessage() << std::endl;
- }
- else {
+ } else {
allocationID = outcome.GetResult().GetAllocationId();
}
return allocationID;
}
+Aws::String AwsDocTest::EC2_GTests::getCachedSecurityGroupID() {
+ if (s_securityGroupID.empty()) {
+ s_securityGroupID = createSecurityGroup("cpp-test-group");
+ }
+
+ return s_securityGroupID;
+}
+
+bool AwsDocTest::EC2_GTests::dissociateAddress(const Aws::String &associationID) {
+ Aws::EC2::EC2Client ec2Client(*s_clientConfig);
+ Aws::EC2::Model::DisassociateAddressRequest request;
+ request.SetAssociationId(associationID);
+ const Aws::EC2::Model::DisassociateAddressOutcome outcome =
+ ec2Client.DisassociateAddress(request);
+ if (!outcome.IsSuccess()) {
+ std::cerr << "EC2_GTests::dissociateAddress error: " <<
+ outcome.GetError().GetMessage() << std::endl;
+ }
+
+ return outcome.IsSuccess();
+}
+
int AwsDocTest::MyStringBuffer::underflow() {
int result = basic_stringbuf::underflow();
if (result == EOF) {
diff --git a/cpp/example_code/ec2/tests/ec2_gtests.h b/cpp/example_code/ec2/tests/ec2_gtests.h
index 6ad702b0053..29c4d964514 100644
--- a/cpp/example_code/ec2/tests/ec2_gtests.h
+++ b/cpp/example_code/ec2/tests/ec2_gtests.h
@@ -60,15 +60,19 @@ namespace AwsDocTest {
static bool deleteSecurityGroup(const Aws::String &groupID);
+ static bool dissociateAddress(const Aws::String &associationID);
+
static Aws::String getVpcID();
+ static Aws::String getCachedSecurityGroupID();
+
// s_clientConfig must be a pointer because the client config must be initialized
// after InitAPI.
static std::unique_ptr<Aws::Client::ClientConfiguration> s_clientConfig;
private:
- bool suppressStdOut();
+ static bool suppressStdOut();
static Aws::SDKOptions s_options;
@@ -80,6 +84,7 @@ namespace AwsDocTest {
static Aws::String s_instanceID;
static Aws::String s_vpcID;
+ static Aws::String s_securityGroupID;
}; // EC2_GTests
} // AwsDocTest
diff --git a/cpp/example_code/ec2/tests/gtest_allocate_address.cpp b/cpp/example_code/ec2/tests/gtest_allocate_address.cpp
index bec76c3c114..5c57c53a29f 100644
--- a/cpp/example_code/ec2/tests/gtest_allocate_address.cpp
+++ b/cpp/example_code/ec2/tests/gtest_allocate_address.cpp
@@ -21,8 +21,9 @@ namespace AwsDocTest {
ASSERT_FALSE(instanceID.empty()) << preconditionError() << std::endl;
Aws::String allocationID;
- auto result = AwsDoc::EC2::AllocateAndAssociateAddress(instanceID,
- allocationID,
+ Aws::String publicIpAddress;
+ auto result = AwsDoc::EC2::allocateAndAssociateAddress(instanceID,
+ publicIpAddress, allocationID,
*s_clientConfig);
ASSERT_TRUE(result);
diff --git a/cpp/example_code/ec2/tests/gtest_associate_address.cpp b/cpp/example_code/ec2/tests/gtest_associate_address.cpp
new file mode 100644
index 00000000000..1e23833a668
--- /dev/null
+++ b/cpp/example_code/ec2/tests/gtest_associate_address.cpp
@@ -0,0 +1,38 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+/*
+ * Test types are indicated by the test label ending.
+ *
+ * _1_ Requires credentials, permissions, and AWS resources.
+ * _2_ Requires credentials and permissions.
+ * _3_ Does not require credentials.
+ *
+ */
+
+#include <gtest/gtest.h>
+#include "ec2_samples.h"
+#include "ec2_gtests.h"
+
+namespace AwsDocTest {
+ // NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(EC2_GTests, associate_address_2_) {
+ Aws::String allocationID = allocateIPAddress();
+ ASSERT_FALSE(allocationID.empty()) << preconditionError() << std::endl;
+
+ Aws::String instanceID = getCachedInstanceID();
+ ASSERT_FALSE(instanceID.empty()) << preconditionError() << std::endl;
+
+ Aws::String associationID;
+
+ bool result = AwsDoc::EC2::associateAddress(instanceID, allocationID, associationID, *s_clientConfig);
+ EXPECT_TRUE(result);
+
+ if (result) {
+ result = dissociateAddress(associationID);
+ EXPECT_TRUE(result);
+ }
+
+ result = releaseIPAddress(allocationID);
+ EXPECT_TRUE(result);
+ }
+} // namespace AwsDocTest
diff --git a/cpp/example_code/ec2/tests/gtest_authorize_security_group_ingress.cpp b/cpp/example_code/ec2/tests/gtest_authorize_security_group_ingress.cpp
new file mode 100644
index 00000000000..a11417c5109
--- /dev/null
+++ b/cpp/example_code/ec2/tests/gtest_authorize_security_group_ingress.cpp
@@ -0,0 +1,25 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+/*
+ * Test types are indicated by the test label ending.
+ *
+ * _1_ Requires credentials, permissions, and AWS resources.
+ * _2_ Requires credentials and permissions.
+ * _3_ Does not require credentials.
+ *
+ */
+
+#include <gtest/gtest.h>
+#include "ec2_samples.h"
+#include "ec2_gtests.h"
+
+namespace AwsDocTest {
+ // NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(EC2_GTests, authorize_security_group_ingress_2_) {
+ Aws::String groupID = getCachedSecurityGroupID();
+ ASSERT_FALSE(groupID.empty()) << preconditionError() << std::endl;
+
+ bool result = AwsDoc::EC2::authorizeSecurityGroupIngress(groupID, *s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+} // namespace AwsDocTest
diff --git a/cpp/example_code/ec2/tests/gtest_create_key_pair.cpp b/cpp/example_code/ec2/tests/gtest_create_key_pair.cpp
index f6594f84a14..91def26d9f6 100644
--- a/cpp/example_code/ec2/tests/gtest_create_key_pair.cpp
+++ b/cpp/example_code/ec2/tests/gtest_create_key_pair.cpp
@@ -17,7 +17,7 @@ namespace AwsDocTest {
// NOLINTNEXTLINE(readability-named-parameter)
TEST_F(EC2_GTests, create_key_pair_2_) {
auto keyPairName = uuidName("test-create");
- auto result = AwsDoc::EC2::CreateKeyPair(keyPairName, *s_clientConfig);
+ auto result = AwsDoc::EC2::createKeyPair(keyPairName, "", *s_clientConfig);
ASSERT_TRUE(result);
deleteKeyPair(keyPairName);
diff --git a/cpp/example_code/ec2/tests/gtest_create_security_group.cpp b/cpp/example_code/ec2/tests/gtest_create_security_group.cpp
index 8b0e073ce5d..3d980fbe77a 100644
--- a/cpp/example_code/ec2/tests/gtest_create_security_group.cpp
+++ b/cpp/example_code/ec2/tests/gtest_create_security_group.cpp
@@ -21,7 +21,7 @@ namespace AwsDocTest {
auto groupName = uuidName("test-create");
Aws::String groupID;
- auto result = AwsDoc::EC2::CreateSecurityGroup(groupName, "description",
+ auto result = AwsDoc::EC2::createSecurityGroup(groupName, "description",
vpcID, groupID, *s_clientConfig);
EXPECT_TRUE(result);
diff --git a/cpp/example_code/ec2/tests/gtest_create_tags.cpp b/cpp/example_code/ec2/tests/gtest_create_tags.cpp
new file mode 100644
index 00000000000..6755adae613
--- /dev/null
+++ b/cpp/example_code/ec2/tests/gtest_create_tags.cpp
@@ -0,0 +1,29 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+/*
+ * Test types are indicated by the test label ending.
+ *
+ * _1_ Requires credentials, permissions, and AWS resources.
+ * _2_ Requires credentials and permissions.
+ * _3_ Does not require credentials.
+ *
+ */
+
+#include <gtest/gtest.h>
+#include "ec2_samples.h"
+#include "ec2_gtests.h"
+
+namespace AwsDocTest {
+ // NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(EC2_GTests, create_tags_2_) {
+ Aws::String instanceID = getCachedInstanceID();
+ ASSERT_FALSE(instanceID.empty()) << preconditionError() << std::endl;
+ Aws::Vector<Aws::String> resources;
+ resources.push_back(instanceID);
+
+ Aws::Vector<Aws::EC2::Model::Tag> tags;
+ tags.push_back(Aws::EC2::Model::Tag().WithKey("Name").WithValue("Test"));
+ bool result = AwsDoc::EC2::createTags(resources, tags, *s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+} // namespace AwsDocTest
diff --git a/cpp/example_code/ec2/tests/gtest_delete_key_pair.cpp b/cpp/example_code/ec2/tests/gtest_delete_key_pair.cpp
index 856162138be..cfbc161909f 100644
--- a/cpp/example_code/ec2/tests/gtest_delete_key_pair.cpp
+++ b/cpp/example_code/ec2/tests/gtest_delete_key_pair.cpp
@@ -20,7 +20,7 @@ namespace AwsDocTest {
bool result = createKeyPair(keyPairName);
ASSERT_TRUE(result) << preconditionError() << std::endl;
- result = AwsDoc::EC2::DeleteKeyPair(keyPairName, *s_clientConfig);
+ result = AwsDoc::EC2::deleteKeyPair(keyPairName, *s_clientConfig);
ASSERT_TRUE(result);
}
diff --git a/cpp/example_code/ec2/tests/gtest_delete_security_group.cpp b/cpp/example_code/ec2/tests/gtest_delete_security_group.cpp
index 5c3aa0e0384..ce32ef0a037 100644
--- a/cpp/example_code/ec2/tests/gtest_delete_security_group.cpp
+++ b/cpp/example_code/ec2/tests/gtest_delete_security_group.cpp
@@ -19,7 +19,7 @@ namespace AwsDocTest {
Aws::String groupID = createSecurityGroup(uuidName("test-delete"));
ASSERT_FALSE(groupID.empty());
- auto result = AwsDoc::EC2::DeleteSecurityGroup(groupID, *s_clientConfig);
+ auto result = AwsDoc::EC2::deleteSecurityGroup(groupID, *s_clientConfig);
ASSERT_TRUE(result);
}
diff --git a/cpp/example_code/ec2/tests/gtest_describe_addresses.cpp b/cpp/example_code/ec2/tests/gtest_describe_addresses.cpp
index 68f4a576b65..d738981d5d0 100644
--- a/cpp/example_code/ec2/tests/gtest_describe_addresses.cpp
+++ b/cpp/example_code/ec2/tests/gtest_describe_addresses.cpp
@@ -17,7 +17,7 @@ namespace AwsDocTest {
// NOLINTNEXTLINE(readability-named-parameter)
TEST_F(EC2_GTests, describe_Addresses_2_) {
- auto result = AwsDoc::EC2::DescribeAddresses(*s_clientConfig);
+ auto result = AwsDoc::EC2::describeAddresses(*s_clientConfig);
ASSERT_TRUE(result);
}
diff --git a/cpp/example_code/ec2/tests/gtest_describe_availability_zones.cpp b/cpp/example_code/ec2/tests/gtest_describe_availability_zones.cpp
new file mode 100644
index 00000000000..bae41066a85
--- /dev/null
+++ b/cpp/example_code/ec2/tests/gtest_describe_availability_zones.cpp
@@ -0,0 +1,24 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+/*
+ * Test types are indicated by the test label ending.
+ *
+ * _1_ Requires credentials, permissions, and AWS resources.
+ * _2_ Requires credentials and permissions.
+ * _3_ Does not require credentials.
+ *
+ */
+
+#include <gtest/gtest.h>
+#include "ec2_samples.h"
+#include "ec2_gtests.h"
+
+namespace AwsDocTest {
+ // NOLINTNEXTLINE(readability-named-parameter)
+ TEST_F(EC2_GTests, describe_availability_zones_2_) {
+ bool result = AwsDoc::EC2::describeAvailabilityZones(*s_clientConfig);
+ ASSERT_TRUE(result);
+ }
+} // namespace AwsDocTest
diff --git a/cpp/example_code/ec2/tests/gtest_describe_instances.cpp b/cpp/example_code/ec2/tests/gtest_describe_instances.cpp
index 390ecf77c45..8b0365f6b20 100644
--- a/cpp/example_code/ec2/tests/gtest_describe_instances.cpp
+++ b/cpp/example_code/ec2/tests/gtest_describe_instances.cpp
@@ -17,7 +17,7 @@ namespace AwsDocTest {
// NOLINTNEXTLINE(readability-named-parameter)
TEST_F(EC2_GTests, describe_instances_2_) {
- auto result = AwsDoc::EC2::DescribeInstances(*s_clientConfig);
+ auto result = AwsDoc::EC2::describeInstances(*s_clientConfig);
ASSERT_TRUE(result);
}
diff --git a/cpp/example_code/ec2/tests/gtest_describe_key_pairs.cpp b/cpp/example_code/ec2/tests/gtest_describe_key_pairs.cpp
index fc9fc49e6ba..0c719324681 100644
--- a/cpp/example_code/ec2/tests/gtest_describe_key_pairs.cpp
+++ b/cpp/example_code/ec2/tests/gtest_describe_key_pairs.cpp
@@ -17,7 +17,7 @@ namespace AwsDocTest {
// NOLINTNEXTLINE(readability-named-parameter)
TEST_F(EC2_GTests, describe_key_pairs_2_) {
- auto result = AwsDoc::EC2::DescribeKeyPairs(*s_clientConfig);
+ auto result = AwsDoc::EC2::describeKeyPairs(*s_clientConfig);
ASSERT_TRUE(result);
}
diff --git a/cpp/example_code/ec2/tests/gtest_describe_regions_and_zones.cpp b/cpp/example_code/ec2/tests/gtest_describe_regions_and_zones.cpp
index d4e400824fc..72132aef243 100644
--- a/cpp/example_code/ec2/tests/gtest_describe_regions_and_zones.cpp
+++ b/cpp/example_code/ec2/tests/gtest_describe_regions_and_zones.cpp
@@ -17,7 +17,7 @@ namespace AwsDocTest {
// NOLINTNEXTLINE(readability-named-parameter)
TEST_F(EC2_GTests, describe_regions_and_zones_2_) {
- auto result = AwsDoc::EC2::DescribeRegionsAndZones(*s_clientConfig);
+ auto result = AwsDoc::EC2::describeRegions(*s_clientConfig);
ASSERT_TRUE(result);
}
diff --git a/cpp/example_code/ec2/tests/gtest_describe_security_groups.cpp b/cpp/example_code/ec2/tests/gtest_describe_security_groups.cpp
index 7a82acbdc11..e56c9df1574 100644
--- a/cpp/example_code/ec2/tests/gtest_describe_security_groups.cpp
+++ b/cpp/example_code/ec2/tests/gtest_describe_security_groups.cpp
@@ -17,7 +17,7 @@ namespace AwsDocTest {
// NOLINTNEXTLINE(readability-named-parameter)
TEST_F(EC2_GTests, describe_security_groups_2_) {
- auto result = AwsDoc::EC2::DescribeSecurityGroups("", *s_clientConfig);
+ auto result = AwsDoc::EC2::describeSecurityGroups("", *s_clientConfig);
ASSERT_TRUE(result);
}
diff --git a/cpp/example_code/ec2/tests/gtest_monitor_instance.cpp b/cpp/example_code/ec2/tests/gtest_monitor_instance.cpp
index 36612154263..be1a8c84999 100644
--- a/cpp/example_code/ec2/tests/gtest_monitor_instance.cpp
+++ b/cpp/example_code/ec2/tests/gtest_monitor_instance.cpp
@@ -19,10 +19,10 @@ namespace AwsDocTest {
auto instanceID = getCachedInstanceID();
ASSERT_FALSE(instanceID.empty()) << preconditionError << std::endl;
- auto result = AwsDoc::EC2::EnableMonitoring(instanceID, *s_clientConfig);
+ auto result = AwsDoc::EC2::enableMonitoring(instanceID, *s_clientConfig);
ASSERT_TRUE(result);
- result = AwsDoc::EC2::DisableMonitoring(instanceID, *s_clientConfig);
+ result = AwsDoc::EC2::disableMonitoring(instanceID, *s_clientConfig);
ASSERT_TRUE(result);
}
diff --git a/cpp/example_code/ec2/tests/gtest_reboot_instance.cpp b/cpp/example_code/ec2/tests/gtest_reboot_instance.cpp
index 6b6b91b0497..1b18aacf46e 100644
--- a/cpp/example_code/ec2/tests/gtest_reboot_instance.cpp
+++ b/cpp/example_code/ec2/tests/gtest_reboot_instance.cpp
@@ -19,7 +19,7 @@ namespace AwsDocTest {
auto instanceID = getCachedInstanceID();
ASSERT_FALSE(instanceID.empty()) << preconditionError << std::endl;
- auto result = AwsDoc::EC2::RebootInstance(instanceID, *s_clientConfig);
+ auto result = AwsDoc::EC2::rebootInstance(instanceID, *s_clientConfig);
ASSERT_TRUE(result);
}
diff --git a/cpp/example_code/ec2/tests/gtest_release_address.cpp b/cpp/example_code/ec2/tests/gtest_release_address.cpp
index 1634fe20aee..5ace7fdc370 100644
--- a/cpp/example_code/ec2/tests/gtest_release_address.cpp
+++ b/cpp/example_code/ec2/tests/gtest_release_address.cpp
@@ -19,7 +19,7 @@ namespace AwsDocTest {
Aws::String allocationID = allocateIPAddress();
ASSERT_FALSE(allocationID.empty()) << preconditionError() << std::endl;
- auto result = AwsDoc::EC2::ReleaseAddress(allocationID, *s_clientConfig);
+ auto result = AwsDoc::EC2::releaseAddress(allocationID, *s_clientConfig);
ASSERT_TRUE(result);
}
diff --git a/cpp/example_code/ec2/tests/gtest_create_instance.cpp b/cpp/example_code/ec2/tests/gtest_run_instances.cpp
similarity index 93%
rename from cpp/example_code/ec2/tests/gtest_create_instance.cpp
rename to cpp/example_code/ec2/tests/gtest_run_instances.cpp
index 3725ff0423e..106db04a6d2 100644
--- a/cpp/example_code/ec2/tests/gtest_create_instance.cpp
+++ b/cpp/example_code/ec2/tests/gtest_run_instances.cpp
@@ -22,7 +22,7 @@ namespace AwsDocTest {
auto instanceName = uuidName("test-create");
Aws::String instanceID;
- auto result = AwsDoc::EC2::RunInstance(instanceName, amiID,
+ auto result = AwsDoc::EC2::runInstance(instanceName, amiID,
instanceID, *s_clientConfig);
ASSERT_TRUE(result);
diff --git a/cpp/example_code/ec2/tests/gtest_start_stop_instance.cpp b/cpp/example_code/ec2/tests/gtest_start_stop_instance.cpp
index 0e394574f8b..d8e50416a10 100644
--- a/cpp/example_code/ec2/tests/gtest_start_stop_instance.cpp
+++ b/cpp/example_code/ec2/tests/gtest_start_stop_instance.cpp
@@ -19,12 +19,12 @@ namespace AwsDocTest {
auto instanceID = getCachedInstanceID();
ASSERT_FALSE(instanceID.empty()) << preconditionError << std::endl;
- auto result = AwsDoc::EC2::StopInstance(instanceID, *s_clientConfig);
+ auto result = AwsDoc::EC2::stopInstance(instanceID, *s_clientConfig);
ASSERT_TRUE(result);
waitWhileInstanceInState(instanceID,
Aws::EC2::Model::InstanceStateName::stopping);
- result = AwsDoc::EC2::StartInstance(instanceID, *s_clientConfig);
+ result = AwsDoc::EC2::startInstance(instanceID, *s_clientConfig);
ASSERT_TRUE(result);
waitWhileInstanceInState(instanceID,
diff --git a/cpp/example_code/ec2/tests/gtest_terminate_instances.cpp b/cpp/example_code/ec2/tests/gtest_terminate_instances.cpp
index e56accbebe9..37a4f855b14 100644
--- a/cpp/example_code/ec2/tests/gtest_terminate_instances.cpp
+++ b/cpp/example_code/ec2/tests/gtest_terminate_instances.cpp
@@ -19,7 +19,7 @@ namespace AwsDocTest {
auto instanceID = getCachedInstanceID();
ASSERT_FALSE(instanceID.empty()) << preconditionError << std::endl;
- auto result = AwsDoc::EC2::TerminateInstances(instanceID, *s_clientConfig);
+ auto result = AwsDoc::EC2::terminateInstances(instanceID, *s_clientConfig);
ASSERT_TRUE(result);
}
From 32dd8c0fd0d3fbad2d720255116eadf882732e79 Mon Sep 17 00:00:00 2001
From: Scott Macdonald <57190223+scmacdon@users.noreply.github.com>
Date: Fri, 2 Aug 2024 11:44:51 -0400
Subject: [PATCH 17/98] Java V2 Refactored SSM scenario (#6650)
---
.doc_gen/metadata/ssm_metadata.yaml | 3 +
javav2/example_code/ssm/README.md | 18 +-
javav2/example_code/ssm/pom.xml | 6 +-
.../java/com/example/scenario/SSMActions.java | 641 +++++++++++++++++
.../com/example/scenario/SSMScenario.java | 642 +++++-------------
.../ssm/src/test/java/AWSSSMTest.java | 41 +-
6 files changed, 852 insertions(+), 499 deletions(-)
create mode 100644 javav2/example_code/ssm/src/main/java/com/example/scenario/SSMActions.java
diff --git a/.doc_gen/metadata/ssm_metadata.yaml b/.doc_gen/metadata/ssm_metadata.yaml
index 6adf74badd9..ae6d398a2cd 100644
--- a/.doc_gen/metadata/ssm_metadata.yaml
+++ b/.doc_gen/metadata/ssm_metadata.yaml
@@ -316,6 +316,9 @@ ssm_Scenario:
- description:
snippet_tags:
- ssm.java2.scenario.main
+ - description: A wrapper class for &SYS; SDK methods.
+ snippet_tags:
+ - ssm.java2.actions.main
Python:
versions:
- sdk_version: 3
diff --git a/javav2/example_code/ssm/README.md b/javav2/example_code/ssm/README.md
index 1848633de8d..596fff26656 100644
--- a/javav2/example_code/ssm/README.md
+++ b/javav2/example_code/ssm/README.md
@@ -38,17 +38,17 @@ For prerequisites, see the [README](../../README.md#Prerequisites) in the `javav
Code excerpts that show you how to call individual service functions.
-- [CreateDocument](src/main/java/com/example/scenario/SSMScenario.java#L475)
-- [CreateMaintenanceWindow](src/main/java/com/example/scenario/SSMScenario.java#L429)
-- [CreateOpsItem](src/main/java/com/example/scenario/SSMScenario.java#L227)
-- [DeleteDocument](src/main/java/com/example/scenario/SSMScenario.java#L371)
-- [DeleteMaintenanceWindow](src/main/java/com/example/scenario/SSMScenario.java#L389)
-- [DescribeOpsItems](src/main/java/com/example/scenario/SSMScenario.java#L517)
+- [CreateDocument](src/main/java/com/example/scenario/SSMActions.java#L481)
+- [CreateMaintenanceWindow](src/main/java/com/example/scenario/SSMActions.java#L569)
+- [CreateOpsItem](src/main/java/com/example/scenario/SSMActions.java#L314)
+- [DeleteDocument](src/main/java/com/example/scenario/SSMActions.java#L92)
+- [DeleteMaintenanceWindow](src/main/java/com/example/scenario/SSMActions.java#L131)
+- [DescribeOpsItems](src/main/java/com/example/scenario/SSMActions.java#L210)
- [DescribeParameters](src/main/java/com/example/ssm/GetParameter.java#L6)
- [PutParameter](src/main/java/com/example/ssm/PutParameter.java#L6)
-- [SendCommand](src/main/java/com/example/scenario/SSMScenario.java#L311)
-- [UpdateMaintenanceWindow](src/main/java/com/example/scenario/SSMScenario.java#L406)
-- [UpdateOpsItem](src/main/java/com/example/scenario/SSMScenario.java#L275)
+- [SendCommand](src/main/java/com/example/scenario/SSMActions.java#L385)
+- [UpdateMaintenanceWindow](src/main/java/com/example/scenario/SSMActions.java#L532)
+- [UpdateOpsItem](src/main/java/com/example/scenario/SSMActions.java#L170)
### Scenarios
diff --git a/javav2/example_code/ssm/pom.xml b/javav2/example_code/ssm/pom.xml
index fe126b1586b..a73fdc3ba7e 100644
--- a/javav2/example_code/ssm/pom.xml
+++ b/javav2/example_code/ssm/pom.xml
@@ -38,7 +38,7 @@
<groupId>software.amazon.awssdk</groupId>
<artifactId>bom</artifactId>
-                <version>2.21.20</version>
+                <version>2.25.56</version>
<type>pom</type>
<scope>import</scope>
@@ -59,6 +59,10 @@
<groupId>software.amazon.awssdk</groupId>
<artifactId>secretsmanager</artifactId>
</dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>netty-nio-client</artifactId>
+        </dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
diff --git a/javav2/example_code/ssm/src/main/java/com/example/scenario/SSMActions.java b/javav2/example_code/ssm/src/main/java/com/example/scenario/SSMActions.java
new file mode 100644
index 00000000000..8a4056b880b
--- /dev/null
+++ b/javav2/example_code/ssm/src/main/java/com/example/scenario/SSMActions.java
@@ -0,0 +1,641 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.example.scenario;
+
+import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider;
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.core.retry.RetryPolicy;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.ssm.SsmAsyncClient;
+import software.amazon.awssdk.services.ssm.model.CommandInvocation;
+import software.amazon.awssdk.services.ssm.model.CommandInvocationStatus;
+import software.amazon.awssdk.services.ssm.model.CreateDocumentRequest;
+import software.amazon.awssdk.services.ssm.model.CreateDocumentResponse;
+import software.amazon.awssdk.services.ssm.model.CreateMaintenanceWindowRequest;
+import software.amazon.awssdk.services.ssm.model.CreateMaintenanceWindowResponse;
+import software.amazon.awssdk.services.ssm.model.CreateOpsItemRequest;
+import software.amazon.awssdk.services.ssm.model.DeleteDocumentRequest;
+import software.amazon.awssdk.services.ssm.model.DeleteMaintenanceWindowRequest;
+import software.amazon.awssdk.services.ssm.model.DescribeDocumentRequest;
+import software.amazon.awssdk.services.ssm.model.DescribeDocumentResponse;
+import software.amazon.awssdk.services.ssm.model.DescribeMaintenanceWindowsRequest;
+import software.amazon.awssdk.services.ssm.model.DescribeMaintenanceWindowsResponse;
+import software.amazon.awssdk.services.ssm.model.DescribeOpsItemsRequest;
+import software.amazon.awssdk.services.ssm.model.DocumentAlreadyExistsException;
+import software.amazon.awssdk.services.ssm.model.DocumentType;
+import software.amazon.awssdk.services.ssm.model.GetCommandInvocationRequest;
+import software.amazon.awssdk.services.ssm.model.GetCommandInvocationResponse;
+import software.amazon.awssdk.services.ssm.model.ListCommandInvocationsRequest;
+import software.amazon.awssdk.services.ssm.model.ListCommandInvocationsResponse;
+import software.amazon.awssdk.services.ssm.model.MaintenanceWindowFilter;
+import software.amazon.awssdk.services.ssm.model.MaintenanceWindowIdentity;
+import software.amazon.awssdk.services.ssm.model.OpsItem;
+import software.amazon.awssdk.services.ssm.model.OpsItemDataValue;
+import software.amazon.awssdk.services.ssm.model.OpsItemFilter;
+import software.amazon.awssdk.services.ssm.model.OpsItemFilterKey;
+import software.amazon.awssdk.services.ssm.model.OpsItemFilterOperator;
+import software.amazon.awssdk.services.ssm.model.OpsItemStatus;
+import software.amazon.awssdk.services.ssm.model.OpsItemSummary;
+import software.amazon.awssdk.services.ssm.model.SendCommandRequest;
+import software.amazon.awssdk.services.ssm.model.SendCommandResponse;
+import software.amazon.awssdk.services.ssm.model.SsmException;
+import software.amazon.awssdk.services.ssm.model.UpdateMaintenanceWindowRequest;
+import software.amazon.awssdk.services.ssm.model.UpdateMaintenanceWindowResponse;
+import software.amazon.awssdk.services.ssm.model.CreateOpsItemResponse;
+import software.amazon.awssdk.services.ssm.model.UpdateOpsItemRequest;
+import software.amazon.awssdk.services.ssm.model.GetOpsItemRequest;
+import software.amazon.awssdk.services.ssm.model.GetOpsItemResponse;
+import java.time.Duration;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.TimeUnit;
+// snippet-start:[ssm.java2.actions.main]
+public class SSMActions {
+
+ private static SsmAsyncClient ssmAsyncClient;
+
+ private static SsmAsyncClient getAsyncClient() {
+ if (ssmAsyncClient == null) {
+ SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder()
+ .maxConcurrency(100)
+ .connectionTimeout(Duration.ofSeconds(60))
+ .readTimeout(Duration.ofSeconds(60))
+ .writeTimeout(Duration.ofSeconds(60))
+ .build();
+
+ ClientOverrideConfiguration overrideConfig = ClientOverrideConfiguration.builder()
+ .apiCallTimeout(Duration.ofMinutes(2))
+ .apiCallAttemptTimeout(Duration.ofSeconds(90))
+ .retryPolicy(RetryPolicy.builder()
+ .numRetries(3)
+ .build())
+ .build();
+
+ ssmAsyncClient = SsmAsyncClient.builder()
+ .region(Region.US_EAST_1)
+ .httpClient(httpClient)
+ .overrideConfiguration(overrideConfig)
+ .credentialsProvider(EnvironmentVariableCredentialsProvider.create())
+ .build();
+ }
+ return ssmAsyncClient;
+ }
+
+ // snippet-start:[ssm.Java2.delete_doc.main]
+ /**
+ * Deletes an AWS SSM document asynchronously.
+ *
+ * @param documentName The name of the document to delete.
+ *
+ * This method initiates an asynchronous request to delete an SSM document.
+ * If an exception occurs, it handles the error appropriately.
+ */
+ public void deleteDoc(String documentName) {
+ DeleteDocumentRequest documentRequest = DeleteDocumentRequest.builder()
+ .name(documentName)
+ .build();
+
+ CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
+ getAsyncClient().deleteDocument(documentRequest)
+ .thenAccept(response -> {
+ System.out.println("The SSM document was successfully deleted.");
+ })
+ .exceptionally(ex -> {
+ throw new CompletionException(ex);
+ }).join();
+ }).exceptionally(ex -> {
+ Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
+ if (cause instanceof SsmException) {
+ throw new RuntimeException("SSM error: " + cause.getMessage(), cause);
+ } else {
+ throw new RuntimeException("Unexpected error: " + cause.getMessage(), cause);
+ }
+ });
+
+ try {
+ future.join();
+ } catch (CompletionException ex) {
+ throw ex.getCause() instanceof RuntimeException ? (RuntimeException) ex.getCause() : ex;
+ }
+ }
+ // snippet-end:[ssm.Java2.delete_doc.main]
+
+ // snippet-start:[ssm.java2.delete_window.main]
+ /**
+ * Deletes an AWS SSM Maintenance Window asynchronously.
+ *
+ * @param winId The ID of the Maintenance Window to delete.
+ *
+ * This method initiates an asynchronous request to delete an SSM Maintenance Window.
+ * If an exception occurs, it handles the error appropriately.
+ */
+ public void deleteMaintenanceWindow(String winId) {
+ DeleteMaintenanceWindowRequest windowRequest = DeleteMaintenanceWindowRequest.builder()
+ .windowId(winId)
+ .build();
+
+ CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
+ getAsyncClient().deleteMaintenanceWindow(windowRequest)
+ .thenAccept(response -> {
+ System.out.println("The maintenance window was successfully deleted.");
+ })
+ .exceptionally(ex -> {
+ throw new CompletionException(ex);
+ }).join();
+ }).exceptionally(ex -> {
+ Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
+ if (cause instanceof SsmException) {
+ throw new RuntimeException("SSM error: " + cause.getMessage(), cause);
+ } else {
+ throw new RuntimeException("Unexpected error: " + cause.getMessage(), cause);
+ }
+ });
+
+ try {
+ future.join();
+ } catch (CompletionException ex) {
+ throw ex.getCause() instanceof RuntimeException ? (RuntimeException) ex.getCause() : ex;
+ }
+ }
+ // snippet-end:[ssm.java2.delete_window.main]
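+
+ /*
+ * A minimal usage sketch (not part of the scenario flow): how the two delete helpers
+ * above might be combined for cleanup. The window ID and document name below are
+ * hypothetical placeholders, not values produced by this patch.
+ *
+ * SSMActions actions = new SSMActions();
+ * actions.deleteMaintenanceWindow("mw-0123456789abcdef0"); // hypothetical window ID
+ * actions.deleteDoc("MyTestDoc"); // hypothetical document name
+ */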
+
+ // snippet-start:[ssm.Java2.resolve_ops.main]
+ /**
+ * Resolves an AWS SSM OpsItem asynchronously.
+ *
+ * @param opsID The ID of the OpsItem to resolve.
+ *
+ * This method initiates an asynchronous request to resolve an SSM OpsItem.
+ * If an exception occurs, it handles the error appropriately.
+ */
+ public void resolveOpsItem(String opsID) {
+ UpdateOpsItemRequest opsItemRequest = UpdateOpsItemRequest.builder()
+ .opsItemId(opsID)
+ .status(OpsItemStatus.RESOLVED)
+ .build();
+
+ CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
+ getAsyncClient().updateOpsItem(opsItemRequest)
+ .thenAccept(response -> {
+ System.out.println("OpsItem resolved successfully.");
+ })
+ .exceptionally(ex -> {
+ throw new CompletionException(ex);
+ }).join();
+ }).exceptionally(ex -> {
+ Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
+ if (cause instanceof SsmException) {
+ throw new RuntimeException("SSM error: " + cause.getMessage(), cause);
+ } else {
+ throw new RuntimeException("Unexpected error: " + cause.getMessage(), cause);
+ }
+ });
+
+ try {
+ future.join();
+ } catch (CompletionException ex) {
+ throw ex.getCause() instanceof RuntimeException ? (RuntimeException) ex.getCause() : ex;
+ }
+ }
+ // snippet-end:[ssm.Java2.resolve_ops.main]
+
+ // snippet-start:[ssm.java2.describe_ops.main]
+ /**
+ * Describes AWS SSM OpsItems asynchronously.
+ *
+ * @param key The OpsItem ID value to filter on (the filter key is OPS_ITEM_ID).
+ *
+ * This method initiates an asynchronous request to describe SSM OpsItems.
+ * If the request is successful, it prints the title and status of each OpsItem.
+ * If an exception occurs, it handles the error appropriately.
+ */
+ public void describeOpsItems(String key) {
+ OpsItemFilter filter = OpsItemFilter.builder()
+ .key(OpsItemFilterKey.OPS_ITEM_ID)
+ .values(key)
+ .operator(OpsItemFilterOperator.EQUAL)
+ .build();
+
+ DescribeOpsItemsRequest itemsRequest = DescribeOpsItemsRequest.builder()
+ .maxResults(10)
+ .opsItemFilters(filter)
+ .build();
+
+ CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
+ getAsyncClient().describeOpsItems(itemsRequest)
+ .thenAccept(itemsResponse -> {
+ List<OpsItemSummary> items = itemsResponse.opsItemSummaries();
+ for (OpsItemSummary item : items) {
+ System.out.println("The item title is " + item.title() + " and the status is " + item.status().toString());
+ }
+ })
+ .exceptionally(ex -> {
+ throw new CompletionException(ex);
+ }).join();
+ }).exceptionally(ex -> {
+ Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
+ if (cause instanceof SsmException) {
+ throw new RuntimeException("SSM error: " + cause.getMessage(), cause);
+ } else {
+ throw new RuntimeException("Unexpected error: " + cause.getMessage(), cause);
+ }
+ });
+
+ try {
+ future.join();
+ } catch (CompletionException ex) {
+ throw ex.getCause() instanceof RuntimeException ? (RuntimeException) ex.getCause() : ex;
+ }
+ }
+ // snippet-end:[ssm.java2.describe_ops.main]
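+
+ /*
+ * A minimal usage sketch for the query above, assuming an OpsItem already exists.
+ * The ID "oi-1a2b3c4d5e6f" is a hypothetical placeholder; in the scenario the ID
+ * comes from createSSMOpsItem().
+ *
+ * SSMActions actions = new SSMActions();
+ * actions.describeOpsItems("oi-1a2b3c4d5e6f");
+ */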
+
+ // snippet-start:[ssm.java2.update_ops.main]
+ /**
+ * Updates the AWS SSM OpsItem asynchronously.
+ *
+ * @param opsItemId The ID of the OpsItem to update.
+ * @param title The new title of the OpsItem.
+ * @param description The new description of the OpsItem.
+ *
+ * This method initiates an asynchronous request to update an SSM OpsItem.
+ * If the request is successful, it completes without returning a value.
+ * If an exception occurs, it handles the error appropriately.
+ */
+ public void updateOpsItem(String opsItemId, String title, String description) {
+ Map<String, OpsItemDataValue> operationalData = new HashMap<>();
+ operationalData.put("key1", OpsItemDataValue.builder().value("value1").build());
+ operationalData.put("key2", OpsItemDataValue.builder().value("value2").build());
+
+ CompletableFuture<Void> future = getOpsItem(opsItemId).thenCompose(opsItem -> {
+ UpdateOpsItemRequest request = UpdateOpsItemRequest.builder()
+ .opsItemId(opsItemId)
+ .title(title)
+ .operationalData(operationalData)
+ .status(opsItem.statusAsString())
+ .description(description)
+ .build();
+
+ return getAsyncClient().updateOpsItem(request).thenAccept(response -> {
+ System.out.println(opsItemId + " updated successfully.");
+ }).exceptionally(ex -> {
+ throw new CompletionException(ex);
+ });
+ }).exceptionally(ex -> {
+ Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
+ if (cause instanceof SsmException) {
+ throw new RuntimeException("SSM error: " + cause.getMessage(), cause);
+ } else {
+ throw new RuntimeException("Unexpected error: " + cause.getMessage(), cause);
+ }
+ });
+
+ try {
+ future.join();
+ } catch (CompletionException ex) {
+ throw ex.getCause() instanceof RuntimeException ? (RuntimeException) ex.getCause() : ex;
+ }
+ }
+
+
+ private static CompletableFuture<OpsItem> getOpsItem(String opsItemId) {
+ GetOpsItemRequest request = GetOpsItemRequest.builder().opsItemId(opsItemId).build();
+ return getAsyncClient().getOpsItem(request).thenApply(GetOpsItemResponse::opsItem);
+ }
+ // snippet-end:[ssm.java2.update_ops.main]
+
+ // snippet-start:[ssm.java2.create_ops.main]
+ /**
+ * Creates an SSM OpsItem asynchronously.
+ *
+ * @param title The title of the OpsItem.
+ * @param source The source of the OpsItem.
+ * @param category The category of the OpsItem.
+ * @param severity The severity of the OpsItem.
+ * @return The ID of the created OpsItem.
+ *
+ * This method initiates an asynchronous request to create an SSM OpsItem.
+ * If the request is successful, it returns the OpsItem ID.
+ * If an exception occurs, it handles the error appropriately.
+ */
+ public String createSSMOpsItem(String title, String source, String category, String severity) {
+ CreateOpsItemRequest opsItemRequest = CreateOpsItemRequest.builder()
+ .description("Created by the SSM Java API")
+ .title(title)
+ .source(source)
+ .category(category)
+ .severity(severity)
+ .build();
+
+ CompletableFuture<CreateOpsItemResponse> future = getAsyncClient().createOpsItem(opsItemRequest);
+
+ try {
+ CreateOpsItemResponse response = future.join();
+ return response.opsItemId();
+ } catch (CompletionException e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof SsmException) {
+ throw (SsmException) cause;
+ } else {
+ throw new RuntimeException(cause);
+ }
+ }
+ }
+ // snippet-end:[ssm.java2.create_ops.main]
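+
+ /*
+ * A minimal sketch of the OpsItem lifecycle these methods support; the title,
+ * source, category, severity, and description are illustrative values only.
+ *
+ * SSMActions actions = new SSMActions();
+ * String opsItemId = actions.createSSMOpsItem("Disk space alert", "EC2", "Performance", "2");
+ * actions.updateOpsItem(opsItemId, "Disk space alert", "Updated by the scenario");
+ * actions.resolveOpsItem(opsItemId);
+ */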
+
+ // snippet-start:[ssm.java2.describe_command.main]
+ /**
+ * Displays the date and time when the specified command was invoked.
+ *
+ * @param commandId The ID of the command to describe.
+ *
+ * This method initiates an asynchronous request to list command invocations and prints the date and time of each command invocation.
+ * If an exception occurs, it handles the error appropriately.
+ */
+ public void displayCommands(String commandId) {
+ ListCommandInvocationsRequest commandInvocationsRequest = ListCommandInvocationsRequest.builder()
+ .commandId(commandId)
+ .build();
+
+ CompletableFuture<ListCommandInvocationsResponse> future = getAsyncClient().listCommandInvocations(commandInvocationsRequest);
+ future.thenAccept(response -> {
+ List<CommandInvocation> commandList = response.commandInvocations();
+ DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss").withZone(ZoneId.systemDefault());
+ for (CommandInvocation invocation : commandList) {
+ System.out.println("The time of the command invocation is " + formatter.format(invocation.requestedDateTime()));
+ }
+ }).exceptionally(ex -> {
+ Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
+ if (cause instanceof SsmException) {
+ throw (SsmException) cause;
+ } else {
+ throw new RuntimeException(cause);
+ }
+ }).join();
+ }
+ // snippet-end:[ssm.java2.describe_command.main]
+
+ // snippet-start:[ssm.Java2.send_command.main]
+ /**
+ * Sends an SSM command to a managed node asynchronously.
+ *
+ * @param documentName The name of the document to use.
+ * @param instanceId The ID of the instance to send the command to.
+ * @return The command ID.
+ *
+ * This method initiates asynchronous requests to send an SSM command to a managed node.
+ * It waits until the document is active, sends the command, and checks the command execution status.
+ */
+ public String sendSSMCommand(String documentName, String instanceId) throws InterruptedException, SsmException {
+ // Before using the document to send a command, make sure it is active.
+ CompletableFuture<Void> documentActiveFuture = CompletableFuture.runAsync(() -> {
+ boolean isDocumentActive = false;
+ DescribeDocumentRequest request = DescribeDocumentRequest.builder()
+ .name(documentName)
+ .build();
+
+ while (!isDocumentActive) {
+ CompletableFuture<DescribeDocumentResponse> response = getAsyncClient().describeDocument(request);
+ String documentStatus = response.join().document().statusAsString();
+ if (documentStatus.equals("Active")) {
+ System.out.println("The SSM document is active and ready to use.");
+ isDocumentActive = true;
+ } else {
+ System.out.println("The SSM document is not active. Status: " + documentStatus);
+ try {
+ Thread.sleep(5000);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ });
+
+ documentActiveFuture.join();
+
+ // Create the SendCommandRequest.
+ SendCommandRequest commandRequest = SendCommandRequest.builder()
+ .documentName(documentName)
+ .instanceIds(instanceId)
+ .build();
+
+ // Send the command.
+ CompletableFuture<SendCommandResponse> commandFuture = getAsyncClient().sendCommand(commandRequest);
+ final String[] commandId = {null};
+
+ commandFuture.whenComplete((commandResponse, ex) -> {
+ if (commandResponse != null) {
+ commandId[0] = commandResponse.command().commandId();
+ System.out.println("Command ID: " + commandId[0]);
+
+ // Wait for the command execution to complete.
+ GetCommandInvocationRequest invocationRequest = GetCommandInvocationRequest.builder()
+ .commandId(commandId[0])
+ .instanceId(instanceId)
+ .build();
+
+ try {
+ System.out.println("Wait 5 secs");
+ TimeUnit.SECONDS.sleep(5);
+
+ // Retrieve the command execution details.
+ CompletableFuture<GetCommandInvocationResponse> invocationFuture = getAsyncClient().getCommandInvocation(invocationRequest);
+ invocationFuture.whenComplete((commandInvocationResponse, invocationEx) -> {
+ if (commandInvocationResponse != null) {
+ // Check the status of the command execution.
+ CommandInvocationStatus status = commandInvocationResponse.status();
+ if (status == CommandInvocationStatus.SUCCESS) {
+ System.out.println("Command execution successful");
+ } else {
+ System.out.println("Command execution failed. Status: " + status);
+ }
+ } else {
+ Throwable invocationCause = (invocationEx instanceof CompletionException) ? invocationEx.getCause() : invocationEx;
+ throw new CompletionException(invocationCause);
+ }
+ }).join();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ } else {
+ Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
+ if (cause instanceof SsmException) {
+ throw (SsmException) cause;
+ } else {
+ throw new RuntimeException(cause);
+ }
+ }
+ }).join();
+
+ return commandId[0];
+ }
+ // snippet-end:[ssm.Java2.send_command.main]
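+
+ /*
+ * A minimal sketch of sending a command, assuming an existing SSM document and a
+ * managed EC2 instance. Both names below are hypothetical placeholders; the scenario
+ * collects real values from the user. Note that sendSSMCommand() declares InterruptedException.
+ *
+ * SSMActions actions = new SSMActions();
+ * String commandId = actions.sendSSMCommand("MyTestDoc", "i-0123456789abcdef0");
+ * actions.displayCommands(commandId);
+ */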
+
+ // snippet-start:[ssm.java2.create_doc.main]
+ /**
+ * Creates an AWS SSM document asynchronously.
+ *
+ * @param docName The name of the document to create.
+ *
+ * This method initiates an asynchronous request to create an SSM document.
+ * If the request is successful, it prints the document status.
+ * If an exception occurs, it handles the error appropriately.
+ */
+ public void createSSMDoc(String docName) throws SsmException {
+ String jsonData = """
+ {
+ "schemaVersion": "2.2",
+ "description": "Run a simple shell command",
+ "mainSteps": [
+ {
+ "action": "aws:runShellScript",
+ "name": "runEchoCommand",
+ "inputs": {
+ "runCommand": [
+ "echo 'Hello, world!'"
+ ]
+ }
+ }
+ ]
+ }
+ """;
+
+ CreateDocumentRequest request = CreateDocumentRequest.builder()
+ .content(jsonData)
+ .name(docName)
+ .documentType(DocumentType.COMMAND)
+ .build();
+
+ CompletableFuture<CreateDocumentResponse> future = getAsyncClient().createDocument(request);
+ future.thenAccept(response -> {
+ System.out.println("The status of the SSM document is " + response.documentDescription().status());
+ }).exceptionally(ex -> {
+ Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
+ if (cause instanceof DocumentAlreadyExistsException) {
+ throw new CompletionException(cause);
+ } else if (cause instanceof SsmException) {
+ throw new CompletionException(cause);
+ } else {
+ throw new RuntimeException(cause);
+ }
+ }).join();
+ }
+ // snippet-end:[ssm.java2.create_doc.main]
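+
+ /*
+ * A minimal sketch of creating the command document. Failures, including
+ * DocumentAlreadyExistsException, surface from the joined future as runtime
+ * exceptions, so a caller might guard the call like this (the name is a placeholder):
+ *
+ * try {
+ *     actions.createSSMDoc("MyTestDoc");
+ * } catch (RuntimeException e) {
+ *     System.out.println("Document not created (it may already exist): " + e.getMessage());
+ * }
+ */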
+
+ // snippet-start:[ssm.java2.update_window.main]
+ /**
+ * Updates an SSM maintenance window asynchronously.
+ *
+ * @param id The ID of the maintenance window to update.
+ * @param name The new name for the maintenance window.
+ *
+ * This method initiates an asynchronous request to update an SSM maintenance window.
+ * If the request is successful, it prints a success message.
+ * If an exception occurs, it handles the error appropriately.
+ */
+ public void updateSSMMaintenanceWindow(String id, String name) throws SsmException {
+ UpdateMaintenanceWindowRequest updateRequest = UpdateMaintenanceWindowRequest.builder()
+ .windowId(id)
+ .allowUnassociatedTargets(true)
+ .duration(24)
+ .enabled(true)
+ .name(name)
+ .schedule("cron(0 0 ? * MON *)")
+ .build();
+
+ CompletableFuture<UpdateMaintenanceWindowResponse> future = getAsyncClient().updateMaintenanceWindow(updateRequest);
+ future.whenComplete((response, ex) -> {
+ if (response != null) {
+ System.out.println("The SSM maintenance window was successfully updated");
+ } else {
+ Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
+ if (cause instanceof SsmException) {
+ throw new CompletionException(cause);
+ } else {
+ throw new RuntimeException(cause);
+ }
+ }
+ }).join();
+ }
+ // snippet-end:[ssm.java2.update_window.main]
+
+ // snippet-start:[ssm.java2.create_window.main]
+ /**
+ * Creates an SSM maintenance window asynchronously.
+ *
+ * @param winName The name of the maintenance window.
+ * @return The ID of the created or existing maintenance window.
+ *
+ * This method initiates an asynchronous request to create an SSM maintenance window.
+ * If the request is successful, it prints the maintenance window ID.
+ * If an exception occurs, it handles the error appropriately.
+ */
+ public String createMaintenanceWindow(String winName) throws SsmException, DocumentAlreadyExistsException {
+ CreateMaintenanceWindowRequest request = CreateMaintenanceWindowRequest.builder()
+ .name(winName)
+ .description("This is my maintenance window")
+ .allowUnassociatedTargets(true)
+ .duration(2)
+ .cutoff(1)
+ .schedule("cron(0 10 ? * MON-FRI *)")
+ .build();
+
+ CompletableFuture<CreateMaintenanceWindowResponse> future = getAsyncClient().createMaintenanceWindow(request);
+ final String[] windowId = {null};
+ future.whenComplete((response, ex) -> {
+ if (response != null) {
+ String maintenanceWindowId = response.windowId();
+ System.out.println("The maintenance window id is " + maintenanceWindowId);
+ windowId[0] = maintenanceWindowId;
+ } else {
+ Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
+ if (cause instanceof DocumentAlreadyExistsException) {
+ throw new CompletionException(cause);
+ } else if (cause instanceof SsmException) {
+ throw new CompletionException(cause);
+ } else {
+ throw new RuntimeException(cause);
+ }
+ }
+ }).join();
+
+ if (windowId[0] == null) {
+ MaintenanceWindowFilter filter = MaintenanceWindowFilter.builder()
+ .key("name")
+ .values(winName)
+ .build();
+
+ DescribeMaintenanceWindowsRequest winRequest = DescribeMaintenanceWindowsRequest.builder()
+ .filters(filter)
+ .build();
+
+ CompletableFuture<DescribeMaintenanceWindowsResponse> describeFuture = getAsyncClient().describeMaintenanceWindows(winRequest);
+ describeFuture.whenComplete((describeResponse, describeEx) -> {
+ if (describeResponse != null) {
+ List<MaintenanceWindowIdentity> windows = describeResponse.windowIdentities();
+ if (!windows.isEmpty()) {
+ windowId[0] = windows.get(0).windowId();
+ System.out.println("Window ID: " + windowId[0]);
+ } else {
+ System.out.println("Window not found.");
+ windowId[0] = "";
+ }
+ } else {
+ Throwable describeCause = (describeEx instanceof CompletionException) ? describeEx.getCause() : describeEx;
+ throw new RuntimeException("Error describing maintenance windows: " + describeCause.getMessage(), describeCause);
+ }
+ }).join();
+ }
+
+ return windowId[0];
+ }
+ // snippet-end:[ssm.java2.create_window.main]
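+
+ /*
+ * A minimal sketch of the maintenance window lifecycle. The window name is a
+ * hypothetical placeholder; createMaintenanceWindow() returns the new window ID,
+ * or looks up the existing ID when the name is already in use.
+ *
+ * SSMActions actions = new SSMActions();
+ * String windowId = actions.createMaintenanceWindow("java-sdk-test-window");
+ * actions.updateSSMMaintenanceWindow(windowId, "java-sdk-test-window-updated");
+ * actions.deleteMaintenanceWindow(windowId);
+ */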
+}
+// snippet-end:[ssm.java2.actions.main]
\ No newline at end of file
diff --git a/javav2/example_code/ssm/src/main/java/com/example/scenario/SSMScenario.java b/javav2/example_code/ssm/src/main/java/com/example/scenario/SSMScenario.java
index 6a1ec33f079..9476ba0a45f 100644
--- a/javav2/example_code/ssm/src/main/java/com/example/scenario/SSMScenario.java
+++ b/javav2/example_code/ssm/src/main/java/com/example/scenario/SSMScenario.java
@@ -4,76 +4,14 @@
package com.example.scenario;
// snippet-start:[ssm.java2.scenario.main]
-import software.amazon.awssdk.regions.Region;
-import software.amazon.awssdk.services.ssm.SsmClient;
-import software.amazon.awssdk.services.ssm.model.CommandInvocation;
-import software.amazon.awssdk.services.ssm.model.CommandInvocationStatus;
-import software.amazon.awssdk.services.ssm.model.CreateDocumentRequest;
-import software.amazon.awssdk.services.ssm.model.CreateDocumentResponse;
-import software.amazon.awssdk.services.ssm.model.CreateMaintenanceWindowRequest;
-import software.amazon.awssdk.services.ssm.model.CreateMaintenanceWindowResponse;
-import software.amazon.awssdk.services.ssm.model.CreateOpsItemRequest;
-import software.amazon.awssdk.services.ssm.model.CreateOpsItemResponse;
-import software.amazon.awssdk.services.ssm.model.DeleteDocumentRequest;
-import software.amazon.awssdk.services.ssm.model.DeleteMaintenanceWindowRequest;
-import software.amazon.awssdk.services.ssm.model.DeleteOpsItemRequest;
-import software.amazon.awssdk.services.ssm.model.DescribeDocumentRequest;
-import software.amazon.awssdk.services.ssm.model.DescribeDocumentResponse;
-import software.amazon.awssdk.services.ssm.model.DescribeMaintenanceWindowsRequest;
-import software.amazon.awssdk.services.ssm.model.DescribeMaintenanceWindowsResponse;
-import software.amazon.awssdk.services.ssm.model.DescribeOpsItemsRequest;
-import software.amazon.awssdk.services.ssm.model.DescribeOpsItemsResponse;
import software.amazon.awssdk.services.ssm.model.DocumentAlreadyExistsException;
-import software.amazon.awssdk.services.ssm.model.DocumentType;
-import software.amazon.awssdk.services.ssm.model.GetCommandInvocationRequest;
-import software.amazon.awssdk.services.ssm.model.GetCommandInvocationResponse;
-import software.amazon.awssdk.services.ssm.model.GetOpsItemRequest;
-import software.amazon.awssdk.services.ssm.model.GetOpsItemResponse;
-import software.amazon.awssdk.services.ssm.model.ListCommandInvocationsRequest;
-import software.amazon.awssdk.services.ssm.model.ListCommandInvocationsResponse;
-import software.amazon.awssdk.services.ssm.model.MaintenanceWindowFilter;
-import software.amazon.awssdk.services.ssm.model.MaintenanceWindowIdentity;
-import software.amazon.awssdk.services.ssm.model.OpsItemDataValue;
-import software.amazon.awssdk.services.ssm.model.OpsItemFilter;
-import software.amazon.awssdk.services.ssm.model.OpsItemFilterKey;
-import software.amazon.awssdk.services.ssm.model.OpsItemFilterOperator;
-import software.amazon.awssdk.services.ssm.model.OpsItemStatus;
-import software.amazon.awssdk.services.ssm.model.OpsItemSummary;
-import software.amazon.awssdk.services.ssm.model.SendCommandRequest;
-import software.amazon.awssdk.services.ssm.model.SendCommandResponse;
import software.amazon.awssdk.services.ssm.model.SsmException;
-import software.amazon.awssdk.services.ssm.model.UpdateMaintenanceWindowRequest;
-import software.amazon.awssdk.services.ssm.model.UpdateOpsItemRequest;
-import java.time.ZoneId;
-import java.time.format.DateTimeFormatter;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Scanner;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Before running this Java V2 code example, set up your development
- * environment, including your credentials.
- *
- * For more information, see the following documentation topic:
- *
- * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html
- *
- *
- * This Java program performs these tasks:
- * 1. Creates an AWS Systems Manager maintenance window with a default name or a user-provided name.
- * 2. Modifies the maintenance window schedule.
- * 3. Creates a Systems Manager document with a default name or a user-provided name.
- * 4. Sends a command to a specified EC2 instance using the created Systems Manager document and displays the time when the command was invoked.
- * 5. Creates a Systems Manager OpsItem with a predefined title, source, category, and severity.
- * 6. Updates and resolves the created OpsItem.
- * 7. Deletes the Systems Manager maintenance window, OpsItem, and document.
- */
+import java.util.Scanner;
public class SSMScenario {
public static final String DASHES = new String(new char[80]).replace("\0", "-");
- public static void main(String[] args) throws InterruptedException {
+
+ public static void main(String[] args) {
String usage = """
Usage: