/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // TODO: temporary fix for naming conflicts on Windows. #ifdef _WIN32 #ifdef GetObject #undef GetObject #endif #endif namespace Aws { namespace Utils { template< typename R, typename E> class Outcome; namespace Threading { class Executor; } // namespace Threading namespace Xml { class XmlDocument; } // namespace Xml } // namespace Utils namespace Auth { class AWSCredentials; class AWSCredentialsProvider; } // namespace Auth namespace S3 { namespace Model { class AbortMultipartUploadRequest; class CompleteMultipartUploadRequest; class CopyObjectRequest; class CreateBucketRequest; class CreateMultipartUploadRequest; class DeleteBucketRequest; class DeleteBucketAnalyticsConfigurationRequest; class DeleteBucketCorsRequest; class DeleteBucketEncryptionRequest; class DeleteBucketIntelligentTieringConfigurationRequest; class DeleteBucketInventoryConfigurationRequest; class DeleteBucketLifecycleRequest; class DeleteBucketMetricsConfigurationRequest; class DeleteBucketOwnershipControlsRequest; class DeleteBucketPolicyRequest; class DeleteBucketReplicationRequest; class DeleteBucketTaggingRequest; class DeleteBucketWebsiteRequest; class DeleteObjectRequest; class DeleteObjectTaggingRequest; class DeleteObjectsRequest; class 
DeletePublicAccessBlockRequest; class GetBucketAccelerateConfigurationRequest; class GetBucketAclRequest; class GetBucketAnalyticsConfigurationRequest; class GetBucketCorsRequest; class GetBucketEncryptionRequest; class GetBucketIntelligentTieringConfigurationRequest; class GetBucketInventoryConfigurationRequest; class GetBucketLifecycleConfigurationRequest; class GetBucketLocationRequest; class GetBucketLoggingRequest; class GetBucketMetricsConfigurationRequest; class GetBucketNotificationConfigurationRequest; class GetBucketOwnershipControlsRequest; class GetBucketPolicyRequest; class GetBucketPolicyStatusRequest; class GetBucketReplicationRequest; class GetBucketRequestPaymentRequest; class GetBucketTaggingRequest; class GetBucketVersioningRequest; class GetBucketWebsiteRequest; class GetObjectRequest; class GetObjectAclRequest; class GetObjectLegalHoldRequest; class GetObjectLockConfigurationRequest; class GetObjectRetentionRequest; class GetObjectTaggingRequest; class GetObjectTorrentRequest; class GetPublicAccessBlockRequest; class HeadBucketRequest; class HeadObjectRequest; class ListBucketAnalyticsConfigurationsRequest; class ListBucketIntelligentTieringConfigurationsRequest; class ListBucketInventoryConfigurationsRequest; class ListBucketMetricsConfigurationsRequest; class ListMultipartUploadsRequest; class ListObjectVersionsRequest; class ListObjectsRequest; class ListObjectsV2Request; class ListPartsRequest; class PutBucketAccelerateConfigurationRequest; class PutBucketAclRequest; class PutBucketAnalyticsConfigurationRequest; class PutBucketCorsRequest; class PutBucketEncryptionRequest; class PutBucketIntelligentTieringConfigurationRequest; class PutBucketInventoryConfigurationRequest; class PutBucketLifecycleConfigurationRequest; class PutBucketLoggingRequest; class PutBucketMetricsConfigurationRequest; class PutBucketNotificationConfigurationRequest; class PutBucketOwnershipControlsRequest; class PutBucketPolicyRequest; class 
PutBucketReplicationRequest; class PutBucketRequestPaymentRequest; class PutBucketTaggingRequest; class PutBucketVersioningRequest; class PutBucketWebsiteRequest; class PutObjectRequest; class PutObjectAclRequest; class PutObjectLegalHoldRequest; class PutObjectLockConfigurationRequest; class PutObjectRetentionRequest; class PutObjectTaggingRequest; class PutPublicAccessBlockRequest; class RestoreObjectRequest; class SelectObjectContentRequest; class UploadPartRequest; class UploadPartCopyRequest; class WriteGetObjectResponseRequest; typedef Aws::Utils::Outcome AbortMultipartUploadOutcome; typedef Aws::Utils::Outcome CompleteMultipartUploadOutcome; typedef Aws::Utils::Outcome CopyObjectOutcome; typedef Aws::Utils::Outcome CreateBucketOutcome; typedef Aws::Utils::Outcome CreateMultipartUploadOutcome; typedef Aws::Utils::Outcome DeleteBucketOutcome; typedef Aws::Utils::Outcome DeleteBucketAnalyticsConfigurationOutcome; typedef Aws::Utils::Outcome DeleteBucketCorsOutcome; typedef Aws::Utils::Outcome DeleteBucketEncryptionOutcome; typedef Aws::Utils::Outcome DeleteBucketIntelligentTieringConfigurationOutcome; typedef Aws::Utils::Outcome DeleteBucketInventoryConfigurationOutcome; typedef Aws::Utils::Outcome DeleteBucketLifecycleOutcome; typedef Aws::Utils::Outcome DeleteBucketMetricsConfigurationOutcome; typedef Aws::Utils::Outcome DeleteBucketOwnershipControlsOutcome; typedef Aws::Utils::Outcome DeleteBucketPolicyOutcome; typedef Aws::Utils::Outcome DeleteBucketReplicationOutcome; typedef Aws::Utils::Outcome DeleteBucketTaggingOutcome; typedef Aws::Utils::Outcome DeleteBucketWebsiteOutcome; typedef Aws::Utils::Outcome DeleteObjectOutcome; typedef Aws::Utils::Outcome DeleteObjectTaggingOutcome; typedef Aws::Utils::Outcome DeleteObjectsOutcome; typedef Aws::Utils::Outcome DeletePublicAccessBlockOutcome; typedef Aws::Utils::Outcome GetBucketAccelerateConfigurationOutcome; typedef Aws::Utils::Outcome GetBucketAclOutcome; typedef Aws::Utils::Outcome 
GetBucketAnalyticsConfigurationOutcome; typedef Aws::Utils::Outcome GetBucketCorsOutcome; typedef Aws::Utils::Outcome GetBucketEncryptionOutcome; typedef Aws::Utils::Outcome GetBucketIntelligentTieringConfigurationOutcome; typedef Aws::Utils::Outcome GetBucketInventoryConfigurationOutcome; typedef Aws::Utils::Outcome GetBucketLifecycleConfigurationOutcome; typedef Aws::Utils::Outcome GetBucketLocationOutcome; typedef Aws::Utils::Outcome GetBucketLoggingOutcome; typedef Aws::Utils::Outcome GetBucketMetricsConfigurationOutcome; typedef Aws::Utils::Outcome GetBucketNotificationConfigurationOutcome; typedef Aws::Utils::Outcome GetBucketOwnershipControlsOutcome; typedef Aws::Utils::Outcome GetBucketPolicyOutcome; typedef Aws::Utils::Outcome GetBucketPolicyStatusOutcome; typedef Aws::Utils::Outcome GetBucketReplicationOutcome; typedef Aws::Utils::Outcome GetBucketRequestPaymentOutcome; typedef Aws::Utils::Outcome GetBucketTaggingOutcome; typedef Aws::Utils::Outcome GetBucketVersioningOutcome; typedef Aws::Utils::Outcome GetBucketWebsiteOutcome; typedef Aws::Utils::Outcome GetObjectOutcome; typedef Aws::Utils::Outcome GetObjectAclOutcome; typedef Aws::Utils::Outcome GetObjectLegalHoldOutcome; typedef Aws::Utils::Outcome GetObjectLockConfigurationOutcome; typedef Aws::Utils::Outcome GetObjectRetentionOutcome; typedef Aws::Utils::Outcome GetObjectTaggingOutcome; typedef Aws::Utils::Outcome GetObjectTorrentOutcome; typedef Aws::Utils::Outcome GetPublicAccessBlockOutcome; typedef Aws::Utils::Outcome HeadBucketOutcome; typedef Aws::Utils::Outcome HeadObjectOutcome; typedef Aws::Utils::Outcome ListBucketAnalyticsConfigurationsOutcome; typedef Aws::Utils::Outcome ListBucketIntelligentTieringConfigurationsOutcome; typedef Aws::Utils::Outcome ListBucketInventoryConfigurationsOutcome; typedef Aws::Utils::Outcome ListBucketMetricsConfigurationsOutcome; typedef Aws::Utils::Outcome ListBucketsOutcome; typedef Aws::Utils::Outcome ListMultipartUploadsOutcome; typedef Aws::Utils::Outcome 
ListObjectVersionsOutcome; typedef Aws::Utils::Outcome ListObjectsOutcome; typedef Aws::Utils::Outcome ListObjectsV2Outcome; typedef Aws::Utils::Outcome ListPartsOutcome; typedef Aws::Utils::Outcome PutBucketAccelerateConfigurationOutcome; typedef Aws::Utils::Outcome PutBucketAclOutcome; typedef Aws::Utils::Outcome PutBucketAnalyticsConfigurationOutcome; typedef Aws::Utils::Outcome PutBucketCorsOutcome; typedef Aws::Utils::Outcome PutBucketEncryptionOutcome; typedef Aws::Utils::Outcome PutBucketIntelligentTieringConfigurationOutcome; typedef Aws::Utils::Outcome PutBucketInventoryConfigurationOutcome; typedef Aws::Utils::Outcome PutBucketLifecycleConfigurationOutcome; typedef Aws::Utils::Outcome PutBucketLoggingOutcome; typedef Aws::Utils::Outcome PutBucketMetricsConfigurationOutcome; typedef Aws::Utils::Outcome PutBucketNotificationConfigurationOutcome; typedef Aws::Utils::Outcome PutBucketOwnershipControlsOutcome; typedef Aws::Utils::Outcome PutBucketPolicyOutcome; typedef Aws::Utils::Outcome PutBucketReplicationOutcome; typedef Aws::Utils::Outcome PutBucketRequestPaymentOutcome; typedef Aws::Utils::Outcome PutBucketTaggingOutcome; typedef Aws::Utils::Outcome PutBucketVersioningOutcome; typedef Aws::Utils::Outcome PutBucketWebsiteOutcome; typedef Aws::Utils::Outcome PutObjectOutcome; typedef Aws::Utils::Outcome PutObjectAclOutcome; typedef Aws::Utils::Outcome PutObjectLegalHoldOutcome; typedef Aws::Utils::Outcome PutObjectLockConfigurationOutcome; typedef Aws::Utils::Outcome PutObjectRetentionOutcome; typedef Aws::Utils::Outcome PutObjectTaggingOutcome; typedef Aws::Utils::Outcome PutPublicAccessBlockOutcome; typedef Aws::Utils::Outcome RestoreObjectOutcome; typedef Aws::Utils::Outcome SelectObjectContentOutcome; typedef Aws::Utils::Outcome UploadPartOutcome; typedef Aws::Utils::Outcome UploadPartCopyOutcome; typedef Aws::Utils::Outcome WriteGetObjectResponseOutcome; typedef std::future AbortMultipartUploadOutcomeCallable; typedef std::future 
CompleteMultipartUploadOutcomeCallable; typedef std::future CopyObjectOutcomeCallable; typedef std::future CreateBucketOutcomeCallable; typedef std::future CreateMultipartUploadOutcomeCallable; typedef std::future DeleteBucketOutcomeCallable; typedef std::future DeleteBucketAnalyticsConfigurationOutcomeCallable; typedef std::future DeleteBucketCorsOutcomeCallable; typedef std::future DeleteBucketEncryptionOutcomeCallable; typedef std::future DeleteBucketIntelligentTieringConfigurationOutcomeCallable; typedef std::future DeleteBucketInventoryConfigurationOutcomeCallable; typedef std::future DeleteBucketLifecycleOutcomeCallable; typedef std::future DeleteBucketMetricsConfigurationOutcomeCallable; typedef std::future DeleteBucketOwnershipControlsOutcomeCallable; typedef std::future DeleteBucketPolicyOutcomeCallable; typedef std::future DeleteBucketReplicationOutcomeCallable; typedef std::future DeleteBucketTaggingOutcomeCallable; typedef std::future DeleteBucketWebsiteOutcomeCallable; typedef std::future DeleteObjectOutcomeCallable; typedef std::future DeleteObjectTaggingOutcomeCallable; typedef std::future DeleteObjectsOutcomeCallable; typedef std::future DeletePublicAccessBlockOutcomeCallable; typedef std::future GetBucketAccelerateConfigurationOutcomeCallable; typedef std::future GetBucketAclOutcomeCallable; typedef std::future GetBucketAnalyticsConfigurationOutcomeCallable; typedef std::future GetBucketCorsOutcomeCallable; typedef std::future GetBucketEncryptionOutcomeCallable; typedef std::future GetBucketIntelligentTieringConfigurationOutcomeCallable; typedef std::future GetBucketInventoryConfigurationOutcomeCallable; typedef std::future GetBucketLifecycleConfigurationOutcomeCallable; typedef std::future GetBucketLocationOutcomeCallable; typedef std::future GetBucketLoggingOutcomeCallable; typedef std::future GetBucketMetricsConfigurationOutcomeCallable; typedef std::future GetBucketNotificationConfigurationOutcomeCallable; typedef std::future 
GetBucketOwnershipControlsOutcomeCallable; typedef std::future GetBucketPolicyOutcomeCallable; typedef std::future GetBucketPolicyStatusOutcomeCallable; typedef std::future GetBucketReplicationOutcomeCallable; typedef std::future GetBucketRequestPaymentOutcomeCallable; typedef std::future GetBucketTaggingOutcomeCallable; typedef std::future GetBucketVersioningOutcomeCallable; typedef std::future GetBucketWebsiteOutcomeCallable; typedef std::future GetObjectOutcomeCallable; typedef std::future GetObjectAclOutcomeCallable; typedef std::future GetObjectLegalHoldOutcomeCallable; typedef std::future GetObjectLockConfigurationOutcomeCallable; typedef std::future GetObjectRetentionOutcomeCallable; typedef std::future GetObjectTaggingOutcomeCallable; typedef std::future GetObjectTorrentOutcomeCallable; typedef std::future GetPublicAccessBlockOutcomeCallable; typedef std::future HeadBucketOutcomeCallable; typedef std::future HeadObjectOutcomeCallable; typedef std::future ListBucketAnalyticsConfigurationsOutcomeCallable; typedef std::future ListBucketIntelligentTieringConfigurationsOutcomeCallable; typedef std::future ListBucketInventoryConfigurationsOutcomeCallable; typedef std::future ListBucketMetricsConfigurationsOutcomeCallable; typedef std::future ListBucketsOutcomeCallable; typedef std::future ListMultipartUploadsOutcomeCallable; typedef std::future ListObjectVersionsOutcomeCallable; typedef std::future ListObjectsOutcomeCallable; typedef std::future ListObjectsV2OutcomeCallable; typedef std::future ListPartsOutcomeCallable; typedef std::future PutBucketAccelerateConfigurationOutcomeCallable; typedef std::future PutBucketAclOutcomeCallable; typedef std::future PutBucketAnalyticsConfigurationOutcomeCallable; typedef std::future PutBucketCorsOutcomeCallable; typedef std::future PutBucketEncryptionOutcomeCallable; typedef std::future PutBucketIntelligentTieringConfigurationOutcomeCallable; typedef std::future PutBucketInventoryConfigurationOutcomeCallable; typedef 
std::future PutBucketLifecycleConfigurationOutcomeCallable; typedef std::future PutBucketLoggingOutcomeCallable; typedef std::future PutBucketMetricsConfigurationOutcomeCallable; typedef std::future PutBucketNotificationConfigurationOutcomeCallable; typedef std::future PutBucketOwnershipControlsOutcomeCallable; typedef std::future PutBucketPolicyOutcomeCallable; typedef std::future PutBucketReplicationOutcomeCallable; typedef std::future PutBucketRequestPaymentOutcomeCallable; typedef std::future PutBucketTaggingOutcomeCallable; typedef std::future PutBucketVersioningOutcomeCallable; typedef std::future PutBucketWebsiteOutcomeCallable; typedef std::future PutObjectOutcomeCallable; typedef std::future PutObjectAclOutcomeCallable; typedef std::future PutObjectLegalHoldOutcomeCallable; typedef std::future PutObjectLockConfigurationOutcomeCallable; typedef std::future PutObjectRetentionOutcomeCallable; typedef std::future PutObjectTaggingOutcomeCallable; typedef std::future PutPublicAccessBlockOutcomeCallable; typedef std::future RestoreObjectOutcomeCallable; typedef std::future SelectObjectContentOutcomeCallable; typedef std::future UploadPartOutcomeCallable; typedef std::future UploadPartCopyOutcomeCallable; typedef std::future WriteGetObjectResponseOutcomeCallable; } // namespace Model namespace SSEHeaders { static const char SERVER_SIDE_ENCRYPTION[] = "x-amz-server-side-encryption"; static const char SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID[] = "x-amz-server-side-encryption-aws-kms-key-id"; static const char SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM[] = "x-amz-server-side-encryption-customer-algorithm"; static const char SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY[] = "x-amz-server-side-encryption-customer-key"; static const char SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5[] = "x-amz-server-side-encryption-customer-key-MD5"; } // SS3Headers class S3Client; typedef std::function&) > AbortMultipartUploadResponseReceivedHandler; typedef std::function&) > 
CompleteMultipartUploadResponseReceivedHandler; typedef std::function&) > CopyObjectResponseReceivedHandler; typedef std::function&) > CreateBucketResponseReceivedHandler; typedef std::function&) > CreateMultipartUploadResponseReceivedHandler; typedef std::function&) > DeleteBucketResponseReceivedHandler; typedef std::function&) > DeleteBucketAnalyticsConfigurationResponseReceivedHandler; typedef std::function&) > DeleteBucketCorsResponseReceivedHandler; typedef std::function&) > DeleteBucketEncryptionResponseReceivedHandler; typedef std::function&) > DeleteBucketIntelligentTieringConfigurationResponseReceivedHandler; typedef std::function&) > DeleteBucketInventoryConfigurationResponseReceivedHandler; typedef std::function&) > DeleteBucketLifecycleResponseReceivedHandler; typedef std::function&) > DeleteBucketMetricsConfigurationResponseReceivedHandler; typedef std::function&) > DeleteBucketOwnershipControlsResponseReceivedHandler; typedef std::function&) > DeleteBucketPolicyResponseReceivedHandler; typedef std::function&) > DeleteBucketReplicationResponseReceivedHandler; typedef std::function&) > DeleteBucketTaggingResponseReceivedHandler; typedef std::function&) > DeleteBucketWebsiteResponseReceivedHandler; typedef std::function&) > DeleteObjectResponseReceivedHandler; typedef std::function&) > DeleteObjectTaggingResponseReceivedHandler; typedef std::function&) > DeleteObjectsResponseReceivedHandler; typedef std::function&) > DeletePublicAccessBlockResponseReceivedHandler; typedef std::function&) > GetBucketAccelerateConfigurationResponseReceivedHandler; typedef std::function&) > GetBucketAclResponseReceivedHandler; typedef std::function&) > GetBucketAnalyticsConfigurationResponseReceivedHandler; typedef std::function&) > GetBucketCorsResponseReceivedHandler; typedef std::function&) > GetBucketEncryptionResponseReceivedHandler; typedef std::function&) > GetBucketIntelligentTieringConfigurationResponseReceivedHandler; typedef std::function&) > 
GetBucketInventoryConfigurationResponseReceivedHandler; typedef std::function&) > GetBucketLifecycleConfigurationResponseReceivedHandler; typedef std::function&) > GetBucketLocationResponseReceivedHandler; typedef std::function&) > GetBucketLoggingResponseReceivedHandler; typedef std::function&) > GetBucketMetricsConfigurationResponseReceivedHandler; typedef std::function&) > GetBucketNotificationConfigurationResponseReceivedHandler; typedef std::function&) > GetBucketOwnershipControlsResponseReceivedHandler; typedef std::function&) > GetBucketPolicyResponseReceivedHandler; typedef std::function&) > GetBucketPolicyStatusResponseReceivedHandler; typedef std::function&) > GetBucketReplicationResponseReceivedHandler; typedef std::function&) > GetBucketRequestPaymentResponseReceivedHandler; typedef std::function&) > GetBucketTaggingResponseReceivedHandler; typedef std::function&) > GetBucketVersioningResponseReceivedHandler; typedef std::function&) > GetBucketWebsiteResponseReceivedHandler; typedef std::function&) > GetObjectResponseReceivedHandler; typedef std::function&) > GetObjectAclResponseReceivedHandler; typedef std::function&) > GetObjectLegalHoldResponseReceivedHandler; typedef std::function&) > GetObjectLockConfigurationResponseReceivedHandler; typedef std::function&) > GetObjectRetentionResponseReceivedHandler; typedef std::function&) > GetObjectTaggingResponseReceivedHandler; typedef std::function&) > GetObjectTorrentResponseReceivedHandler; typedef std::function&) > GetPublicAccessBlockResponseReceivedHandler; typedef std::function&) > HeadBucketResponseReceivedHandler; typedef std::function&) > HeadObjectResponseReceivedHandler; typedef std::function&) > ListBucketAnalyticsConfigurationsResponseReceivedHandler; typedef std::function&) > ListBucketIntelligentTieringConfigurationsResponseReceivedHandler; typedef std::function&) > ListBucketInventoryConfigurationsResponseReceivedHandler; typedef std::function&) > 
ListBucketMetricsConfigurationsResponseReceivedHandler; typedef std::function&) > ListBucketsResponseReceivedHandler; typedef std::function&) > ListMultipartUploadsResponseReceivedHandler; typedef std::function&) > ListObjectVersionsResponseReceivedHandler; typedef std::function&) > ListObjectsResponseReceivedHandler; typedef std::function&) > ListObjectsV2ResponseReceivedHandler; typedef std::function&) > ListPartsResponseReceivedHandler; typedef std::function&) > PutBucketAccelerateConfigurationResponseReceivedHandler; typedef std::function&) > PutBucketAclResponseReceivedHandler; typedef std::function&) > PutBucketAnalyticsConfigurationResponseReceivedHandler; typedef std::function&) > PutBucketCorsResponseReceivedHandler; typedef std::function&) > PutBucketEncryptionResponseReceivedHandler; typedef std::function&) > PutBucketIntelligentTieringConfigurationResponseReceivedHandler; typedef std::function&) > PutBucketInventoryConfigurationResponseReceivedHandler; typedef std::function&) > PutBucketLifecycleConfigurationResponseReceivedHandler; typedef std::function&) > PutBucketLoggingResponseReceivedHandler; typedef std::function&) > PutBucketMetricsConfigurationResponseReceivedHandler; typedef std::function&) > PutBucketNotificationConfigurationResponseReceivedHandler; typedef std::function&) > PutBucketOwnershipControlsResponseReceivedHandler; typedef std::function&) > PutBucketPolicyResponseReceivedHandler; typedef std::function&) > PutBucketReplicationResponseReceivedHandler; typedef std::function&) > PutBucketRequestPaymentResponseReceivedHandler; typedef std::function&) > PutBucketTaggingResponseReceivedHandler; typedef std::function&) > PutBucketVersioningResponseReceivedHandler; typedef std::function&) > PutBucketWebsiteResponseReceivedHandler; typedef std::function&) > PutObjectResponseReceivedHandler; typedef std::function&) > PutObjectAclResponseReceivedHandler; typedef std::function&) > PutObjectLegalHoldResponseReceivedHandler; typedef 
std::function&) > PutObjectLockConfigurationResponseReceivedHandler; typedef std::function&) > PutObjectRetentionResponseReceivedHandler; typedef std::function&) > PutObjectTaggingResponseReceivedHandler; typedef std::function&) > PutPublicAccessBlockResponseReceivedHandler; typedef std::function&) > RestoreObjectResponseReceivedHandler; typedef std::function&) > SelectObjectContentResponseReceivedHandler; typedef std::function&) > UploadPartResponseReceivedHandler; typedef std::function&) > UploadPartCopyResponseReceivedHandler; typedef std::function&) > WriteGetObjectResponseResponseReceivedHandler; // Get endpoint, signer region and signer service name after computing the endpoint. struct ComputeEndpointResult { ComputeEndpointResult(const Aws::String& endpointName = {}, const Aws::String& region = {}, const Aws::String& serviceName = {}, const Aws::String signer = Aws::Auth::SIGV4_SIGNER) : endpoint(endpointName), signerRegion(region), signerServiceName(serviceName), signerName(signer) {} Aws::String endpoint; Aws::String signerRegion; Aws::String signerServiceName; Aws::String signerName; }; typedef Aws::Utils::Outcome> ComputeEndpointOutcome; //max expiration for presigned urls in s3 is 7 days. static const unsigned MAX_EXPIRATION_SECONDS = 7 * 24 * 60 * 60; /** *

*/ enum class US_EAST_1_REGIONAL_ENDPOINT_OPTION { NOT_SET, LEGACY, //stands for using global endpoint for us-east-1, REGIONAL //stands for using regional endpoint for us-east-1 }; class AWS_S3_API S3Client : public Aws::Client::AWSXMLClient { public: typedef Aws::Client::AWSXMLClient BASECLASS; /** * Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config. If client config * is not specified, it will be initialized to default values. */ S3Client(const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration(), Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads = Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, bool useVirtualAddressing = true, Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET); /** * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config. If client config * is not specified, it will be initialized to default values. */ S3Client(const Aws::Auth::AWSCredentials& credentials, const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration(), Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads = Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, bool useVirtualAddressing = true, Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET); /** * Initializes client to use specified credentials provider with specified client config. 
If http client factory is not supplied, * the default http client factory will be used */ S3Client(const std::shared_ptr& credentialsProvider, const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration(), Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads = Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, bool useVirtualAddressing = true, Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET); virtual ~S3Client(); /** *

This action aborts a multipart upload. After a multipart upload is aborted, * no additional parts can be uploaded using that upload ID. The storage consumed * by any previously uploaded parts will be freed. However, if any part uploads are * currently in progress, those part uploads might or might not succeed. As a * result, it might be necessary to abort a given multipart upload multiple times * in order to completely free all storage consumed by all parts.

To verify * that all parts have been removed, so you don't get charged for the part storage, * you should call the ListParts * action and ensure that the parts list is empty.

For information about * permissions required to use the multipart upload, see Multipart * Upload and Permissions.

The following operations are related to * AbortMultipartUpload:

See Also:

AWS * API Reference

*/ virtual Model::AbortMultipartUploadOutcome AbortMultipartUpload(const Model::AbortMultipartUploadRequest& request) const; /** *

This action aborts a multipart upload. After a multipart upload is aborted, * no additional parts can be uploaded using that upload ID. The storage consumed * by any previously uploaded parts will be freed. However, if any part uploads are * currently in progress, those part uploads might or might not succeed. As a * result, it might be necessary to abort a given multipart upload multiple times * in order to completely free all storage consumed by all parts.

To verify * that all parts have been removed, so you don't get charged for the part storage, * you should call the ListParts * action and ensure that the parts list is empty.

For information about * permissions required to use the multipart upload, see Multipart * Upload and Permissions.

The following operations are related to * AbortMultipartUpload:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::AbortMultipartUploadOutcomeCallable AbortMultipartUploadCallable(const Model::AbortMultipartUploadRequest& request) const; /** *

This action aborts a multipart upload. After a multipart upload is aborted, * no additional parts can be uploaded using that upload ID. The storage consumed * by any previously uploaded parts will be freed. However, if any part uploads are * currently in progress, those part uploads might or might not succeed. As a * result, it might be necessary to abort a given multipart upload multiple times * in order to completely free all storage consumed by all parts.

To verify * that all parts have been removed, so you don't get charged for the part storage, * you should call the ListParts * action and ensure that the parts list is empty.

For information about * permissions required to use the multipart upload, see Multipart * Upload and Permissions.

The following operations are related to * AbortMultipartUpload:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void AbortMultipartUploadAsync(const Model::AbortMultipartUploadRequest& request, const AbortMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Completes a multipart upload by assembling previously uploaded parts.

*

You first initiate the multipart upload and then upload all parts using the * UploadPart * operation. After successfully uploading all relevant parts of an upload, you * call this action to complete the upload. Upon receiving this request, Amazon S3 * concatenates all the parts in ascending order by part number to create a new * object. In the Complete Multipart Upload request, you must provide the parts * list. You must ensure that the parts list is complete. This action concatenates * the parts that you provide in the list. For each part in the list, you must * provide the part number and the ETag value, returned after that * part was uploaded.

Processing of a Complete Multipart Upload request * could take several minutes to complete. After Amazon S3 begins processing the * request, it sends an HTTP response header that specifies a 200 OK response. * While processing is in progress, Amazon S3 periodically sends white space * characters to keep the connection from timing out. Because a request could fail * after the initial 200 OK response has been sent, it is important that you check * the response body to determine whether the request succeeded.

Note that * if CompleteMultipartUpload fails, applications should be prepared * to retry the failed requests. For more information, see Amazon * S3 Error Best Practices.

For more information about multipart * uploads, see Uploading * Objects Using Multipart Upload.

For information about permissions * required to use the multipart upload API, see Multipart * Upload and Permissions.

CompleteMultipartUpload has the * following special errors:

  • Error code: * EntityTooSmall

    • Description: Your proposed upload * is smaller than the minimum allowed object size. Each part must be at least 5 MB * in size, except the last part.

    • 400 Bad Request

    *
  • Error code: InvalidPart

    • *

      Description: One or more of the specified parts could not be found. The part * might not have been uploaded, or the specified entity tag might not have matched * the part's entity tag.

    • 400 Bad Request

  • *
  • Error code: InvalidPartOrder

    • Description: * The list of parts was not in ascending order. The parts list must be specified * in order by part number.

    • 400 Bad Request

  • *
  • Error code: NoSuchUpload

    • Description: The * specified multipart upload does not exist. The upload ID might be invalid, or * the multipart upload might have been aborted or completed.

    • 404 * Not Found

The following operations are related to * CompleteMultipartUpload:

See Also:

AWS * API Reference

*/ virtual Model::CompleteMultipartUploadOutcome CompleteMultipartUpload(const Model::CompleteMultipartUploadRequest& request) const; /** *

Completes a multipart upload by assembling previously uploaded parts.

*

You first initiate the multipart upload and then upload all parts using the * UploadPart * operation. After successfully uploading all relevant parts of an upload, you * call this action to complete the upload. Upon receiving this request, Amazon S3 * concatenates all the parts in ascending order by part number to create a new * object. In the Complete Multipart Upload request, you must provide the parts * list. You must ensure that the parts list is complete. This action concatenates * the parts that you provide in the list. For each part in the list, you must * provide the part number and the ETag value, returned after that * part was uploaded.

Processing of a Complete Multipart Upload request * could take several minutes to complete. After Amazon S3 begins processing the * request, it sends an HTTP response header that specifies a 200 OK response. * While processing is in progress, Amazon S3 periodically sends white space * characters to keep the connection from timing out. Because a request could fail * after the initial 200 OK response has been sent, it is important that you check * the response body to determine whether the request succeeded.

Note that * if CompleteMultipartUpload fails, applications should be prepared * to retry the failed requests. For more information, see Amazon * S3 Error Best Practices.

For more information about multipart * uploads, see Uploading * Objects Using Multipart Upload.

For information about permissions * required to use the multipart upload API, see Multipart * Upload and Permissions.

CompleteMultipartUpload has the * following special errors:

  • Error code: * EntityTooSmall

    • Description: Your proposed upload * is smaller than the minimum allowed object size. Each part must be at least 5 MB * in size, except the last part.

    • 400 Bad Request

    *
  • Error code: InvalidPart

    • *

      Description: One or more of the specified parts could not be found. The part * might not have been uploaded, or the specified entity tag might not have matched * the part's entity tag.

    • 400 Bad Request

  • *
  • Error code: InvalidPartOrder

    • Description: * The list of parts was not in ascending order. The parts list must be specified * in order by part number.

    • 400 Bad Request

  • *
  • Error code: NoSuchUpload

    • Description: The * specified multipart upload does not exist. The upload ID might be invalid, or * the multipart upload might have been aborted or completed.

    • 404 * Not Found

The following operations are related to * CompleteMultipartUpload:

See Also:

AWS * API Reference

* Returns a future to the operation so that it can be executed in parallel to other requests. */
/// Callable variant of CompleteMultipartUpload.
/// @param request the CompleteMultipartUploadRequest describing the upload to complete.
/// @return a callable (future-like) handle to the in-flight operation's outcome.
virtual Model::CompleteMultipartUploadOutcomeCallable CompleteMultipartUploadCallable(const Model::CompleteMultipartUploadRequest& request) const; /** *

Completes a multipart upload by assembling previously uploaded parts.

*

You first initiate the multipart upload and then upload all parts using the * UploadPart * operation. After successfully uploading all relevant parts of an upload, you * call this action to complete the upload. Upon receiving this request, Amazon S3 * concatenates all the parts in ascending order by part number to create a new * object. In the Complete Multipart Upload request, you must provide the parts * list. You must ensure that the parts list is complete. This action concatenates * the parts that you provide in the list. For each part in the list, you must * provide the part number and the ETag value, returned after that * part was uploaded.

Processing of a Complete Multipart Upload request * could take several minutes to complete. After Amazon S3 begins processing the * request, it sends an HTTP response header that specifies a 200 OK response. * While processing is in progress, Amazon S3 periodically sends white space * characters to keep the connection from timing out. Because a request could fail * after the initial 200 OK response has been sent, it is important that you check * the response body to determine whether the request succeeded.

Note that * if CompleteMultipartUpload fails, applications should be prepared * to retry the failed requests. For more information, see Amazon * S3 Error Best Practices.

For more information about multipart * uploads, see Uploading * Objects Using Multipart Upload.

For information about permissions * required to use the multipart upload API, see Multipart * Upload and Permissions.

CompleteMultipartUpload has the * following special errors:

  • Error code: * EntityTooSmall

    • Description: Your proposed upload * is smaller than the minimum allowed object size. Each part must be at least 5 MB * in size, except the last part.

    • 400 Bad Request

    *
  • Error code: InvalidPart

    • *

      Description: One or more of the specified parts could not be found. The part * might not have been uploaded, or the specified entity tag might not have matched * the part's entity tag.

    • 400 Bad Request

  • *
  • Error code: InvalidPartOrder

    • Description: * The list of parts was not in ascending order. The parts list must be specified * in order by part number.

    • 400 Bad Request

  • *
  • Error code: NoSuchUpload

    • Description: The * specified multipart upload does not exist. The upload ID might be invalid, or * the multipart upload might have been aborted or completed.

    • 404 * Not Found

The following operations are related to * CompleteMultipartUpload:

See Also:

AWS * API Reference

* Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
/// Asynchronous variant of CompleteMultipartUpload.
/// @param request the CompleteMultipartUploadRequest describing the upload to complete.
/// @param handler callback invoked (on an executor thread) once the operation finishes.
/// @param context optional caller-supplied context; presumably handed back to the
///        handler to correlate request and response — confirm against AsyncCallerContext docs.
// NOTE(review): the template argument of shared_ptr was lost in this copy
// ("std::shared_ptr&" is ill-formed); restored to the SDK's conventional
// const Aws::Client::AsyncCallerContext.
virtual void CompleteMultipartUploadAsync(const Model::CompleteMultipartUploadRequest& request, const CompleteMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Creates a copy of an object that is already stored in Amazon S3.

*

You can store individual objects of up to 5 TB in Amazon S3. You create a * copy of your object up to 5 GB in size in a single atomic action using this API. * However, to copy an object greater than 5 GB, you must use the multipart upload * Upload Part - Copy API. For more information, see Copy * Object Using the REST Multipart Upload API.

All copy requests * must be authenticated. Additionally, you must have read access to the * source object and write access to the destination bucket. For more * information, see REST * Authentication. Both the Region that you want to copy the object from and * the Region that you want to copy the object to must be enabled for your * account.

A copy request might return an error when Amazon S3 receives the * copy request or while Amazon S3 is copying the files. If the error occurs before * the copy action starts, you receive a standard Amazon S3 error. If the error * occurs during the copy operation, the error response is embedded in the * 200 OK response. This means that a 200 OK response can * contain either a success or an error. Design your application to parse the * contents of the response and handle it appropriately.

If the copy is * successful, you receive a response with information about the copied object.

*

If the request is an HTTP 1.1 request, the response is chunk encoded. * If it were not, it would not contain the content-length, and you would need to * read the entire body.

The copy request charge is based on the * storage class and Region that you specify for the destination object. For * pricing information, see Amazon S3 * pricing.

Amazon S3 transfer acceleration does not support * cross-Region copies. If you request a cross-Region copy using a transfer * acceleration endpoint, you get a 400 Bad Request error. For more * information, see Transfer * Acceleration.

Metadata

When copying an * object, you can preserve all metadata (default) or specify new metadata. * However, the ACL is not preserved and is set to private for the user making the * request. To override the default ACL setting, specify a new ACL when generating * a copy request. For more information, see Using * ACLs.

To specify whether you want the object metadata copied from * the source object or replaced with metadata provided in the request, you can * optionally add the x-amz-metadata-directive header. When you grant * permissions, you can use the s3:x-amz-metadata-directive condition * key to enforce certain metadata behavior when objects are uploaded. For more * information, see Specifying * Conditions in a Policy in the Amazon S3 User Guide. For a complete * list of Amazon S3-specific condition keys, see Actions, * Resources, and Condition Keys for Amazon S3.

* x-amz-copy-source-if Headers

To only copy an object * under certain conditions, such as whether the Etag matches or * whether the object was modified before or after a specified date, use the * following request parameters:

  • * x-amz-copy-source-if-match

  • * x-amz-copy-source-if-none-match

  • * x-amz-copy-source-if-unmodified-since

  • * x-amz-copy-source-if-modified-since

If both * the x-amz-copy-source-if-match and * x-amz-copy-source-if-unmodified-since headers are present in the * request and evaluate as follows, Amazon S3 returns 200 OK and * copies the data:

  • x-amz-copy-source-if-match * condition evaluates to true

  • * x-amz-copy-source-if-unmodified-since condition evaluates to * false

If both the * x-amz-copy-source-if-none-match and * x-amz-copy-source-if-modified-since headers are present in the * request and evaluate as follows, Amazon S3 returns the 412 Precondition * Failed response code:

  • x-amz-copy-source-if-none-match condition evaluates to false

  • x-amz-copy-source-if-modified-since condition evaluates to true

All headers with the * x-amz- prefix, including x-amz-copy-source, must be * signed.

Server-side encryption

When you perform a * CopyObject operation, you can optionally use the appropriate encryption-related * headers to encrypt the object using server-side encryption with Amazon Web * Services managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided * encryption key. With server-side encryption, Amazon S3 encrypts your data as it * writes it to disks in its data centers and decrypts the data when you access it. * For more information about server-side encryption, see Using * Server-Side Encryption.

If a target object uses SSE-KMS, you can * enable an S3 Bucket Key for the object. For more information, see Amazon S3 * Bucket Keys in the Amazon S3 User Guide.

Access Control * List (ACL)-Specific Request Headers

When copying an object, you can * optionally use headers to grant ACL-based permissions. By default, all objects * are private. Only the owner has full access control. When adding a new object, * you can grant permissions to individual Amazon Web Services accounts or to * predefined groups defined by Amazon S3. These permissions are then added to the * ACL on the object. For more information, see Access * Control List (ACL) Overview and Managing * ACLs Using the REST API.

Storage Class Options

You * can use the CopyObject action to change the storage class of an * object that is already stored in Amazon S3 using the StorageClass * parameter. For more information, see Storage * Classes in the Amazon S3 User Guide.

Versioning

*

By default, x-amz-copy-source identifies the current version of * an object to copy. If the current version is a delete marker, Amazon S3 behaves * as if the object was deleted. To copy a different version, use the * versionId subresource.

If you enable versioning on the * target bucket, Amazon S3 generates a unique version ID for the object being * copied. This version ID is different from the version ID of the source object. * Amazon S3 returns the version ID of the copied object in the * x-amz-version-id response header in the response.

If you do * not enable versioning or suspend it on the target bucket, the version ID that * Amazon S3 generates is always null.

If the source object's storage class * is GLACIER, you must restore a copy of this object before you can use it as a * source object for the copy operation. For more information, see RestoreObject.

*

The following operations are related to CopyObject:

For more information, see Copying * Objects.

See Also:

AWS API * Reference

*/
/// Synchronous CopyObject: copies an object that is already stored in Amazon S3
/// and returns the outcome (success result or error) directly. See the comment
/// block above for size limits, conditional-copy headers, encryption, and
/// versioning behavior.
/// @param request the CopyObjectRequest naming the source object and destination.
/// @return Model::CopyObjectOutcome carrying the result or an error.
virtual Model::CopyObjectOutcome CopyObject(const Model::CopyObjectRequest& request) const; /** *

Creates a copy of an object that is already stored in Amazon S3.

*

You can store individual objects of up to 5 TB in Amazon S3. You create a * copy of your object up to 5 GB in size in a single atomic action using this API. * However, to copy an object greater than 5 GB, you must use the multipart upload * Upload Part - Copy API. For more information, see Copy * Object Using the REST Multipart Upload API.

All copy requests * must be authenticated. Additionally, you must have read access to the * source object and write access to the destination bucket. For more * information, see REST * Authentication. Both the Region that you want to copy the object from and * the Region that you want to copy the object to must be enabled for your * account.

A copy request might return an error when Amazon S3 receives the * copy request or while Amazon S3 is copying the files. If the error occurs before * the copy action starts, you receive a standard Amazon S3 error. If the error * occurs during the copy operation, the error response is embedded in the * 200 OK response. This means that a 200 OK response can * contain either a success or an error. Design your application to parse the * contents of the response and handle it appropriately.

If the copy is * successful, you receive a response with information about the copied object.

*

If the request is an HTTP 1.1 request, the response is chunk encoded. * If it were not, it would not contain the content-length, and you would need to * read the entire body.

The copy request charge is based on the * storage class and Region that you specify for the destination object. For * pricing information, see Amazon S3 * pricing.

Amazon S3 transfer acceleration does not support * cross-Region copies. If you request a cross-Region copy using a transfer * acceleration endpoint, you get a 400 Bad Request error. For more * information, see Transfer * Acceleration.

Metadata

When copying an * object, you can preserve all metadata (default) or specify new metadata. * However, the ACL is not preserved and is set to private for the user making the * request. To override the default ACL setting, specify a new ACL when generating * a copy request. For more information, see Using * ACLs.

To specify whether you want the object metadata copied from * the source object or replaced with metadata provided in the request, you can * optionally add the x-amz-metadata-directive header. When you grant * permissions, you can use the s3:x-amz-metadata-directive condition * key to enforce certain metadata behavior when objects are uploaded. For more * information, see Specifying * Conditions in a Policy in the Amazon S3 User Guide. For a complete * list of Amazon S3-specific condition keys, see Actions, * Resources, and Condition Keys for Amazon S3.

* x-amz-copy-source-if Headers

To only copy an object * under certain conditions, such as whether the Etag matches or * whether the object was modified before or after a specified date, use the * following request parameters:

  • * x-amz-copy-source-if-match

  • * x-amz-copy-source-if-none-match

  • * x-amz-copy-source-if-unmodified-since

  • * x-amz-copy-source-if-modified-since

If both * the x-amz-copy-source-if-match and * x-amz-copy-source-if-unmodified-since headers are present in the * request and evaluate as follows, Amazon S3 returns 200 OK and * copies the data:

  • x-amz-copy-source-if-match * condition evaluates to true

  • * x-amz-copy-source-if-unmodified-since condition evaluates to * false

If both the * x-amz-copy-source-if-none-match and * x-amz-copy-source-if-modified-since headers are present in the * request and evaluate as follows, Amazon S3 returns the 412 Precondition * Failed response code:

  • * x-amz-copy-source-if-none-match condition evaluates to false

    *
  • x-amz-copy-source-if-modified-since condition * evaluates to true

All headers with the * x-amz- prefix, including x-amz-copy-source, must be * signed.

Server-side encryption

When you perform a * CopyObject operation, you can optionally use the appropriate encryption-related * headers to encrypt the object using server-side encryption with Amazon Web * Services managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided * encryption key. With server-side encryption, Amazon S3 encrypts your data as it * writes it to disks in its data centers and decrypts the data when you access it. * For more information about server-side encryption, see Using * Server-Side Encryption.

If a target object uses SSE-KMS, you can * enable an S3 Bucket Key for the object. For more information, see Amazon S3 * Bucket Keys in the Amazon S3 User Guide.

Access Control * List (ACL)-Specific Request Headers

When copying an object, you can * optionally use headers to grant ACL-based permissions. By default, all objects * are private. Only the owner has full access control. When adding a new object, * you can grant permissions to individual Amazon Web Services accounts or to * predefined groups defined by Amazon S3. These permissions are then added to the * ACL on the object. For more information, see Access * Control List (ACL) Overview and Managing * ACLs Using the REST API.

Storage Class Options

You * can use the CopyObject action to change the storage class of an * object that is already stored in Amazon S3 using the StorageClass * parameter. For more information, see Storage * Classes in the Amazon S3 User Guide.

Versioning

*

By default, x-amz-copy-source identifies the current version of * an object to copy. If the current version is a delete marker, Amazon S3 behaves * as if the object was deleted. To copy a different version, use the * versionId subresource.

If you enable versioning on the * target bucket, Amazon S3 generates a unique version ID for the object being * copied. This version ID is different from the version ID of the source object. * Amazon S3 returns the version ID of the copied object in the * x-amz-version-id response header in the response.

If you do * not enable versioning or suspend it on the target bucket, the version ID that * Amazon S3 generates is always null.

If the source object's storage class * is GLACIER, you must restore a copy of this object before you can use it as a * source object for the copy operation. For more information, see RestoreObject.

*

The following operations are related to CopyObject:

For more information, see Copying * Objects.

See Also:

AWS API * Reference

* Returns a future to the operation so that it can be executed in parallel to other requests. */
/// Callable variant of CopyObject.
/// @param request the CopyObjectRequest naming the source object and destination.
/// @return a callable (future-like) handle to the in-flight operation's outcome.
virtual Model::CopyObjectOutcomeCallable CopyObjectCallable(const Model::CopyObjectRequest& request) const; /** *

Creates a copy of an object that is already stored in Amazon S3.

*

You can store individual objects of up to 5 TB in Amazon S3. You create a * copy of your object up to 5 GB in size in a single atomic action using this API. * However, to copy an object greater than 5 GB, you must use the multipart upload * Upload Part - Copy API. For more information, see Copy * Object Using the REST Multipart Upload API.

All copy requests * must be authenticated. Additionally, you must have read access to the * source object and write access to the destination bucket. For more * information, see REST * Authentication. Both the Region that you want to copy the object from and * the Region that you want to copy the object to must be enabled for your * account.

A copy request might return an error when Amazon S3 receives the * copy request or while Amazon S3 is copying the files. If the error occurs before * the copy action starts, you receive a standard Amazon S3 error. If the error * occurs during the copy operation, the error response is embedded in the * 200 OK response. This means that a 200 OK response can * contain either a success or an error. Design your application to parse the * contents of the response and handle it appropriately.

If the copy is * successful, you receive a response with information about the copied object.

*

If the request is an HTTP 1.1 request, the response is chunk encoded. * If it were not, it would not contain the content-length, and you would need to * read the entire body.

The copy request charge is based on the * storage class and Region that you specify for the destination object. For * pricing information, see Amazon S3 * pricing.

Amazon S3 transfer acceleration does not support * cross-Region copies. If you request a cross-Region copy using a transfer * acceleration endpoint, you get a 400 Bad Request error. For more * information, see Transfer * Acceleration.

Metadata

When copying an * object, you can preserve all metadata (default) or specify new metadata. * However, the ACL is not preserved and is set to private for the user making the * request. To override the default ACL setting, specify a new ACL when generating * a copy request. For more information, see Using * ACLs.

To specify whether you want the object metadata copied from * the source object or replaced with metadata provided in the request, you can * optionally add the x-amz-metadata-directive header. When you grant * permissions, you can use the s3:x-amz-metadata-directive condition * key to enforce certain metadata behavior when objects are uploaded. For more * information, see Specifying * Conditions in a Policy in the Amazon S3 User Guide. For a complete * list of Amazon S3-specific condition keys, see Actions, * Resources, and Condition Keys for Amazon S3.

* x-amz-copy-source-if Headers

To only copy an object * under certain conditions, such as whether the Etag matches or * whether the object was modified before or after a specified date, use the * following request parameters:

  • * x-amz-copy-source-if-match

  • * x-amz-copy-source-if-none-match

  • * x-amz-copy-source-if-unmodified-since

  • * x-amz-copy-source-if-modified-since

If both * the x-amz-copy-source-if-match and * x-amz-copy-source-if-unmodified-since headers are present in the * request and evaluate as follows, Amazon S3 returns 200 OK and * copies the data:

  • x-amz-copy-source-if-match * condition evaluates to true

  • * x-amz-copy-source-if-unmodified-since condition evaluates to * false

If both the * x-amz-copy-source-if-none-match and * x-amz-copy-source-if-modified-since headers are present in the * request and evaluate as follows, Amazon S3 returns the 412 Precondition * Failed response code:

  • * x-amz-copy-source-if-none-match condition evaluates to false

    *
  • x-amz-copy-source-if-modified-since condition * evaluates to true

All headers with the * x-amz- prefix, including x-amz-copy-source, must be * signed.

Server-side encryption

When you perform a * CopyObject operation, you can optionally use the appropriate encryption-related * headers to encrypt the object using server-side encryption with Amazon Web * Services managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided * encryption key. With server-side encryption, Amazon S3 encrypts your data as it * writes it to disks in its data centers and decrypts the data when you access it. * For more information about server-side encryption, see Using * Server-Side Encryption.

If a target object uses SSE-KMS, you can * enable an S3 Bucket Key for the object. For more information, see Amazon S3 * Bucket Keys in the Amazon S3 User Guide.

Access Control * List (ACL)-Specific Request Headers

When copying an object, you can * optionally use headers to grant ACL-based permissions. By default, all objects * are private. Only the owner has full access control. When adding a new object, * you can grant permissions to individual Amazon Web Services accounts or to * predefined groups defined by Amazon S3. These permissions are then added to the * ACL on the object. For more information, see Access * Control List (ACL) Overview and Managing * ACLs Using the REST API.

Storage Class Options

You * can use the CopyObject action to change the storage class of an * object that is already stored in Amazon S3 using the StorageClass * parameter. For more information, see Storage * Classes in the Amazon S3 User Guide.

Versioning

*

By default, x-amz-copy-source identifies the current version of * an object to copy. If the current version is a delete marker, Amazon S3 behaves * as if the object was deleted. To copy a different version, use the * versionId subresource.

If you enable versioning on the * target bucket, Amazon S3 generates a unique version ID for the object being * copied. This version ID is different from the version ID of the source object. * Amazon S3 returns the version ID of the copied object in the * x-amz-version-id response header in the response.

If you do * not enable versioning or suspend it on the target bucket, the version ID that * Amazon S3 generates is always null.

If the source object's storage class * is GLACIER, you must restore a copy of this object before you can use it as a * source object for the copy operation. For more information, see RestoreObject.

*

The following operations are related to CopyObject:

For more information, see Copying * Objects.

See Also:

AWS API * Reference

* Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
/// Asynchronous variant of CopyObject.
/// @param request the CopyObjectRequest naming the source object and destination.
/// @param handler callback invoked (on an executor thread) once the operation finishes.
/// @param context optional caller-supplied context; presumably handed back to the
///        handler to correlate request and response — confirm against AsyncCallerContext docs.
// NOTE(review): the template argument of shared_ptr was lost in this copy
// ("std::shared_ptr&" is ill-formed); restored to the SDK's conventional
// const Aws::Client::AsyncCallerContext.
virtual void CopyObjectAsync(const Model::CopyObjectRequest& request, const CopyObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 * and have a valid Amazon Web Services Access Key ID to authenticate requests. * Anonymous requests are never allowed to create buckets. By creating the bucket, * you become the bucket owner.

Not every string is an acceptable bucket * name. For information about bucket naming restrictions, see Bucket * naming rules.

If you want to create an Amazon S3 on Outposts bucket, * see Create * Bucket.

By default, the bucket is created in the US East (N. * Virginia) Region. You can optionally specify a Region in the request body. You * might choose a Region to optimize latency, minimize costs, or address regulatory * requirements. For example, if you reside in Europe, you will probably find it * advantageous to create buckets in the Europe (Ireland) Region. For more * information, see Accessing * a bucket.

If you send your create bucket request to the * s3.amazonaws.com endpoint, the request goes to the us-east-1 * Region. Accordingly, the signature calculations in Signature Version 4 must use * us-east-1 as the Region, even if the location constraint in the request * specifies another Region where the bucket is to be created. If you create a * bucket in a Region other than US East (N. Virginia), your application must be * able to handle 307 redirect. For more information, see Virtual * hosting of buckets.

When creating a bucket using this * operation, you can optionally specify the accounts or groups that should be * granted specific permissions on the bucket. There are two ways to grant the * appropriate permissions using the request headers.

  • Specify a * canned ACL using the x-amz-acl request header. Amazon S3 supports a * set of predefined ACLs, known as canned ACLs. Each canned ACL has a * predefined set of grantees and permissions. For more information, see Canned * ACL.

  • Specify access permissions explicitly using the * x-amz-grant-read, x-amz-grant-write, * x-amz-grant-read-acp, x-amz-grant-write-acp, and * x-amz-grant-full-control headers. These headers map to the set of * permissions Amazon S3 supports in an ACL. For more information, see Access * control list (ACL) overview.

    You specify each grantee as a type=value * pair, where the type is one of the following:

    • id * – if the value specified is the canonical user ID of an Amazon Web Services * account

    • uri – if you are granting permissions to * a predefined group

    • emailAddress – if the value * specified is the email address of an Amazon Web Services account

      *

      Using email addresses to specify a grantee is only supported in the following * Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

      *

    For example, the following x-amz-grant-read header * grants the Amazon Web Services accounts identified by account IDs permissions to * read object data and its metadata:

    x-amz-grant-read: * id="11112222333", id="444455556666"

You can * use either a canned ACL or specify access permissions explicitly. You cannot do * both.

Permissions

If your * CreateBucket request specifies ACL permissions and the ACL is * public-read, public-read-write, authenticated-read, or if you specify access * permissions explicitly through any other ACL, both s3:CreateBucket * and s3:PutBucketAcl permissions are needed. If the ACL the * CreateBucket request is private, only s3:CreateBucket * permission is needed.

If ObjectLockEnabledForBucket is set * to true in your CreateBucket request, * s3:PutBucketObjectLockConfiguration and * s3:PutBucketVersioning permissions are required.

The * following operations are related to CreateBucket:

See Also:

AWS API * Reference

*/
/// Synchronous CreateBucket: creates a new S3 bucket and returns the outcome
/// (success result or error) directly. See the comment block above for naming
/// rules, Region selection, and ACL/permission headers.
/// @param request the CreateBucketRequest describing the bucket to create.
/// @return Model::CreateBucketOutcome carrying the result or an error.
virtual Model::CreateBucketOutcome CreateBucket(const Model::CreateBucketRequest& request) const; /** *

Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 * and have a valid Amazon Web Services Access Key ID to authenticate requests. * Anonymous requests are never allowed to create buckets. By creating the bucket, * you become the bucket owner.

Not every string is an acceptable bucket * name. For information about bucket naming restrictions, see Bucket * naming rules.

If you want to create an Amazon S3 on Outposts bucket, * see Create * Bucket.

By default, the bucket is created in the US East (N. * Virginia) Region. You can optionally specify a Region in the request body. You * might choose a Region to optimize latency, minimize costs, or address regulatory * requirements. For example, if you reside in Europe, you will probably find it * advantageous to create buckets in the Europe (Ireland) Region. For more * information, see Accessing * a bucket.

If you send your create bucket request to the * s3.amazonaws.com endpoint, the request goes to the us-east-1 * Region. Accordingly, the signature calculations in Signature Version 4 must use * us-east-1 as the Region, even if the location constraint in the request * specifies another Region where the bucket is to be created. If you create a * bucket in a Region other than US East (N. Virginia), your application must be * able to handle 307 redirect. For more information, see Virtual * hosting of buckets.

When creating a bucket using this * operation, you can optionally specify the accounts or groups that should be * granted specific permissions on the bucket. There are two ways to grant the * appropriate permissions using the request headers.

  • Specify a * canned ACL using the x-amz-acl request header. Amazon S3 supports a * set of predefined ACLs, known as canned ACLs. Each canned ACL has a * predefined set of grantees and permissions. For more information, see Canned * ACL.

  • Specify access permissions explicitly using the * x-amz-grant-read, x-amz-grant-write, * x-amz-grant-read-acp, x-amz-grant-write-acp, and * x-amz-grant-full-control headers. These headers map to the set of * permissions Amazon S3 supports in an ACL. For more information, see Access * control list (ACL) overview.

    You specify each grantee as a type=value * pair, where the type is one of the following:

    • id * – if the value specified is the canonical user ID of an Amazon Web Services * account

    • uri – if you are granting permissions to * a predefined group

    • emailAddress – if the value * specified is the email address of an Amazon Web Services account

      *

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    For example, the following x-amz-grant-read header * grants the Amazon Web Services accounts identified by account IDs permissions to * read object data and its metadata:

    x-amz-grant-read: id="111122223333", id="444455556666"

You can * use either a canned ACL or specify access permissions explicitly. You cannot do * both.

Permissions

If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL of the CreateBucket request is private, only s3:CreateBucket permission is needed.

If ObjectLockEnabledForBucket is set * to true in your CreateBucket request, * s3:PutBucketObjectLockConfiguration and * s3:PutBucketVersioning permissions are required.

The * following operations are related to CreateBucket:

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateBucketOutcomeCallable CreateBucketCallable(const Model::CreateBucketRequest& request) const; /** *

Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 * and have a valid Amazon Web Services Access Key ID to authenticate requests. * Anonymous requests are never allowed to create buckets. By creating the bucket, * you become the bucket owner.

Not every string is an acceptable bucket * name. For information about bucket naming restrictions, see Bucket * naming rules.

If you want to create an Amazon S3 on Outposts bucket, * see Create * Bucket.

By default, the bucket is created in the US East (N. * Virginia) Region. You can optionally specify a Region in the request body. You * might choose a Region to optimize latency, minimize costs, or address regulatory * requirements. For example, if you reside in Europe, you will probably find it * advantageous to create buckets in the Europe (Ireland) Region. For more * information, see Accessing * a bucket.

If you send your create bucket request to the * s3.amazonaws.com endpoint, the request goes to the us-east-1 * Region. Accordingly, the signature calculations in Signature Version 4 must use * us-east-1 as the Region, even if the location constraint in the request * specifies another Region where the bucket is to be created. If you create a * bucket in a Region other than US East (N. Virginia), your application must be * able to handle 307 redirect. For more information, see Virtual * hosting of buckets.

When creating a bucket using this * operation, you can optionally specify the accounts or groups that should be * granted specific permissions on the bucket. There are two ways to grant the * appropriate permissions using the request headers.

  • Specify a * canned ACL using the x-amz-acl request header. Amazon S3 supports a * set of predefined ACLs, known as canned ACLs. Each canned ACL has a * predefined set of grantees and permissions. For more information, see Canned * ACL.

  • Specify access permissions explicitly using the * x-amz-grant-read, x-amz-grant-write, * x-amz-grant-read-acp, x-amz-grant-write-acp, and * x-amz-grant-full-control headers. These headers map to the set of * permissions Amazon S3 supports in an ACL. For more information, see Access * control list (ACL) overview.

    You specify each grantee as a type=value * pair, where the type is one of the following:

    • id * – if the value specified is the canonical user ID of an Amazon Web Services * account

    • uri – if you are granting permissions to * a predefined group

    • emailAddress – if the value * specified is the email address of an Amazon Web Services account

      *

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    For example, the following x-amz-grant-read header * grants the Amazon Web Services accounts identified by account IDs permissions to * read object data and its metadata:

    x-amz-grant-read: id="111122223333", id="444455556666"

You can * use either a canned ACL or specify access permissions explicitly. You cannot do * both.

Permissions

If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL of the CreateBucket request is private, only s3:CreateBucket permission is needed.

If ObjectLockEnabledForBucket is set * to true in your CreateBucket request, * s3:PutBucketObjectLockConfiguration and * s3:PutBucketVersioning permissions are required.

The * following operations are related to CreateBucket:

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateBucketAsync(const Model::CreateBucketRequest& request, const CreateBucketResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

This action initiates a multipart upload and returns an upload ID. This * upload ID is used to associate all of the parts in the specific multipart * upload. You specify this upload ID in each of your subsequent upload part * requests (see UploadPart). * You also include this upload ID in the final request to either complete or abort * the multipart upload request.

For more information about multipart * uploads, see Multipart * Upload Overview.

If you have configured a lifecycle rule to abort * incomplete multipart uploads, the upload must complete within the number of days * specified in the bucket lifecycle configuration. Otherwise, the incomplete * multipart upload becomes eligible for an abort action and Amazon S3 aborts the * multipart upload. For more information, see Aborting * Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

For * information about the permissions required to use the multipart upload API, see * Multipart * Upload and Permissions.

For request signing, multipart upload is just * a series of regular requests. You initiate a multipart upload, send one or more * requests to upload parts, and then complete the multipart upload process. You * sign each request individually. There is nothing special about signing multipart * upload requests. For more information about signing, see Authenticating * Requests (Amazon Web Services Signature Version 4).

After you * initiate a multipart upload and upload one or more parts, to stop being charged * for storing the uploaded parts, you must either complete or abort the multipart * upload. Amazon S3 frees up the space used to store the parts and stop charging * you for storing them only after you either complete or abort a multipart upload. *

You can optionally request server-side encryption. For * server-side encryption, Amazon S3 encrypts your data as it writes it to disks in * its data centers and decrypts it when you access it. You can provide your own * encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed * encryption keys. If you choose to provide your own encryption key, the request * headers you provide in UploadPart * and UploadPartCopy * requests must match the headers you used in the request to initiate the upload * by using CreateMultipartUpload.

To perform a multipart * upload with encryption using an Amazon Web Services KMS key, the requester must * have permission to the kms:Decrypt and * kms:GenerateDataKey* actions on the key. These permissions are * required because Amazon S3 must decrypt and read data from the encrypted file * parts before it completes the multipart upload. For more information, see Multipart * upload API and permissions in the Amazon S3 User Guide.

If * your Identity and Access Management (IAM) user or role is in the same Amazon Web * Services account as the KMS key, then you must have these permissions on the key * policy. If your IAM user or role belongs to a different account than the key, * then you must have the permissions on both the key policy and your IAM user or * role.

For more information, see Protecting * Data Using Server-Side Encryption.

Access Permissions
*

When copying an object, you can optionally specify the accounts or groups * that should be granted specific permissions on the new object. There are two * ways to grant the permissions using the request headers:

  • *

    Specify a canned ACL with the x-amz-acl request header. For more * information, see Canned * ACL.

  • Specify access permissions explicitly with the * x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control * headers. These parameters map to the set of permissions that Amazon S3 supports * in an ACL. For more information, see Access * Control List (ACL) Overview.

You can use either a canned * ACL or specify access permissions explicitly. You cannot do both.

*
Server-Side Encryption-Specific Request Headers

You can * optionally tell Amazon S3 to encrypt data at rest using server-side encryption. * Server-side encryption is for data encryption at rest. Amazon S3 encrypts your * data as it writes it to disks in its data centers and decrypts it when you * access it. The option you use depends on whether you want to use Amazon Web * Services managed encryption keys or provide your own encryption key.

    *
  • Use encryption keys managed by Amazon S3 or customer managed key stored * in Amazon Web Services Key Management Service (Amazon Web Services KMS) – If you * want Amazon Web Services to manage the keys used to encrypt data, specify the * following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify * x-amz-server-side-encryption:aws:kms, but don't provide * x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the * Amazon Web Services managed key in Amazon Web Services KMS to protect the * data.

    All GET and PUT requests for an object * protected by Amazon Web Services KMS fail if you don't make them with SSL or by * using SigV4.

    For more information about server-side * encryption with KMS key (SSE-KMS), see Protecting * Data Using Server-Side Encryption with KMS keys.

  • Use * customer-provided encryption keys – If you want to manage your own encryption * keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more * information about server-side encryption with KMS keys (SSE-KMS), see Protecting * Data Using Server-Side Encryption with KMS keys.

*
Access-Control-List (ACL)-Specific Request Headers

You also can * use the following access control–related headers with this operation. By * default, all objects are private. Only the owner has full access control. When * adding a new object, you can grant permissions to individual Amazon Web Services * accounts or to predefined groups defined by Amazon S3. These permissions are * then added to the access control list (ACL) on the object. For more information, * see Using * ACLs. With this operation, you can grant access permissions using one of the * following two methods:

  • Specify a canned ACL * (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as * canned ACLs. Each canned ACL has a predefined set of grantees and * permissions. For more information, see Canned * ACL.

  • Specify access permissions explicitly — To explicitly * grant access permissions to specific Amazon Web Services accounts or groups, use * the following headers. Each header maps to specific permissions that Amazon S3 * supports in an ACL. For more information, see Access * Control List (ACL) Overview. In the header, you specify a list of grantees * who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a * type=value pair, where the type is one of the following:

    • * id – if the value specified is the canonical user ID of an Amazon * Web Services account

    • uri – if you are granting * permissions to a predefined group

    • emailAddress – * if the value specified is the email address of an Amazon Web Services * account

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    For example, the following x-amz-grant-read header * grants the Amazon Web Services accounts identified by account IDs permissions to * read object data and its metadata:

    x-amz-grant-read: id="111122223333", id="444455556666"

The * following operations are related to CreateMultipartUpload:

See Also:

AWS * API Reference

*/ virtual Model::CreateMultipartUploadOutcome CreateMultipartUpload(const Model::CreateMultipartUploadRequest& request) const; /** *

This action initiates a multipart upload and returns an upload ID. This * upload ID is used to associate all of the parts in the specific multipart * upload. You specify this upload ID in each of your subsequent upload part * requests (see UploadPart). * You also include this upload ID in the final request to either complete or abort * the multipart upload request.

For more information about multipart * uploads, see Multipart * Upload Overview.

If you have configured a lifecycle rule to abort * incomplete multipart uploads, the upload must complete within the number of days * specified in the bucket lifecycle configuration. Otherwise, the incomplete * multipart upload becomes eligible for an abort action and Amazon S3 aborts the * multipart upload. For more information, see Aborting * Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

For * information about the permissions required to use the multipart upload API, see * Multipart * Upload and Permissions.

For request signing, multipart upload is just * a series of regular requests. You initiate a multipart upload, send one or more * requests to upload parts, and then complete the multipart upload process. You * sign each request individually. There is nothing special about signing multipart * upload requests. For more information about signing, see Authenticating * Requests (Amazon Web Services Signature Version 4).

After you * initiate a multipart upload and upload one or more parts, to stop being charged * for storing the uploaded parts, you must either complete or abort the multipart * upload. Amazon S3 frees up the space used to store the parts and stop charging * you for storing them only after you either complete or abort a multipart upload. *

You can optionally request server-side encryption. For * server-side encryption, Amazon S3 encrypts your data as it writes it to disks in * its data centers and decrypts it when you access it. You can provide your own * encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed * encryption keys. If you choose to provide your own encryption key, the request * headers you provide in UploadPart * and UploadPartCopy * requests must match the headers you used in the request to initiate the upload * by using CreateMultipartUpload.

To perform a multipart * upload with encryption using an Amazon Web Services KMS key, the requester must * have permission to the kms:Decrypt and * kms:GenerateDataKey* actions on the key. These permissions are * required because Amazon S3 must decrypt and read data from the encrypted file * parts before it completes the multipart upload. For more information, see Multipart * upload API and permissions in the Amazon S3 User Guide.

If * your Identity and Access Management (IAM) user or role is in the same Amazon Web * Services account as the KMS key, then you must have these permissions on the key * policy. If your IAM user or role belongs to a different account than the key, * then you must have the permissions on both the key policy and your IAM user or * role.

For more information, see Protecting * Data Using Server-Side Encryption.

Access Permissions
*

When copying an object, you can optionally specify the accounts or groups * that should be granted specific permissions on the new object. There are two * ways to grant the permissions using the request headers:

  • *

    Specify a canned ACL with the x-amz-acl request header. For more * information, see Canned * ACL.

  • Specify access permissions explicitly with the * x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control * headers. These parameters map to the set of permissions that Amazon S3 supports * in an ACL. For more information, see Access * Control List (ACL) Overview.

You can use either a canned * ACL or specify access permissions explicitly. You cannot do both.

*
Server-Side Encryption-Specific Request Headers

You can * optionally tell Amazon S3 to encrypt data at rest using server-side encryption. * Server-side encryption is for data encryption at rest. Amazon S3 encrypts your * data as it writes it to disks in its data centers and decrypts it when you * access it. The option you use depends on whether you want to use Amazon Web * Services managed encryption keys or provide your own encryption key.

    *
  • Use encryption keys managed by Amazon S3 or customer managed key stored * in Amazon Web Services Key Management Service (Amazon Web Services KMS) – If you * want Amazon Web Services to manage the keys used to encrypt data, specify the * following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify * x-amz-server-side-encryption:aws:kms, but don't provide * x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the * Amazon Web Services managed key in Amazon Web Services KMS to protect the * data.

    All GET and PUT requests for an object * protected by Amazon Web Services KMS fail if you don't make them with SSL or by * using SigV4.

    For more information about server-side * encryption with KMS key (SSE-KMS), see Protecting * Data Using Server-Side Encryption with KMS keys.

  • Use * customer-provided encryption keys – If you want to manage your own encryption * keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more * information about server-side encryption with KMS keys (SSE-KMS), see Protecting * Data Using Server-Side Encryption with KMS keys.

*
Access-Control-List (ACL)-Specific Request Headers

You also can * use the following access control–related headers with this operation. By * default, all objects are private. Only the owner has full access control. When * adding a new object, you can grant permissions to individual Amazon Web Services * accounts or to predefined groups defined by Amazon S3. These permissions are * then added to the access control list (ACL) on the object. For more information, * see Using * ACLs. With this operation, you can grant access permissions using one of the * following two methods:

  • Specify a canned ACL * (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as * canned ACLs. Each canned ACL has a predefined set of grantees and * permissions. For more information, see Canned * ACL.

  • Specify access permissions explicitly — To explicitly * grant access permissions to specific Amazon Web Services accounts or groups, use * the following headers. Each header maps to specific permissions that Amazon S3 * supports in an ACL. For more information, see Access * Control List (ACL) Overview. In the header, you specify a list of grantees * who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a * type=value pair, where the type is one of the following:

    • * id – if the value specified is the canonical user ID of an Amazon * Web Services account

    • uri – if you are granting * permissions to a predefined group

    • emailAddress – * if the value specified is the email address of an Amazon Web Services * account

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    For example, the following x-amz-grant-read header * grants the Amazon Web Services accounts identified by account IDs permissions to * read object data and its metadata:

    x-amz-grant-read: id="111122223333", id="444455556666"

The * following operations are related to CreateMultipartUpload:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateMultipartUploadOutcomeCallable CreateMultipartUploadCallable(const Model::CreateMultipartUploadRequest& request) const; /** *

This action initiates a multipart upload and returns an upload ID. This * upload ID is used to associate all of the parts in the specific multipart * upload. You specify this upload ID in each of your subsequent upload part * requests (see UploadPart). * You also include this upload ID in the final request to either complete or abort * the multipart upload request.

For more information about multipart * uploads, see Multipart * Upload Overview.

If you have configured a lifecycle rule to abort * incomplete multipart uploads, the upload must complete within the number of days * specified in the bucket lifecycle configuration. Otherwise, the incomplete * multipart upload becomes eligible for an abort action and Amazon S3 aborts the * multipart upload. For more information, see Aborting * Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

For * information about the permissions required to use the multipart upload API, see * Multipart * Upload and Permissions.

For request signing, multipart upload is just * a series of regular requests. You initiate a multipart upload, send one or more * requests to upload parts, and then complete the multipart upload process. You * sign each request individually. There is nothing special about signing multipart * upload requests. For more information about signing, see Authenticating * Requests (Amazon Web Services Signature Version 4).

After you * initiate a multipart upload and upload one or more parts, to stop being charged * for storing the uploaded parts, you must either complete or abort the multipart * upload. Amazon S3 frees up the space used to store the parts and stop charging * you for storing them only after you either complete or abort a multipart upload. *

You can optionally request server-side encryption. For * server-side encryption, Amazon S3 encrypts your data as it writes it to disks in * its data centers and decrypts it when you access it. You can provide your own * encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed * encryption keys. If you choose to provide your own encryption key, the request * headers you provide in UploadPart * and UploadPartCopy * requests must match the headers you used in the request to initiate the upload * by using CreateMultipartUpload.

To perform a multipart * upload with encryption using an Amazon Web Services KMS key, the requester must * have permission to the kms:Decrypt and * kms:GenerateDataKey* actions on the key. These permissions are * required because Amazon S3 must decrypt and read data from the encrypted file * parts before it completes the multipart upload. For more information, see Multipart * upload API and permissions in the Amazon S3 User Guide.

If * your Identity and Access Management (IAM) user or role is in the same Amazon Web * Services account as the KMS key, then you must have these permissions on the key * policy. If your IAM user or role belongs to a different account than the key, * then you must have the permissions on both the key policy and your IAM user or * role.

For more information, see Protecting * Data Using Server-Side Encryption.

Access Permissions
*

When copying an object, you can optionally specify the accounts or groups * that should be granted specific permissions on the new object. There are two * ways to grant the permissions using the request headers:

  • *

    Specify a canned ACL with the x-amz-acl request header. For more * information, see Canned * ACL.

  • Specify access permissions explicitly with the * x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control * headers. These parameters map to the set of permissions that Amazon S3 supports * in an ACL. For more information, see Access * Control List (ACL) Overview.

You can use either a canned * ACL or specify access permissions explicitly. You cannot do both.

*
Server-Side Encryption-Specific Request Headers

You can * optionally tell Amazon S3 to encrypt data at rest using server-side encryption. * Server-side encryption is for data encryption at rest. Amazon S3 encrypts your * data as it writes it to disks in its data centers and decrypts it when you * access it. The option you use depends on whether you want to use Amazon Web * Services managed encryption keys or provide your own encryption key.

    *
  • Use encryption keys managed by Amazon S3 or customer managed key stored * in Amazon Web Services Key Management Service (Amazon Web Services KMS) – If you * want Amazon Web Services to manage the keys used to encrypt data, specify the * following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify * x-amz-server-side-encryption:aws:kms, but don't provide * x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the * Amazon Web Services managed key in Amazon Web Services KMS to protect the * data.

    All GET and PUT requests for an object * protected by Amazon Web Services KMS fail if you don't make them with SSL or by * using SigV4.

    For more information about server-side * encryption with KMS key (SSE-KMS), see Protecting * Data Using Server-Side Encryption with KMS keys.

  • Use * customer-provided encryption keys – If you want to manage your own encryption * keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more * information about server-side encryption with KMS keys (SSE-KMS), see Protecting * Data Using Server-Side Encryption with KMS keys.

*
Access-Control-List (ACL)-Specific Request Headers

You also can * use the following access control–related headers with this operation. By * default, all objects are private. Only the owner has full access control. When * adding a new object, you can grant permissions to individual Amazon Web Services * accounts or to predefined groups defined by Amazon S3. These permissions are * then added to the access control list (ACL) on the object. For more information, * see Using * ACLs. With this operation, you can grant access permissions using one of the * following two methods:

  • Specify a canned ACL * (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as * canned ACLs. Each canned ACL has a predefined set of grantees and * permissions. For more information, see Canned * ACL.

  • Specify access permissions explicitly — To explicitly * grant access permissions to specific Amazon Web Services accounts or groups, use * the following headers. Each header maps to specific permissions that Amazon S3 * supports in an ACL. For more information, see Access * Control List (ACL) Overview. In the header, you specify a list of grantees * who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a * type=value pair, where the type is one of the following:

    • * id – if the value specified is the canonical user ID of an Amazon * Web Services account

    • uri – if you are granting * permissions to a predefined group

    • emailAddress – * if the value specified is the email address of an Amazon Web Services * account

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    For example, the following x-amz-grant-read header * grants the Amazon Web Services accounts identified by account IDs permissions to * read object data and its metadata:

    x-amz-grant-read: id="111122223333", id="444455556666"

The * following operations are related to CreateMultipartUpload:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateMultipartUploadAsync(const Model::CreateMultipartUploadRequest& request, const CreateMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes the S3 bucket. All objects (including all object versions and delete * markers) in the bucket must be deleted before the bucket itself can be * deleted.

Related Resources

See Also:

AWS API * Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketOutcome DeleteBucket(const Model::DeleteBucketRequest& request) const;

/**
 *

Deletes the S3 bucket. All objects (including all object versions and delete * markers) in the bucket must be deleted before the bucket itself can be * deleted.

Related Resources

See Also:

AWS API * Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketOutcomeCallable DeleteBucketCallable(const Model::DeleteBucketRequest& request) const;

/**
 *

Deletes the S3 bucket. All objects (including all object versions and delete * markers) in the bucket must be deleted before the bucket itself can be * deleted.

Related Resources

See Also:

AWS API * Reference

* Queues the request into a thread executor and triggers associated callback
 * when operation has finished.
 */
// NOTE(review): restored the stripped <const Aws::Client::AsyncCallerContext>
// template argument on `context` — confirm against aws-sdk-cpp.
virtual void DeleteBucketAsync(const Model::DeleteBucketRequest& request,
                               const DeleteBucketResponseReceivedHandler& handler,
                               const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 *

Deletes an analytics configuration for the bucket (specified by the analytics * configuration ID).

To use this operation, you must have permissions to * perform the s3:PutAnalyticsConfiguration action. The bucket owner * has this permission by default. The bucket owner can grant this permission to * others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 analytics feature, see Amazon * S3 Analytics – Storage Class Analysis.

The following operations are * related to DeleteBucketAnalyticsConfiguration:

See Also:

AWS * API Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketAnalyticsConfigurationOutcome DeleteBucketAnalyticsConfiguration(const Model::DeleteBucketAnalyticsConfigurationRequest& request) const;

/**
 *

Deletes an analytics configuration for the bucket (specified by the analytics * configuration ID).

To use this operation, you must have permissions to * perform the s3:PutAnalyticsConfiguration action. The bucket owner * has this permission by default. The bucket owner can grant this permission to * others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 analytics feature, see Amazon * S3 Analytics – Storage Class Analysis.

The following operations are * related to DeleteBucketAnalyticsConfiguration:

See Also:

AWS * API Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketAnalyticsConfigurationOutcomeCallable DeleteBucketAnalyticsConfigurationCallable(const Model::DeleteBucketAnalyticsConfigurationRequest& request) const;

/**
 *

Deletes an analytics configuration for the bucket (specified by the analytics * configuration ID).

To use this operation, you must have permissions to * perform the s3:PutAnalyticsConfiguration action. The bucket owner * has this permission by default. The bucket owner can grant this permission to * others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 analytics feature, see Amazon * S3 Analytics – Storage Class Analysis.

The following operations are * related to DeleteBucketAnalyticsConfiguration:

See Also:

AWS * API Reference

* Queues the request into a thread executor and triggers associated callback
 * when operation has finished.
 */
// NOTE(review): restored the stripped <const Aws::Client::AsyncCallerContext>
// template argument on `context` — confirm against aws-sdk-cpp.
virtual void DeleteBucketAnalyticsConfigurationAsync(const Model::DeleteBucketAnalyticsConfigurationRequest& request,
                                                     const DeleteBucketAnalyticsConfigurationResponseReceivedHandler& handler,
                                                     const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 *

Deletes the cors configuration information set for the * bucket.

To use this operation, you must have permission to perform the * s3:PutBucketCORS action. The bucket owner has this permission by * default and can grant this permission to others.

For information about * cors, see Enabling * Cross-Origin Resource Sharing in the Amazon S3 User Guide.

Related Resources:

See Also:

AWS * API Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketCorsOutcome DeleteBucketCors(const Model::DeleteBucketCorsRequest& request) const;

/**
 *

Deletes the cors configuration information set for the * bucket.

To use this operation, you must have permission to perform the * s3:PutBucketCORS action. The bucket owner has this permission by * default and can grant this permission to others.

For information about * cors, see Enabling * Cross-Origin Resource Sharing in the Amazon S3 User Guide.

Related Resources:

See Also:

AWS * API Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketCorsOutcomeCallable DeleteBucketCorsCallable(const Model::DeleteBucketCorsRequest& request) const;

/**
 *

Deletes the cors configuration information set for the * bucket.

To use this operation, you must have permission to perform the * s3:PutBucketCORS action. The bucket owner has this permission by * default and can grant this permission to others.

For information about * cors, see Enabling * Cross-Origin Resource Sharing in the Amazon S3 User Guide.

Related Resources:

See Also:

AWS * API Reference

* Queues the request into a thread executor and triggers associated callback
 * when operation has finished.
 */
// NOTE(review): restored the stripped <const Aws::Client::AsyncCallerContext>
// template argument on `context` — confirm against aws-sdk-cpp.
virtual void DeleteBucketCorsAsync(const Model::DeleteBucketCorsRequest& request,
                                   const DeleteBucketCorsResponseReceivedHandler& handler,
                                   const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 *

This implementation of the DELETE action removes default encryption from the * bucket. For information about the Amazon S3 default encryption feature, see Amazon * S3 Default Bucket Encryption in the Amazon S3 User Guide.

To * use this operation, you must have permissions to perform the * s3:PutEncryptionConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to your Amazon S3 Resources in the Amazon S3 User * Guide.

Related Resources

See Also:

AWS * API Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketEncryptionOutcome DeleteBucketEncryption(const Model::DeleteBucketEncryptionRequest& request) const;

/**
 *

This implementation of the DELETE action removes default encryption from the * bucket. For information about the Amazon S3 default encryption feature, see Amazon * S3 Default Bucket Encryption in the Amazon S3 User Guide.

To * use this operation, you must have permissions to perform the * s3:PutEncryptionConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to your Amazon S3 Resources in the Amazon S3 User * Guide.

Related Resources

See Also:

AWS * API Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketEncryptionOutcomeCallable DeleteBucketEncryptionCallable(const Model::DeleteBucketEncryptionRequest& request) const;

/**
 *

This implementation of the DELETE action removes default encryption from the * bucket. For information about the Amazon S3 default encryption feature, see Amazon * S3 Default Bucket Encryption in the Amazon S3 User Guide.

To * use this operation, you must have permissions to perform the * s3:PutEncryptionConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to your Amazon S3 Resources in the Amazon S3 User * Guide.

Related Resources

See Also:

AWS * API Reference

* Queues the request into a thread executor and triggers associated callback
 * when operation has finished.
 */
// NOTE(review): restored the stripped <const Aws::Client::AsyncCallerContext>
// template argument on `context` — confirm against aws-sdk-cpp.
virtual void DeleteBucketEncryptionAsync(const Model::DeleteBucketEncryptionRequest& request,
                                         const DeleteBucketEncryptionResponseReceivedHandler& handler,
                                         const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 *

Deletes the S3 Intelligent-Tiering configuration from the specified * bucket.

The S3 Intelligent-Tiering storage class is designed to optimize * storage costs by automatically moving data to the most cost-effective storage * access tier, without performance impact or operational overhead. S3 * Intelligent-Tiering delivers automatic cost savings in two low latency and high * throughput access tiers. For data that can be accessed asynchronously, you can * choose to activate automatic archiving capabilities within the S3 * Intelligent-Tiering storage class.

The S3 Intelligent-Tiering storage class is the ideal storage class for data
with unknown, changing, or unpredictable access patterns, independent of object
size or retention period. If the size of an object is less than 128 KB, it is
not eligible for auto-tiering. Smaller objects can be stored, but they are
always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering
storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * DeleteBucketIntelligentTieringConfiguration include:

See Also:

AWS * API Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketIntelligentTieringConfigurationOutcome DeleteBucketIntelligentTieringConfiguration(const Model::DeleteBucketIntelligentTieringConfigurationRequest& request) const;

/**
 *

Deletes the S3 Intelligent-Tiering configuration from the specified * bucket.

The S3 Intelligent-Tiering storage class is designed to optimize * storage costs by automatically moving data to the most cost-effective storage * access tier, without performance impact or operational overhead. S3 * Intelligent-Tiering delivers automatic cost savings in two low latency and high * throughput access tiers. For data that can be accessed asynchronously, you can * choose to activate automatic archiving capabilities within the S3 * Intelligent-Tiering storage class.

The S3 Intelligent-Tiering storage class is the ideal storage class for data
with unknown, changing, or unpredictable access patterns, independent of object
size or retention period. If the size of an object is less than 128 KB, it is
not eligible for auto-tiering. Smaller objects can be stored, but they are
always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering
storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * DeleteBucketIntelligentTieringConfiguration include:

See Also:

AWS * API Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketIntelligentTieringConfigurationOutcomeCallable DeleteBucketIntelligentTieringConfigurationCallable(const Model::DeleteBucketIntelligentTieringConfigurationRequest& request) const;

/**
 *

Deletes the S3 Intelligent-Tiering configuration from the specified * bucket.

The S3 Intelligent-Tiering storage class is designed to optimize * storage costs by automatically moving data to the most cost-effective storage * access tier, without performance impact or operational overhead. S3 * Intelligent-Tiering delivers automatic cost savings in two low latency and high * throughput access tiers. For data that can be accessed asynchronously, you can * choose to activate automatic archiving capabilities within the S3 * Intelligent-Tiering storage class.

The S3 Intelligent-Tiering storage class is the ideal storage class for data
with unknown, changing, or unpredictable access patterns, independent of object
size or retention period. If the size of an object is less than 128 KB, it is
not eligible for auto-tiering. Smaller objects can be stored, but they are
always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering
storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * DeleteBucketIntelligentTieringConfiguration include:

See Also:

AWS * API Reference

* Queues the request into a thread executor and triggers associated callback
 * when operation has finished.
 */
// NOTE(review): restored the stripped <const Aws::Client::AsyncCallerContext>
// template argument on `context` — confirm against aws-sdk-cpp.
virtual void DeleteBucketIntelligentTieringConfigurationAsync(const Model::DeleteBucketIntelligentTieringConfigurationRequest& request,
                                                              const DeleteBucketIntelligentTieringConfigurationResponseReceivedHandler& handler,
                                                              const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 *

Deletes an inventory configuration (identified by the inventory ID) from the * bucket.

To use this operation, you must have permissions to perform the * s3:PutInventoryConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 inventory feature, see Amazon * S3 Inventory.

Operations related to * DeleteBucketInventoryConfiguration include:

See Also:

AWS * API Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketInventoryConfigurationOutcome DeleteBucketInventoryConfiguration(const Model::DeleteBucketInventoryConfigurationRequest& request) const;

/**
 *

Deletes an inventory configuration (identified by the inventory ID) from the * bucket.

To use this operation, you must have permissions to perform the * s3:PutInventoryConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 inventory feature, see Amazon * S3 Inventory.

Operations related to * DeleteBucketInventoryConfiguration include:

See Also:

AWS * API Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketInventoryConfigurationOutcomeCallable DeleteBucketInventoryConfigurationCallable(const Model::DeleteBucketInventoryConfigurationRequest& request) const;

/**
 *

Deletes an inventory configuration (identified by the inventory ID) from the * bucket.

To use this operation, you must have permissions to perform the * s3:PutInventoryConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 inventory feature, see Amazon * S3 Inventory.

Operations related to * DeleteBucketInventoryConfiguration include:

See Also:

AWS * API Reference

* Queues the request into a thread executor and triggers associated callback
 * when operation has finished.
 */
// NOTE(review): restored the stripped <const Aws::Client::AsyncCallerContext>
// template argument on `context` — confirm against aws-sdk-cpp.
virtual void DeleteBucketInventoryConfigurationAsync(const Model::DeleteBucketInventoryConfigurationRequest& request,
                                                     const DeleteBucketInventoryConfigurationResponseReceivedHandler& handler,
                                                     const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 *

Deletes the lifecycle configuration from the specified bucket. Amazon S3 * removes all the lifecycle configuration rules in the lifecycle subresource * associated with the bucket. Your objects never expire, and Amazon S3 no longer * automatically deletes any objects on the basis of rules contained in the deleted * lifecycle configuration.

To use this operation, you must have permission * to perform the s3:PutLifecycleConfiguration action. By default, the * bucket owner has this permission and the bucket owner can grant this permission * to others.

There is usually some time lag before lifecycle configuration * deletion is fully propagated to all the Amazon S3 systems.

For more * information about the object expiration, see Elements * to Describe Lifecycle Actions.

Related actions include:

See Also:

AWS * API Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketLifecycleOutcome DeleteBucketLifecycle(const Model::DeleteBucketLifecycleRequest& request) const;

/**
 *

Deletes the lifecycle configuration from the specified bucket. Amazon S3 * removes all the lifecycle configuration rules in the lifecycle subresource * associated with the bucket. Your objects never expire, and Amazon S3 no longer * automatically deletes any objects on the basis of rules contained in the deleted * lifecycle configuration.

To use this operation, you must have permission * to perform the s3:PutLifecycleConfiguration action. By default, the * bucket owner has this permission and the bucket owner can grant this permission * to others.

There is usually some time lag before lifecycle configuration * deletion is fully propagated to all the Amazon S3 systems.

For more * information about the object expiration, see Elements * to Describe Lifecycle Actions.

Related actions include:

See Also:

AWS * API Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketLifecycleOutcomeCallable DeleteBucketLifecycleCallable(const Model::DeleteBucketLifecycleRequest& request) const;

/**
 *

Deletes the lifecycle configuration from the specified bucket. Amazon S3 * removes all the lifecycle configuration rules in the lifecycle subresource * associated with the bucket. Your objects never expire, and Amazon S3 no longer * automatically deletes any objects on the basis of rules contained in the deleted * lifecycle configuration.

To use this operation, you must have permission * to perform the s3:PutLifecycleConfiguration action. By default, the * bucket owner has this permission and the bucket owner can grant this permission * to others.

There is usually some time lag before lifecycle configuration * deletion is fully propagated to all the Amazon S3 systems.

For more * information about the object expiration, see Elements * to Describe Lifecycle Actions.

Related actions include:

See Also:

AWS * API Reference

* Queues the request into a thread executor and triggers associated callback
 * when operation has finished.
 */
// NOTE(review): restored the stripped <const Aws::Client::AsyncCallerContext>
// template argument on `context` — confirm against aws-sdk-cpp.
virtual void DeleteBucketLifecycleAsync(const Model::DeleteBucketLifecycleRequest& request,
                                        const DeleteBucketLifecycleResponseReceivedHandler& handler,
                                        const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 *

Deletes a metrics configuration for the Amazon CloudWatch request metrics * (specified by the metrics configuration ID) from the bucket. Note that this * doesn't include the daily storage metrics.

To use this operation, you * must have permissions to perform the s3:PutMetricsConfiguration * action. The bucket owner has this permission by default. The bucket owner can * grant this permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * CloudWatch request metrics for Amazon S3, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to DeleteBucketMetricsConfiguration:

See Also:

AWS * API Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketMetricsConfigurationOutcome DeleteBucketMetricsConfiguration(const Model::DeleteBucketMetricsConfigurationRequest& request) const;

/**
 *

Deletes a metrics configuration for the Amazon CloudWatch request metrics * (specified by the metrics configuration ID) from the bucket. Note that this * doesn't include the daily storage metrics.

To use this operation, you * must have permissions to perform the s3:PutMetricsConfiguration * action. The bucket owner has this permission by default. The bucket owner can * grant this permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * CloudWatch request metrics for Amazon S3, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to DeleteBucketMetricsConfiguration:

See Also:

AWS * API Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketMetricsConfigurationOutcomeCallable DeleteBucketMetricsConfigurationCallable(const Model::DeleteBucketMetricsConfigurationRequest& request) const;

/**
 *

Deletes a metrics configuration for the Amazon CloudWatch request metrics * (specified by the metrics configuration ID) from the bucket. Note that this * doesn't include the daily storage metrics.

To use this operation, you * must have permissions to perform the s3:PutMetricsConfiguration * action. The bucket owner has this permission by default. The bucket owner can * grant this permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * CloudWatch request metrics for Amazon S3, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to DeleteBucketMetricsConfiguration:

See Also:

AWS * API Reference

* Queues the request into a thread executor and triggers associated callback
 * when operation has finished.
 */
// NOTE(review): restored the stripped <const Aws::Client::AsyncCallerContext>
// template argument on `context` — confirm against aws-sdk-cpp.
virtual void DeleteBucketMetricsConfigurationAsync(const Model::DeleteBucketMetricsConfigurationRequest& request,
                                                   const DeleteBucketMetricsConfigurationResponseReceivedHandler& handler,
                                                   const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 *

Removes OwnershipControls for an Amazon S3 bucket. To use this * operation, you must have the s3:PutBucketOwnershipControls * permission. For more information about Amazon S3 permissions, see Specifying * Permissions in a Policy.

For information about Amazon S3 Object * Ownership, see Using * Object Ownership.

The following operations are related to * DeleteBucketOwnershipControls:

See Also:

AWS * API Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketOwnershipControlsOutcome DeleteBucketOwnershipControls(const Model::DeleteBucketOwnershipControlsRequest& request) const;

/**
 *

Removes OwnershipControls for an Amazon S3 bucket. To use this * operation, you must have the s3:PutBucketOwnershipControls * permission. For more information about Amazon S3 permissions, see Specifying * Permissions in a Policy.

For information about Amazon S3 Object * Ownership, see Using * Object Ownership.

The following operations are related to * DeleteBucketOwnershipControls:

See Also:

AWS * API Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketOwnershipControlsOutcomeCallable DeleteBucketOwnershipControlsCallable(const Model::DeleteBucketOwnershipControlsRequest& request) const;

/**
 *

Removes OwnershipControls for an Amazon S3 bucket. To use this * operation, you must have the s3:PutBucketOwnershipControls * permission. For more information about Amazon S3 permissions, see Specifying * Permissions in a Policy.

For information about Amazon S3 Object * Ownership, see Using * Object Ownership.

The following operations are related to * DeleteBucketOwnershipControls:

See Also:

AWS * API Reference

* Queues the request into a thread executor and triggers associated callback
 * when operation has finished.
 */
// NOTE(review): restored the stripped <const Aws::Client::AsyncCallerContext>
// template argument on `context` — confirm against aws-sdk-cpp.
virtual void DeleteBucketOwnershipControlsAsync(const Model::DeleteBucketOwnershipControlsRequest& request,
                                                const DeleteBucketOwnershipControlsResponseReceivedHandler& handler,
                                                const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 *

This implementation of the DELETE action uses the policy subresource to * delete the policy of a specified bucket. If you are using an identity other than * the root user of the Amazon Web Services account that owns the bucket, the * calling identity must have the DeleteBucketPolicy permissions on * the specified bucket and belong to the bucket owner's account to use this * operation.

If you don't have DeleteBucketPolicy * permissions, Amazon S3 returns a 403 Access Denied error. If you * have the correct permissions, but you're not using an identity that belongs to * the bucket owner's account, Amazon S3 returns a 405 Method Not * Allowed error.

As a security precaution, the root * user of the Amazon Web Services account that owns a bucket can always use this * operation, even if the policy explicitly denies the root user the ability to * perform this action.

For more information about bucket * policies, see Using * Bucket Policies and UserPolicies.

The following operations are * related to DeleteBucketPolicy

See Also:

AWS * API Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketPolicyOutcome DeleteBucketPolicy(const Model::DeleteBucketPolicyRequest& request) const;

/**
 *

This implementation of the DELETE action uses the policy subresource to * delete the policy of a specified bucket. If you are using an identity other than * the root user of the Amazon Web Services account that owns the bucket, the * calling identity must have the DeleteBucketPolicy permissions on * the specified bucket and belong to the bucket owner's account to use this * operation.

If you don't have DeleteBucketPolicy * permissions, Amazon S3 returns a 403 Access Denied error. If you * have the correct permissions, but you're not using an identity that belongs to * the bucket owner's account, Amazon S3 returns a 405 Method Not * Allowed error.

As a security precaution, the root * user of the Amazon Web Services account that owns a bucket can always use this * operation, even if the policy explicitly denies the root user the ability to * perform this action.

For more information about bucket * policies, see Using * Bucket Policies and UserPolicies.

The following operations are * related to DeleteBucketPolicy

See Also:

AWS * API Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketPolicyOutcomeCallable DeleteBucketPolicyCallable(const Model::DeleteBucketPolicyRequest& request) const;

/**
 *

This implementation of the DELETE action uses the policy subresource to * delete the policy of a specified bucket. If you are using an identity other than * the root user of the Amazon Web Services account that owns the bucket, the * calling identity must have the DeleteBucketPolicy permissions on * the specified bucket and belong to the bucket owner's account to use this * operation.

If you don't have DeleteBucketPolicy * permissions, Amazon S3 returns a 403 Access Denied error. If you * have the correct permissions, but you're not using an identity that belongs to * the bucket owner's account, Amazon S3 returns a 405 Method Not * Allowed error.

As a security precaution, the root * user of the Amazon Web Services account that owns a bucket can always use this * operation, even if the policy explicitly denies the root user the ability to * perform this action.

For more information about bucket * policies, see Using * Bucket Policies and UserPolicies.

The following operations are * related to DeleteBucketPolicy

See Also:

AWS * API Reference

* Queues the request into a thread executor and triggers associated callback
 * when operation has finished.
 */
// NOTE(review): restored the stripped <const Aws::Client::AsyncCallerContext>
// template argument on `context` — confirm against aws-sdk-cpp.
virtual void DeleteBucketPolicyAsync(const Model::DeleteBucketPolicyRequest& request,
                                     const DeleteBucketPolicyResponseReceivedHandler& handler,
                                     const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 *

Deletes the replication configuration from the bucket.

To use this * operation, you must have permissions to perform the * s3:PutReplicationConfiguration action. The bucket owner has these * permissions by default and can grant it to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

It can take a while for the deletion of a replication configuration to fully
propagate.

For information about replication configuration, see Replication * in the Amazon S3 User Guide.

The following operations are related * to DeleteBucketReplication:

See Also:

AWS * API Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketReplicationOutcome DeleteBucketReplication(const Model::DeleteBucketReplicationRequest& request) const;

/**
 *

Deletes the replication configuration from the bucket.

To use this * operation, you must have permissions to perform the * s3:PutReplicationConfiguration action. The bucket owner has these * permissions by default and can grant it to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

It can take a while for the deletion of a replication configuration to fully
propagate.

For information about replication configuration, see Replication * in the Amazon S3 User Guide.

The following operations are related * to DeleteBucketReplication:

See Also:

AWS * API Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketReplicationOutcomeCallable DeleteBucketReplicationCallable(const Model::DeleteBucketReplicationRequest& request) const;

/**
 *

Deletes the replication configuration from the bucket.

To use this * operation, you must have permissions to perform the * s3:PutReplicationConfiguration action. The bucket owner has these * permissions by default and can grant it to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

It can take a while for the deletion of a replication configuration to fully
propagate.

For information about replication configuration, see Replication * in the Amazon S3 User Guide.

The following operations are related * to DeleteBucketReplication:

See Also:

AWS * API Reference

* Queues the request into a thread executor and triggers associated callback
 * when operation has finished.
 */
// NOTE(review): restored the stripped <const Aws::Client::AsyncCallerContext>
// template argument on `context` — confirm against aws-sdk-cpp.
virtual void DeleteBucketReplicationAsync(const Model::DeleteBucketReplicationRequest& request,
                                          const DeleteBucketReplicationResponseReceivedHandler& handler,
                                          const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 *

Deletes the tags from the bucket.

To use this operation, you must have * permission to perform the s3:PutBucketTagging action. By default, * the bucket owner has this permission and can grant this permission to others. *

The following operations are related to * DeleteBucketTagging:

See Also:

AWS * API Reference

*/
// Synchronous form; Callable/Async overloads of this operation follow.
virtual Model::DeleteBucketTaggingOutcome DeleteBucketTagging(const Model::DeleteBucketTaggingRequest& request) const;

/**
 *

Deletes the tags from the bucket.

To use this operation, you must have * permission to perform the s3:PutBucketTagging action. By default, * the bucket owner has this permission and can grant this permission to others. *

The following operations are related to * DeleteBucketTagging:

See Also:

AWS * API Reference

* returns a future to the operation so that it can be executed in parallel to
 * other requests.
 */
virtual Model::DeleteBucketTaggingOutcomeCallable DeleteBucketTaggingCallable(const Model::DeleteBucketTaggingRequest& request) const;

/**
 *

Deletes the tags from the bucket.

To use this operation, you must have * permission to perform the s3:PutBucketTagging action. By default, * the bucket owner has this permission and can grant this permission to others. *

The following operations are related to * DeleteBucketTagging:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteBucketTaggingAsync(const Model::DeleteBucketTaggingRequest& request, const DeleteBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

 *
 * This action removes the website configuration for a bucket. Amazon S3
 * returns a 200 OK response upon successfully deleting a website
 * configuration on the specified bucket. You will get a 200 OK response if
 * the website configuration you are trying to delete does not exist on the
 * bucket. Amazon S3 returns a 404 response if the bucket specified in the
 * request does not exist.
 *
 * This DELETE action requires the S3:DeleteBucketWebsite permission. By
 * default, only the bucket owner can delete the website configuration
 * attached to a bucket. However, bucket owners can grant other users
 * permission to delete the website configuration by writing a bucket
 * policy granting them the S3:DeleteBucketWebsite permission.
 *
 * For more information about hosting websites, see Hosting Websites on
 * Amazon S3.
 *
 * The following operations are related to DeleteBucketWebsite.
 *
 * See Also: AWS API Reference
 */
virtual Model::DeleteBucketWebsiteOutcome DeleteBucketWebsite(const Model::DeleteBucketWebsiteRequest& request) const; /** *

 *
 * This action removes the website configuration for a bucket. Amazon S3
 * returns a 200 OK response upon successfully deleting a website
 * configuration on the specified bucket. You will get a 200 OK response if
 * the website configuration you are trying to delete does not exist on the
 * bucket. Amazon S3 returns a 404 response if the bucket specified in the
 * request does not exist.
 *
 * This DELETE action requires the S3:DeleteBucketWebsite permission. By
 * default, only the bucket owner can delete the website configuration
 * attached to a bucket. However, bucket owners can grant other users
 * permission to delete the website configuration by writing a bucket
 * policy granting them the S3:DeleteBucketWebsite permission.
 *
 * For more information about hosting websites, see Hosting Websites on
 * Amazon S3.
 *
 * The following operations are related to DeleteBucketWebsite.
 *
 * See Also: AWS API Reference
 *
 * Returns a future to the operation so that it can be executed in
 * parallel to other requests.
 */
virtual Model::DeleteBucketWebsiteOutcomeCallable DeleteBucketWebsiteCallable(const Model::DeleteBucketWebsiteRequest& request) const; /** *

This action removes the website configuration for a bucket. Amazon S3 returns * a 200 OK response upon successfully deleting a website * configuration on the specified bucket. You will get a 200 OK * response if the website configuration you are trying to delete does not exist on * the bucket. Amazon S3 returns a 404 response if the bucket * specified in the request does not exist.

This DELETE action requires the * S3:DeleteBucketWebsite permission. By default, only the bucket * owner can delete the website configuration attached to a bucket. However, bucket * owners can grant other users permission to delete the website configuration by * writing a bucket policy granting them the S3:DeleteBucketWebsite * permission.

For more information about hosting websites, see Hosting * Websites on Amazon S3.

The following operations are related to * DeleteBucketWebsite:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteBucketWebsiteAsync(const Model::DeleteBucketWebsiteRequest& request, const DeleteBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

 *
 * Removes the null version (if there is one) of an object and inserts a
 * delete marker, which becomes the latest version of the object. If there
 * isn't a null version, Amazon S3 does not remove any objects but will
 * still respond that the command was successful.
 *
 * To remove a specific version, you must be the bucket owner and you must
 * use the version Id subresource. Using this subresource permanently
 * deletes the version. If the object deleted is a delete marker, Amazon S3
 * sets the response header, x-amz-delete-marker, to true.
 *
 * If the object you want to delete is in a bucket where the bucket
 * versioning configuration is MFA Delete enabled, you must include the
 * x-amz-mfa request header in the DELETE versionId request. Requests that
 * include x-amz-mfa must use HTTPS.
 *
 * For more information about MFA Delete, see Using MFA Delete. To see
 * sample requests that use versioning, see Sample Request.
 *
 * You can delete objects by explicitly calling DELETE Object or configure
 * its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them
 * for you. If you want to block users or accounts from removing or
 * deleting objects from your bucket, you must deny them the
 * s3:DeleteObject, s3:DeleteObjectVersion, and
 * s3:PutLifeCycleConfiguration actions.
 *
 * The following action is related to DeleteObject.
 *
 * See Also: AWS API Reference
 */
virtual Model::DeleteObjectOutcome DeleteObject(const Model::DeleteObjectRequest& request) const; /** *

 *
 * Removes the null version (if there is one) of an object and inserts a
 * delete marker, which becomes the latest version of the object. If there
 * isn't a null version, Amazon S3 does not remove any objects but will
 * still respond that the command was successful.
 *
 * To remove a specific version, you must be the bucket owner and you must
 * use the version Id subresource. Using this subresource permanently
 * deletes the version. If the object deleted is a delete marker, Amazon S3
 * sets the response header, x-amz-delete-marker, to true.
 *
 * If the object you want to delete is in a bucket where the bucket
 * versioning configuration is MFA Delete enabled, you must include the
 * x-amz-mfa request header in the DELETE versionId request. Requests that
 * include x-amz-mfa must use HTTPS.
 *
 * For more information about MFA Delete, see Using MFA Delete. To see
 * sample requests that use versioning, see Sample Request.
 *
 * You can delete objects by explicitly calling DELETE Object or configure
 * its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them
 * for you. If you want to block users or accounts from removing or
 * deleting objects from your bucket, you must deny them the
 * s3:DeleteObject, s3:DeleteObjectVersion, and
 * s3:PutLifeCycleConfiguration actions.
 *
 * The following action is related to DeleteObject.
 *
 * See Also: AWS API Reference
 *
 * Returns a future to the operation so that it can be executed in
 * parallel to other requests.
 */
virtual Model::DeleteObjectOutcomeCallable DeleteObjectCallable(const Model::DeleteObjectRequest& request) const; /** *

Removes the null version (if there is one) of an object and inserts a delete * marker, which becomes the latest version of the object. If there isn't a null * version, Amazon S3 does not remove any objects but will still respond that the * command was successful.

To remove a specific version, you must be the * bucket owner and you must use the version Id subresource. Using this subresource * permanently deletes the version. If the object deleted is a delete marker, * Amazon S3 sets the response header, x-amz-delete-marker, to true. *

If the object you want to delete is in a bucket where the bucket * versioning configuration is MFA Delete enabled, you must include the * x-amz-mfa request header in the DELETE versionId * request. Requests that include x-amz-mfa must use HTTPS.

* For more information about MFA Delete, see Using * MFA Delete. To see sample requests that use versioning, see Sample * Request.

You can delete objects by explicitly calling DELETE Object * or configure its lifecycle (PutBucketLifecycle) * to enable Amazon S3 to remove them for you. If you want to block users or * accounts from removing or deleting objects from your bucket, you must deny them * the s3:DeleteObject, s3:DeleteObjectVersion, and * s3:PutLifeCycleConfiguration actions.

The following action * is related to DeleteObject:

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteObjectAsync(const Model::DeleteObjectRequest& request, const DeleteObjectResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

 *
 * Removes the entire tag set from the specified object. For more
 * information about managing object tags, see Object Tagging.
 *
 * To use this operation, you must have permission to perform the
 * s3:DeleteObjectTagging action.
 *
 * To delete tags of a specific object version, add the versionId query
 * parameter in the request. You will need permission for the
 * s3:DeleteObjectVersionTagging action.
 *
 * The following operations are related to DeleteObjectTagging.
 *
 * See Also: AWS API Reference
 */
virtual Model::DeleteObjectTaggingOutcome DeleteObjectTagging(const Model::DeleteObjectTaggingRequest& request) const; /** *

 *
 * Removes the entire tag set from the specified object. For more
 * information about managing object tags, see Object Tagging.
 *
 * To use this operation, you must have permission to perform the
 * s3:DeleteObjectTagging action.
 *
 * To delete tags of a specific object version, add the versionId query
 * parameter in the request. You will need permission for the
 * s3:DeleteObjectVersionTagging action.
 *
 * The following operations are related to DeleteObjectTagging.
 *
 * See Also: AWS API Reference
 *
 * Returns a future to the operation so that it can be executed in
 * parallel to other requests.
 */
virtual Model::DeleteObjectTaggingOutcomeCallable DeleteObjectTaggingCallable(const Model::DeleteObjectTaggingRequest& request) const; /** *

Removes the entire tag set from the specified object. For more information * about managing object tags, see * Object Tagging.

To use this operation, you must have permission to * perform the s3:DeleteObjectTagging action.

To delete tags of * a specific object version, add the versionId query parameter in the * request. You will need permission for the * s3:DeleteObjectVersionTagging action.

The following * operations are related to DeleteBucketMetricsConfiguration:

*

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteObjectTaggingAsync(const Model::DeleteObjectTaggingRequest& request, const DeleteObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

 *
 * This action enables you to delete multiple objects from a bucket using a
 * single HTTP request. If you know the object keys that you want to
 * delete, then this action provides a suitable alternative to sending
 * individual delete requests, reducing per-request overhead.
 *
 * The request contains a list of up to 1000 keys that you want to delete.
 * In the XML, you provide the object key names, and optionally, version
 * IDs if you want to delete a specific version of the object from a
 * versioning-enabled bucket. For each key, Amazon S3 performs a delete
 * action and returns the result of that delete, success, or failure, in
 * the response. Note that if the object specified in the request is not
 * found, Amazon S3 returns the result as deleted.
 *
 * The action supports two modes for the response: verbose and quiet. By
 * default, the action uses verbose mode in which the response includes the
 * result of deletion of each key in your request. In quiet mode the
 * response includes only keys where the delete action encountered an
 * error. For a successful deletion, the action does not return any
 * information about the delete in the response body.
 *
 * When performing this action on an MFA Delete enabled bucket, that
 * attempts to delete any versioned objects, you must include an MFA token.
 * If you do not provide one, the entire request will fail, even if there
 * are non-versioned objects you are trying to delete. If you provide an
 * invalid token, whether there are versioned keys in the request or not,
 * the entire Multi-Object Delete request will fail. For information about
 * MFA Delete, see MFA Delete.
 *
 * Finally, the Content-MD5 header is required for all Multi-Object Delete
 * requests. Amazon S3 uses the header value to ensure that your request
 * body has not been altered in transit.
 *
 * The following operations are related to DeleteObjects.
 *
 * See Also: AWS API Reference
 */
virtual Model::DeleteObjectsOutcome DeleteObjects(const Model::DeleteObjectsRequest& request) const; /** *

 *
 * This action enables you to delete multiple objects from a bucket using a
 * single HTTP request. If you know the object keys that you want to
 * delete, then this action provides a suitable alternative to sending
 * individual delete requests, reducing per-request overhead.
 *
 * The request contains a list of up to 1000 keys that you want to delete.
 * In the XML, you provide the object key names, and optionally, version
 * IDs if you want to delete a specific version of the object from a
 * versioning-enabled bucket. For each key, Amazon S3 performs a delete
 * action and returns the result of that delete, success, or failure, in
 * the response. Note that if the object specified in the request is not
 * found, Amazon S3 returns the result as deleted.
 *
 * The action supports two modes for the response: verbose and quiet. By
 * default, the action uses verbose mode in which the response includes the
 * result of deletion of each key in your request. In quiet mode the
 * response includes only keys where the delete action encountered an
 * error. For a successful deletion, the action does not return any
 * information about the delete in the response body.
 *
 * When performing this action on an MFA Delete enabled bucket, that
 * attempts to delete any versioned objects, you must include an MFA token.
 * If you do not provide one, the entire request will fail, even if there
 * are non-versioned objects you are trying to delete. If you provide an
 * invalid token, whether there are versioned keys in the request or not,
 * the entire Multi-Object Delete request will fail. For information about
 * MFA Delete, see MFA Delete.
 *
 * Finally, the Content-MD5 header is required for all Multi-Object Delete
 * requests. Amazon S3 uses the header value to ensure that your request
 * body has not been altered in transit.
 *
 * The following operations are related to DeleteObjects.
 *
 * See Also: AWS API Reference
 *
 * Returns a future to the operation so that it can be executed in
 * parallel to other requests.
 */
virtual Model::DeleteObjectsOutcomeCallable DeleteObjectsCallable(const Model::DeleteObjectsRequest& request) const; /** *

This action enables you to delete multiple objects from a bucket using a * single HTTP request. If you know the object keys that you want to delete, then * this action provides a suitable alternative to sending individual delete * requests, reducing per-request overhead.

The request contains a list of * up to 1000 keys that you want to delete. In the XML, you provide the object key * names, and optionally, version IDs if you want to delete a specific version of * the object from a versioning-enabled bucket. For each key, Amazon S3 performs a * delete action and returns the result of that delete, success, or failure, in the * response. Note that if the object specified in the request is not found, Amazon * S3 returns the result as deleted.

The action supports two modes for the * response: verbose and quiet. By default, the action uses verbose mode in which * the response includes the result of deletion of each key in your request. In * quiet mode the response includes only keys where the delete action encountered * an error. For a successful deletion, the action does not return any information * about the delete in the response body.

When performing this action on an * MFA Delete enabled bucket, that attempts to delete any versioned objects, you * must include an MFA token. If you do not provide one, the entire request will * fail, even if there are non-versioned objects you are trying to delete. If you * provide an invalid token, whether there are versioned keys in the request or * not, the entire Multi-Object Delete request will fail. For information about MFA * Delete, see * MFA Delete.

Finally, the Content-MD5 header is required for all * Multi-Object Delete requests. Amazon S3 uses the header value to ensure that * your request body has not been altered in transit.

The following * operations are related to DeleteObjects:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteObjectsAsync(const Model::DeleteObjectsRequest& request, const DeleteObjectsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

 *
 * Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To
 * use this operation, you must have the s3:PutBucketPublicAccessBlock
 * permission. For more information about permissions, see Permissions
 * Related to Bucket Subresource Operations and Managing Access Permissions
 * to Your Amazon S3 Resources.
 *
 * The following operations are related to DeletePublicAccessBlock.
 *
 * See Also: AWS API Reference
 */
virtual Model::DeletePublicAccessBlockOutcome DeletePublicAccessBlock(const Model::DeletePublicAccessBlockRequest& request) const; /** *

 *
 * Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To
 * use this operation, you must have the s3:PutBucketPublicAccessBlock
 * permission. For more information about permissions, see Permissions
 * Related to Bucket Subresource Operations and Managing Access Permissions
 * to Your Amazon S3 Resources.
 *
 * The following operations are related to DeletePublicAccessBlock.
 *
 * See Also: AWS API Reference
 *
 * Returns a future to the operation so that it can be executed in
 * parallel to other requests.
 */
virtual Model::DeletePublicAccessBlockOutcomeCallable DeletePublicAccessBlockCallable(const Model::DeletePublicAccessBlockRequest& request) const; /** *

Removes the PublicAccessBlock configuration for an Amazon S3 * bucket. To use this operation, you must have the * s3:PutBucketPublicAccessBlock permission. For more information * about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

The following * operations are related to DeletePublicAccessBlock:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeletePublicAccessBlockAsync(const Model::DeletePublicAccessBlockRequest& request, const DeletePublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

 *
 * This implementation of the GET action uses the accelerate subresource to
 * return the Transfer Acceleration state of a bucket, which is either
 * Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level
 * feature that enables you to perform faster data transfers to and from
 * Amazon S3.
 *
 * To use this operation, you must have permission to perform the
 * s3:GetAccelerateConfiguration action. The bucket owner has this
 * permission by default. The bucket owner can grant this permission to
 * others. For more information about permissions, see Permissions Related
 * to Bucket Subresource Operations and Managing Access Permissions to your
 * Amazon S3 Resources in the Amazon S3 User Guide.
 *
 * You set the Transfer Acceleration state of an existing bucket to Enabled
 * or Suspended by using the PutBucketAccelerateConfiguration operation.
 *
 * A GET accelerate request does not return a state value for a bucket that
 * has no transfer acceleration state. A bucket has no Transfer
 * Acceleration state if a state has never been set on the bucket.
 *
 * For more information about transfer acceleration, see Transfer
 * Acceleration in the Amazon S3 User Guide.
 *
 * Related Resources: see the Amazon S3 API Reference.
 *
 * See Also: AWS API Reference
 */
virtual Model::GetBucketAccelerateConfigurationOutcome GetBucketAccelerateConfiguration(const Model::GetBucketAccelerateConfigurationRequest& request) const; /** *

 *
 * This implementation of the GET action uses the accelerate subresource to
 * return the Transfer Acceleration state of a bucket, which is either
 * Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level
 * feature that enables you to perform faster data transfers to and from
 * Amazon S3.
 *
 * To use this operation, you must have permission to perform the
 * s3:GetAccelerateConfiguration action. The bucket owner has this
 * permission by default. The bucket owner can grant this permission to
 * others. For more information about permissions, see Permissions Related
 * to Bucket Subresource Operations and Managing Access Permissions to your
 * Amazon S3 Resources in the Amazon S3 User Guide.
 *
 * You set the Transfer Acceleration state of an existing bucket to Enabled
 * or Suspended by using the PutBucketAccelerateConfiguration operation.
 *
 * A GET accelerate request does not return a state value for a bucket that
 * has no transfer acceleration state. A bucket has no Transfer
 * Acceleration state if a state has never been set on the bucket.
 *
 * For more information about transfer acceleration, see Transfer
 * Acceleration in the Amazon S3 User Guide.
 *
 * Related Resources: see the Amazon S3 API Reference.
 *
 * See Also: AWS API Reference
 *
 * Returns a future to the operation so that it can be executed in
 * parallel to other requests.
 */
virtual Model::GetBucketAccelerateConfigurationOutcomeCallable GetBucketAccelerateConfigurationCallable(const Model::GetBucketAccelerateConfigurationRequest& request) const; /** *

This implementation of the GET action uses the accelerate * subresource to return the Transfer Acceleration state of a bucket, which is * either Enabled or Suspended. Amazon S3 Transfer * Acceleration is a bucket-level feature that enables you to perform faster data * transfers to and from Amazon S3.

To use this operation, you must have * permission to perform the s3:GetAccelerateConfiguration action. The * bucket owner has this permission by default. The bucket owner can grant this * permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to your Amazon S3 Resources in the Amazon S3 User * Guide.

You set the Transfer Acceleration state of an existing bucket * to Enabled or Suspended by using the PutBucketAccelerateConfiguration * operation.

A GET accelerate request does not return a state * value for a bucket that has no transfer acceleration state. A bucket has no * Transfer Acceleration state if a state has never been set on the bucket.

*

For more information about transfer acceleration, see Transfer * Acceleration in the Amazon S3 User Guide.

Related * Resources

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketAccelerateConfigurationAsync(const Model::GetBucketAccelerateConfigurationRequest& request, const GetBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

 *
 * This implementation of the GET action uses the acl subresource to return
 * the access control list (ACL) of a bucket. To use GET to return the ACL
 * of the bucket, you must have READ_ACP access to the bucket. If READ_ACP
 * permission is granted to the anonymous user, you can return the ACL of
 * the bucket without using an authorization header.
 *
 * Related Resources: see the Amazon S3 API Reference.
 *
 * See Also: AWS API Reference
 */
virtual Model::GetBucketAclOutcome GetBucketAcl(const Model::GetBucketAclRequest& request) const; /** *

 *
 * This implementation of the GET action uses the acl subresource to return
 * the access control list (ACL) of a bucket. To use GET to return the ACL
 * of the bucket, you must have READ_ACP access to the bucket. If READ_ACP
 * permission is granted to the anonymous user, you can return the ACL of
 * the bucket without using an authorization header.
 *
 * Related Resources: see the Amazon S3 API Reference.
 *
 * See Also: AWS API Reference
 *
 * Returns a future to the operation so that it can be executed in
 * parallel to other requests.
 */
virtual Model::GetBucketAclOutcomeCallable GetBucketAclCallable(const Model::GetBucketAclRequest& request) const; /** *

This implementation of the GET action uses the acl * subresource to return the access control list (ACL) of a bucket. To use * GET to return the ACL of the bucket, you must have * READ_ACP access to the bucket. If READ_ACP permission * is granted to the anonymous user, you can return the ACL of the bucket without * using an authorization header.

Related Resources *

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketAclAsync(const Model::GetBucketAclRequest& request, const GetBucketAclResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

 *
 * This implementation of the GET action returns an analytics configuration
 * (identified by the analytics configuration ID) from the bucket.
 *
 * To use this operation, you must have permissions to perform the
 * s3:GetAnalyticsConfiguration action. The bucket owner has this
 * permission by default. The bucket owner can grant this permission to
 * others. For more information about permissions, see Permissions Related
 * to Bucket Subresource Operations and Managing Access Permissions to Your
 * Amazon S3 Resources in the Amazon S3 User Guide.
 *
 * For information about Amazon S3 analytics feature, see Amazon S3
 * Analytics – Storage Class Analysis in the Amazon S3 User Guide.
 *
 * Related Resources: see the Amazon S3 API Reference.
 *
 * See Also: AWS API Reference
 */
virtual Model::GetBucketAnalyticsConfigurationOutcome GetBucketAnalyticsConfiguration(const Model::GetBucketAnalyticsConfigurationRequest& request) const; /** *

 *
 * This implementation of the GET action returns an analytics configuration
 * (identified by the analytics configuration ID) from the bucket.
 *
 * To use this operation, you must have permissions to perform the
 * s3:GetAnalyticsConfiguration action. The bucket owner has this
 * permission by default. The bucket owner can grant this permission to
 * others. For more information about permissions, see Permissions Related
 * to Bucket Subresource Operations and Managing Access Permissions to Your
 * Amazon S3 Resources in the Amazon S3 User Guide.
 *
 * For information about Amazon S3 analytics feature, see Amazon S3
 * Analytics – Storage Class Analysis in the Amazon S3 User Guide.
 *
 * Related Resources: see the Amazon S3 API Reference.
 *
 * See Also: AWS API Reference
 *
 * Returns a future to the operation so that it can be executed in
 * parallel to other requests.
 */
virtual Model::GetBucketAnalyticsConfigurationOutcomeCallable GetBucketAnalyticsConfigurationCallable(const Model::GetBucketAnalyticsConfigurationRequest& request) const; /** *

This implementation of the GET action returns an analytics configuration * (identified by the analytics configuration ID) from the bucket.

To use * this operation, you must have permissions to perform the * s3:GetAnalyticsConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see * Permissions Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources in the Amazon S3 User * Guide.

For information about Amazon S3 analytics feature, see Amazon * S3 Analytics – Storage Class Analysis in the Amazon S3 User * Guide.

Related Resources

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketAnalyticsConfigurationAsync(const Model::GetBucketAnalyticsConfigurationRequest& request, const GetBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

 *
 * Returns the cors configuration information set for the bucket.
 *
 * To use this operation, you must have permission to perform the
 * s3:GetBucketCORS action. By default, the bucket owner has this
 * permission and can grant it to others.
 *
 * For more information about cors, see Enabling Cross-Origin Resource
 * Sharing.
 *
 * The following operations are related to GetBucketCors.
 *
 * See Also: AWS API Reference
 */
virtual Model::GetBucketCorsOutcome GetBucketCors(const Model::GetBucketCorsRequest& request) const; /** *

 *
 * Returns the cors configuration information set for the bucket.
 *
 * To use this operation, you must have permission to perform the
 * s3:GetBucketCORS action. By default, the bucket owner has this
 * permission and can grant it to others.
 *
 * For more information about cors, see Enabling Cross-Origin Resource
 * Sharing.
 *
 * The following operations are related to GetBucketCors.
 *
 * See Also: AWS API Reference
 *
 * Returns a future to the operation so that it can be executed in
 * parallel to other requests.
 */
virtual Model::GetBucketCorsOutcomeCallable GetBucketCorsCallable(const Model::GetBucketCorsRequest& request) const; /** *

Returns the cors configuration information set for the bucket.

To use * this operation, you must have permission to perform the s3:GetBucketCORS action. * By default, the bucket owner has this permission and can grant it to others.

*

For more information about cors, see Enabling * Cross-Origin Resource Sharing.

The following operations are related * to GetBucketCors:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketCorsAsync(const Model::GetBucketCorsRequest& request, const GetBucketCorsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

 *
 * Returns the default encryption configuration for an Amazon S3 bucket. If
 * the bucket does not have a default encryption configuration,
 * GetBucketEncryption returns
 * ServerSideEncryptionConfigurationNotFoundError.
 *
 * For information about the Amazon S3 default encryption feature, see
 * Amazon S3 Default Bucket Encryption.
 *
 * To use this operation, you must have permission to perform the
 * s3:GetEncryptionConfiguration action. The bucket owner has this
 * permission by default. The bucket owner can grant this permission to
 * others. For more information about permissions, see Permissions Related
 * to Bucket Subresource Operations and Managing Access Permissions to Your
 * Amazon S3 Resources.
 *
 * The following operations are related to GetBucketEncryption.
 *
 * See Also: AWS API Reference
 */
virtual Model::GetBucketEncryptionOutcome GetBucketEncryption(const Model::GetBucketEncryptionRequest& request) const; /** *

 *
 * Returns the default encryption configuration for an Amazon S3 bucket. If
 * the bucket does not have a default encryption configuration,
 * GetBucketEncryption returns
 * ServerSideEncryptionConfigurationNotFoundError.
 *
 * For information about the Amazon S3 default encryption feature, see
 * Amazon S3 Default Bucket Encryption.
 *
 * To use this operation, you must have permission to perform the
 * s3:GetEncryptionConfiguration action. The bucket owner has this
 * permission by default. The bucket owner can grant this permission to
 * others. For more information about permissions, see Permissions Related
 * to Bucket Subresource Operations and Managing Access Permissions to Your
 * Amazon S3 Resources.
 *
 * The following operations are related to GetBucketEncryption.
 *
 * See Also: AWS API Reference
 *
 * Returns a future to the operation so that it can be executed in
 * parallel to other requests.
 */
virtual Model::GetBucketEncryptionOutcomeCallable GetBucketEncryptionCallable(const Model::GetBucketEncryptionRequest& request) const; /** *

Returns the default encryption configuration for an Amazon S3 bucket. If the * bucket does not have a default encryption configuration, GetBucketEncryption * returns ServerSideEncryptionConfigurationNotFoundError.

For * information about the Amazon S3 default encryption feature, see Amazon * S3 Default Bucket Encryption.

To use this operation, you must have * permission to perform the s3:GetEncryptionConfiguration action. The * bucket owner has this permission by default. The bucket owner can grant this * permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

The following * operations are related to GetBucketEncryption:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketEncryptionAsync(const Model::GetBucketEncryptionRequest& request, const GetBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets the S3 Intelligent-Tiering configuration from the specified bucket.

*

The S3 Intelligent-Tiering storage class is designed to optimize storage * costs by automatically moving data to the most cost-effective storage access * tier, without performance impact or operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings in two low latency and high throughput access * tiers. For data that can be accessed asynchronously, you can choose to activate * automatic archiving capabilities within the S3 Intelligent-Tiering storage * class.

The S3 Intelligent-Tiering storage class is the ideal storage * class for data with unknown, changing, or unpredictable access patterns, * independent of object size or retention period. If the size of an object is less * than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, * but they are always charged at the Frequent Access tier rates in the S3 * Intelligent-Tiering storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * GetBucketIntelligentTieringConfiguration include:

See Also:

AWS * API Reference

*/ virtual Model::GetBucketIntelligentTieringConfigurationOutcome GetBucketIntelligentTieringConfiguration(const Model::GetBucketIntelligentTieringConfigurationRequest& request) const; /** *

Gets the S3 Intelligent-Tiering configuration from the specified bucket.

*

The S3 Intelligent-Tiering storage class is designed to optimize storage * costs by automatically moving data to the most cost-effective storage access * tier, without performance impact or operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings in two low latency and high throughput access * tiers. For data that can be accessed asynchronously, you can choose to activate * automatic archiving capabilities within the S3 Intelligent-Tiering storage * class.

The S3 Intelligent-Tiering storage class is the ideal storage * class for data with unknown, changing, or unpredictable access patterns, * independent of object size or retention period. If the size of an object is less * than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, * but they are always charged at the Frequent Access tier rates in the S3 * Intelligent-Tiering storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * GetBucketIntelligentTieringConfiguration include:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetBucketIntelligentTieringConfigurationOutcomeCallable GetBucketIntelligentTieringConfigurationCallable(const Model::GetBucketIntelligentTieringConfigurationRequest& request) const; /** *

Gets the S3 Intelligent-Tiering configuration from the specified bucket.

*

The S3 Intelligent-Tiering storage class is designed to optimize storage * costs by automatically moving data to the most cost-effective storage access * tier, without performance impact or operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings in two low latency and high throughput access * tiers. For data that can be accessed asynchronously, you can choose to activate * automatic archiving capabilities within the S3 Intelligent-Tiering storage * class.

The S3 Intelligent-Tiering storage class is the ideal storage * class for data with unknown, changing, or unpredictable access patterns, * independent of object size or retention period. If the size of an object is less * than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, * but they are always charged at the Frequent Access tier rates in the S3 * Intelligent-Tiering storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * GetBucketIntelligentTieringConfiguration include:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketIntelligentTieringConfigurationAsync(const Model::GetBucketIntelligentTieringConfigurationRequest& request, const GetBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns an inventory configuration (identified by the inventory configuration * ID) from the bucket.

To use this operation, you must have permissions to * perform the s3:GetInventoryConfiguration action. The bucket owner * has this permission by default and can grant this permission to others. For more * information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 inventory feature, see Amazon * S3 Inventory.

The following operations are related to * GetBucketInventoryConfiguration:

See Also:

AWS * API Reference

*/ virtual Model::GetBucketInventoryConfigurationOutcome GetBucketInventoryConfiguration(const Model::GetBucketInventoryConfigurationRequest& request) const; /** *

Returns an inventory configuration (identified by the inventory configuration * ID) from the bucket.

To use this operation, you must have permissions to * perform the s3:GetInventoryConfiguration action. The bucket owner * has this permission by default and can grant this permission to others. For more * information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 inventory feature, see Amazon * S3 Inventory.

The following operations are related to * GetBucketInventoryConfiguration:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetBucketInventoryConfigurationOutcomeCallable GetBucketInventoryConfigurationCallable(const Model::GetBucketInventoryConfigurationRequest& request) const; /** *

Returns an inventory configuration (identified by the inventory configuration * ID) from the bucket.

To use this operation, you must have permissions to * perform the s3:GetInventoryConfiguration action. The bucket owner * has this permission by default and can grant this permission to others. For more * information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 inventory feature, see Amazon * S3 Inventory.

The following operations are related to * GetBucketInventoryConfiguration:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketInventoryConfigurationAsync(const Model::GetBucketInventoryConfigurationRequest& request, const GetBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Bucket lifecycle configuration now supports specifying a lifecycle * rule using an object key name prefix, one or more object tags, or a combination * of both. Accordingly, this section describes the latest API. The response * describes the new filter element that you can use to specify a filter to select * a subset of objects to which the rule applies. If you are using a previous * version of the lifecycle configuration, it still works. For the earlier action, * see GetBucketLifecycle.

*

Returns the lifecycle configuration information set on the bucket. * For information about lifecycle configuration, see Object * Lifecycle Management.

To use this operation, you must have permission * to perform the s3:GetLifecycleConfiguration action. The bucket * owner has this permission, by default. The bucket owner can grant this * permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

* GetBucketLifecycleConfiguration has the following special * error:

  • Error code: NoSuchLifecycleConfiguration *

    • Description: The lifecycle configuration does not exist.

      *
    • HTTP Status Code: 404 Not Found

    • SOAP Fault Code * Prefix: Client

The following operations are * related to GetBucketLifecycleConfiguration:

See Also:

AWS * API Reference

*/ virtual Model::GetBucketLifecycleConfigurationOutcome GetBucketLifecycleConfiguration(const Model::GetBucketLifecycleConfigurationRequest& request) const; /** *

Bucket lifecycle configuration now supports specifying a lifecycle * rule using an object key name prefix, one or more object tags, or a combination * of both. Accordingly, this section describes the latest API. The response * describes the new filter element that you can use to specify a filter to select * a subset of objects to which the rule applies. If you are using a previous * version of the lifecycle configuration, it still works. For the earlier action, * see GetBucketLifecycle.

*

Returns the lifecycle configuration information set on the bucket. * For information about lifecycle configuration, see Object * Lifecycle Management.

To use this operation, you must have permission * to perform the s3:GetLifecycleConfiguration action. The bucket * owner has this permission, by default. The bucket owner can grant this * permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

* GetBucketLifecycleConfiguration has the following special * error:

  • Error code: NoSuchLifecycleConfiguration *

    • Description: The lifecycle configuration does not exist.

      *
    • HTTP Status Code: 404 Not Found

    • SOAP Fault Code * Prefix: Client

The following operations are * related to GetBucketLifecycleConfiguration:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetBucketLifecycleConfigurationOutcomeCallable GetBucketLifecycleConfigurationCallable(const Model::GetBucketLifecycleConfigurationRequest& request) const; /** *

Bucket lifecycle configuration now supports specifying a lifecycle * rule using an object key name prefix, one or more object tags, or a combination * of both. Accordingly, this section describes the latest API. The response * describes the new filter element that you can use to specify a filter to select * a subset of objects to which the rule applies. If you are using a previous * version of the lifecycle configuration, it still works. For the earlier action, * see GetBucketLifecycle.

*

Returns the lifecycle configuration information set on the bucket. * For information about lifecycle configuration, see Object * Lifecycle Management.

To use this operation, you must have permission * to perform the s3:GetLifecycleConfiguration action. The bucket * owner has this permission, by default. The bucket owner can grant this * permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

* GetBucketLifecycleConfiguration has the following special * error:

  • Error code: NoSuchLifecycleConfiguration *

    • Description: The lifecycle configuration does not exist.

      *
    • HTTP Status Code: 404 Not Found

    • SOAP Fault Code * Prefix: Client

The following operations are * related to GetBucketLifecycleConfiguration:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketLifecycleConfigurationAsync(const Model::GetBucketLifecycleConfigurationRequest& request, const GetBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns the Region the bucket resides in. You set the bucket's Region using * the LocationConstraint request parameter in a * CreateBucket request. For more information, see CreateBucket.

*

To use this implementation of the operation, you must be the bucket * owner.

To use this API against an access point, provide the alias of the * access point in place of the bucket name.

The following operations are * related to GetBucketLocation:

See Also:

AWS * API Reference

*/ virtual Model::GetBucketLocationOutcome GetBucketLocation(const Model::GetBucketLocationRequest& request) const; /** *

Returns the Region the bucket resides in. You set the bucket's Region using * the LocationConstraint request parameter in a * CreateBucket request. For more information, see CreateBucket.

*

To use this implementation of the operation, you must be the bucket * owner.

To use this API against an access point, provide the alias of the * access point in place of the bucket name.

The following operations are * related to GetBucketLocation:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetBucketLocationOutcomeCallable GetBucketLocationCallable(const Model::GetBucketLocationRequest& request) const; /** *

Returns the Region the bucket resides in. You set the bucket's Region using * the LocationConstraint request parameter in a * CreateBucket request. For more information, see CreateBucket.

*

To use this implementation of the operation, you must be the bucket * owner.

To use this API against an access point, provide the alias of the * access point in place of the bucket name.

The following operations are * related to GetBucketLocation:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketLocationAsync(const Model::GetBucketLocationRequest& request, const GetBucketLocationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns the logging status of a bucket and the permissions users have to view * and modify that status. To use GET, you must be the bucket owner.

The * following operations are related to GetBucketLogging:

See Also:

AWS * API Reference

*/ virtual Model::GetBucketLoggingOutcome GetBucketLogging(const Model::GetBucketLoggingRequest& request) const; /** *

Returns the logging status of a bucket and the permissions users have to view * and modify that status. To use GET, you must be the bucket owner.

The * following operations are related to GetBucketLogging:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetBucketLoggingOutcomeCallable GetBucketLoggingCallable(const Model::GetBucketLoggingRequest& request) const; /** *

Returns the logging status of a bucket and the permissions users have to view * and modify that status. To use GET, you must be the bucket owner.

The * following operations are related to GetBucketLogging:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketLoggingAsync(const Model::GetBucketLoggingRequest& request, const GetBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets a metrics configuration (specified by the metrics configuration ID) from * the bucket. Note that this doesn't include the daily storage metrics.

To * use this operation, you must have permissions to perform the * s3:GetMetricsConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information * about CloudWatch request metrics for Amazon S3, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to GetBucketMetricsConfiguration:

See Also:

AWS * API Reference

*/ virtual Model::GetBucketMetricsConfigurationOutcome GetBucketMetricsConfiguration(const Model::GetBucketMetricsConfigurationRequest& request) const; /** *

Gets a metrics configuration (specified by the metrics configuration ID) from * the bucket. Note that this doesn't include the daily storage metrics.

To * use this operation, you must have permissions to perform the * s3:GetMetricsConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information * about CloudWatch request metrics for Amazon S3, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to GetBucketMetricsConfiguration:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetBucketMetricsConfigurationOutcomeCallable GetBucketMetricsConfigurationCallable(const Model::GetBucketMetricsConfigurationRequest& request) const; /** *

Gets a metrics configuration (specified by the metrics configuration ID) from * the bucket. Note that this doesn't include the daily storage metrics.

To * use this operation, you must have permissions to perform the * s3:GetMetricsConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information * about CloudWatch request metrics for Amazon S3, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to GetBucketMetricsConfiguration:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketMetricsConfigurationAsync(const Model::GetBucketMetricsConfigurationRequest& request, const GetBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns the notification configuration of a bucket.

If notifications * are not enabled on the bucket, the action returns an empty * NotificationConfiguration element.

By default, you must be * the bucket owner to read the notification configuration of a bucket. However, * the bucket owner can use a bucket policy to grant permission to other users to * read this configuration with the s3:GetBucketNotification * permission.

For more information about setting and reading the * notification configuration on a bucket, see Setting * Up Notification of Bucket Events. For more information about bucket * policies, see Using * Bucket Policies.

The following action is related to * GetBucketNotification:

See Also:

AWS * API Reference

*/ virtual Model::GetBucketNotificationConfigurationOutcome GetBucketNotificationConfiguration(const Model::GetBucketNotificationConfigurationRequest& request) const; /** *

Returns the notification configuration of a bucket.

If notifications * are not enabled on the bucket, the action returns an empty * NotificationConfiguration element.

By default, you must be * the bucket owner to read the notification configuration of a bucket. However, * the bucket owner can use a bucket policy to grant permission to other users to * read this configuration with the s3:GetBucketNotification * permission.

For more information about setting and reading the * notification configuration on a bucket, see Setting * Up Notification of Bucket Events. For more information about bucket * policies, see Using * Bucket Policies.

The following action is related to * GetBucketNotification:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetBucketNotificationConfigurationOutcomeCallable GetBucketNotificationConfigurationCallable(const Model::GetBucketNotificationConfigurationRequest& request) const; /** *

Returns the notification configuration of a bucket.

If notifications * are not enabled on the bucket, the action returns an empty * NotificationConfiguration element.

By default, you must be * the bucket owner to read the notification configuration of a bucket. However, * the bucket owner can use a bucket policy to grant permission to other users to * read this configuration with the s3:GetBucketNotification * permission.

For more information about setting and reading the * notification configuration on a bucket, see Setting * Up Notification of Bucket Events. For more information about bucket * policies, see Using * Bucket Policies.

The following action is related to * GetBucketNotification:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketNotificationConfigurationAsync(const Model::GetBucketNotificationConfigurationRequest& request, const GetBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Retrieves OwnershipControls for an Amazon S3 bucket. To use this * operation, you must have the s3:GetBucketOwnershipControls * permission. For more information about Amazon S3 permissions, see Specifying * Permissions in a Policy.

For information about Amazon S3 Object * Ownership, see Using * Object Ownership.

The following operations are related to * GetBucketOwnershipControls:

See Also:

AWS * API Reference

*/ virtual Model::GetBucketOwnershipControlsOutcome GetBucketOwnershipControls(const Model::GetBucketOwnershipControlsRequest& request) const; /** *

Retrieves OwnershipControls for an Amazon S3 bucket. To use this * operation, you must have the s3:GetBucketOwnershipControls * permission. For more information about Amazon S3 permissions, see Specifying * Permissions in a Policy.

For information about Amazon S3 Object * Ownership, see Using * Object Ownership.

The following operations are related to * GetBucketOwnershipControls:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetBucketOwnershipControlsOutcomeCallable GetBucketOwnershipControlsCallable(const Model::GetBucketOwnershipControlsRequest& request) const; /** *

Retrieves OwnershipControls for an Amazon S3 bucket. To use this * operation, you must have the s3:GetBucketOwnershipControls * permission. For more information about Amazon S3 permissions, see Specifying * Permissions in a Policy.

For information about Amazon S3 Object * Ownership, see Using * Object Ownership.

The following operations are related to * GetBucketOwnershipControls:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketOwnershipControlsAsync(const Model::GetBucketOwnershipControlsRequest& request, const GetBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns the policy of a specified bucket. If you are using an identity other * than the root user of the Amazon Web Services account that owns the bucket, the * calling identity must have the GetBucketPolicy permissions on the * specified bucket and belong to the bucket owner's account in order to use this * operation.

If you don't have GetBucketPolicy permissions, * Amazon S3 returns a 403 Access Denied error. If you have the * correct permissions, but you're not using an identity that belongs to the bucket * owner's account, Amazon S3 returns a 405 Method Not Allowed * error.

As a security precaution, the root user of the Amazon * Web Services account that owns a bucket can always use this operation, even if * the policy explicitly denies the root user the ability to perform this * action.

For more information about bucket policies, see Using * Bucket Policies and User Policies.

The following action is related to * GetBucketPolicy:

See Also:

AWS * API Reference

*/ virtual Model::GetBucketPolicyOutcome GetBucketPolicy(const Model::GetBucketPolicyRequest& request) const; /** *

Returns the policy of a specified bucket. If you are using an identity other * than the root user of the Amazon Web Services account that owns the bucket, the * calling identity must have the GetBucketPolicy permissions on the * specified bucket and belong to the bucket owner's account in order to use this * operation.

If you don't have GetBucketPolicy permissions, * Amazon S3 returns a 403 Access Denied error. If you have the * correct permissions, but you're not using an identity that belongs to the bucket * owner's account, Amazon S3 returns a 405 Method Not Allowed * error.

As a security precaution, the root user of the Amazon * Web Services account that owns a bucket can always use this operation, even if * the policy explicitly denies the root user the ability to perform this * action.

For more information about bucket policies, see Using * Bucket Policies and User Policies.

The following action is related to * GetBucketPolicy:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetBucketPolicyOutcomeCallable GetBucketPolicyCallable(const Model::GetBucketPolicyRequest& request) const; /** *

Returns the policy of a specified bucket. If you are using an identity other * than the root user of the Amazon Web Services account that owns the bucket, the * calling identity must have the GetBucketPolicy permissions on the * specified bucket and belong to the bucket owner's account in order to use this * operation.

If you don't have GetBucketPolicy permissions, * Amazon S3 returns a 403 Access Denied error. If you have the * correct permissions, but you're not using an identity that belongs to the bucket * owner's account, Amazon S3 returns a 405 Method Not Allowed * error.

As a security precaution, the root user of the Amazon * Web Services account that owns a bucket can always use this operation, even if * the policy explicitly denies the root user the ability to perform this * action.

For more information about bucket policies, see Using * Bucket Policies and User Policies.

The following action is related to * GetBucketPolicy:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketPolicyAsync(const Model::GetBucketPolicyRequest& request, const GetBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Retrieves the policy status for an Amazon S3 bucket, indicating whether the * bucket is public. In order to use this operation, you must have the * s3:GetBucketPolicyStatus permission. For more information about * Amazon S3 permissions, see Specifying * Permissions in a Policy.

For more information about when Amazon S3 * considers a bucket public, see The * Meaning of "Public".

The following operations are related to * GetBucketPolicyStatus:

See Also:

AWS * API Reference

*/ virtual Model::GetBucketPolicyStatusOutcome GetBucketPolicyStatus(const Model::GetBucketPolicyStatusRequest& request) const; /** *

Retrieves the policy status for an Amazon S3 bucket, indicating whether the * bucket is public. In order to use this operation, you must have the * s3:GetBucketPolicyStatus permission. For more information about * Amazon S3 permissions, see Specifying * Permissions in a Policy.

For more information about when Amazon S3 * considers a bucket public, see The * Meaning of "Public".

The following operations are related to * GetBucketPolicyStatus:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetBucketPolicyStatusOutcomeCallable GetBucketPolicyStatusCallable(const Model::GetBucketPolicyStatusRequest& request) const; /** *

Retrieves the policy status for an Amazon S3 bucket, indicating whether the * bucket is public. In order to use this operation, you must have the * s3:GetBucketPolicyStatus permission. For more information about * Amazon S3 permissions, see Specifying * Permissions in a Policy.

For more information about when Amazon S3 * considers a bucket public, see The * Meaning of "Public".

The following operations are related to * GetBucketPolicyStatus:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketPolicyStatusAsync(const Model::GetBucketPolicyStatusRequest& request, const GetBucketPolicyStatusResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns the replication configuration of a bucket.

It can take * a while to propagate the put or delete a replication configuration to all Amazon * S3 systems. Therefore, a get request soon after put or delete can return a wrong * result.

For information about replication configuration, see Replication * in the Amazon S3 User Guide.

This action requires permissions for * the s3:GetReplicationConfiguration action. For more information * about permissions, see Using * Bucket Policies and User Policies.

If you include the * Filter element in a replication configuration, you must also * include the DeleteMarkerReplication and Priority * elements. The response also returns those elements.

For information about * GetBucketReplication errors, see List * of replication-related error codes

The following operations are * related to GetBucketReplication:

See Also:

AWS * API Reference

*/ virtual Model::GetBucketReplicationOutcome GetBucketReplication(const Model::GetBucketReplicationRequest& request) const; /** *

* Returns the replication configuration of a bucket.
*
* It can take a while to propagate a put or delete of a replication
* configuration to all Amazon S3 systems, so a get request soon after a put
* or delete can return a wrong result. Requires permission for the
* s3:GetReplicationConfiguration action.
*
* Returns a future to the operation so that it can be executed in parallel
* to other requests.
*
* @param request the GetBucketReplication request.
* @return a callable future for the operation outcome.
*/ virtual Model::GetBucketReplicationOutcomeCallable GetBucketReplicationCallable(const Model::GetBucketReplicationRequest& request) const; /** *

Returns the replication configuration of a bucket.

It can take * a while to propagate the put or delete a replication configuration to all Amazon * S3 systems. Therefore, a get request soon after put or delete can return a wrong * result.

For information about replication configuration, see Replication * in the Amazon S3 User Guide.

This action requires permissions for * the s3:GetReplicationConfiguration action. For more information * about permissions, see Using * Bucket Policies and User Policies.

If you include the * Filter element in a replication configuration, you must also * include the DeleteMarkerReplication and Priority * elements. The response also returns those elements.

For information about * GetBucketReplication errors, see List * of replication-related error codes

The following operations are * related to GetBucketReplication:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketReplicationAsync(const Model::GetBucketReplicationRequest& request, const GetBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

* Returns the request payment configuration of a bucket. To use this
* operation you must be the bucket owner (see Requester Pays Buckets).
*
* @param request the GetBucketRequestPayment request.
* @return the outcome of the call (result or error).
*/ virtual Model::GetBucketRequestPaymentOutcome GetBucketRequestPayment(const Model::GetBucketRequestPaymentRequest& request) const; /** *

* Returns the request payment configuration of a bucket. To use this
* operation you must be the bucket owner (see Requester Pays Buckets).
*
* Returns a future to the operation so that it can be executed in parallel
* to other requests.
*
* @param request the GetBucketRequestPayment request.
* @return a callable future for the operation outcome.
*/ virtual Model::GetBucketRequestPaymentOutcomeCallable GetBucketRequestPaymentCallable(const Model::GetBucketRequestPaymentRequest& request) const; /** *

Returns the request payment configuration of a bucket. To use this version of * the operation, you must be the bucket owner. For more information, see Requester * Pays Buckets.

The following operations are related to * GetBucketRequestPayment:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketRequestPaymentAsync(const Model::GetBucketRequestPaymentRequest& request, const GetBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

* Returns the tag set associated with the bucket.
*
* Requires permission to perform the s3:GetBucketTagging action; by default
* the bucket owner has this permission and can grant it to others.
*
* Special error: NoSuchTagSetError — there is no tag set associated with
* the bucket.
*
* @param request the GetBucketTagging request.
* @return the outcome of the call (result or error).
*/ virtual Model::GetBucketTaggingOutcome GetBucketTagging(const Model::GetBucketTaggingRequest& request) const; /** *

* Returns the tag set associated with the bucket.
*
* Requires permission to perform the s3:GetBucketTagging action; by default
* the bucket owner has this permission and can grant it to others.
*
* Special error: NoSuchTagSetError — there is no tag set associated with
* the bucket.
*
* Returns a future to the operation so that it can be executed in parallel
* to other requests.
*
* @param request the GetBucketTagging request.
* @return a callable future for the operation outcome.
*/ virtual Model::GetBucketTaggingOutcomeCallable GetBucketTaggingCallable(const Model::GetBucketTaggingRequest& request) const; /** *

Returns the tag set associated with the bucket.

To use this operation, * you must have permission to perform the s3:GetBucketTagging action. * By default, the bucket owner has this permission and can grant this permission * to others.

GetBucketTagging has the following special * error:

  • Error code: NoSuchTagSetError

      *
    • Description: There is no tag set associated with the bucket.

    • *

The following operations are related to * GetBucketTagging:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketTaggingAsync(const Model::GetBucketTaggingRequest& request, const GetBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

* Returns the versioning state of a bucket. To retrieve the versioning
* state you must be the bucket owner.
*
* This implementation also returns the MFA Delete status of the versioning
* state; if MFA Delete is enabled, the bucket owner must use an
* authentication device to change the versioning state of the bucket.
*
* @param request the GetBucketVersioning request.
* @return the outcome of the call (result or error).
*/ virtual Model::GetBucketVersioningOutcome GetBucketVersioning(const Model::GetBucketVersioningRequest& request) const; /** *

* Returns the versioning state of a bucket. To retrieve the versioning
* state you must be the bucket owner.
*
* This implementation also returns the MFA Delete status of the versioning
* state; if MFA Delete is enabled, the bucket owner must use an
* authentication device to change the versioning state of the bucket.
*
* Returns a future to the operation so that it can be executed in parallel
* to other requests.
*
* @param request the GetBucketVersioning request.
* @return a callable future for the operation outcome.
*/ virtual Model::GetBucketVersioningOutcomeCallable GetBucketVersioningCallable(const Model::GetBucketVersioningRequest& request) const; /** *

Returns the versioning state of a bucket.

To retrieve the versioning * state of a bucket, you must be the bucket owner.

This implementation also * returns the MFA Delete status of the versioning state. If the MFA Delete status * is enabled, the bucket owner must use an authentication device to * change the versioning state of the bucket.

The following operations are * related to GetBucketVersioning:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketVersioningAsync(const Model::GetBucketVersioningRequest& request, const GetBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

* Returns the website configuration for a bucket.
*
* This GET action requires the S3:GetBucketWebsite permission. By default
* only the bucket owner can read the bucket website configuration, but
* owners can allow other users to read it by writing a bucket policy
* granting them that permission.
*
* @param request the GetBucketWebsite request.
* @return the outcome of the call (result or error).
*/ virtual Model::GetBucketWebsiteOutcome GetBucketWebsite(const Model::GetBucketWebsiteRequest& request) const; /** *

* Returns the website configuration for a bucket.
*
* This GET action requires the S3:GetBucketWebsite permission. By default
* only the bucket owner can read the bucket website configuration, but
* owners can allow other users to read it by writing a bucket policy
* granting them that permission.
*
* Returns a future to the operation so that it can be executed in parallel
* to other requests.
*
* @param request the GetBucketWebsite request.
* @return a callable future for the operation outcome.
*/ virtual Model::GetBucketWebsiteOutcomeCallable GetBucketWebsiteCallable(const Model::GetBucketWebsiteRequest& request) const; /** *

Returns the website configuration for a bucket. To host website on Amazon S3, * you can configure a bucket as website by adding a website configuration. For * more information about hosting websites, see Hosting * Websites on Amazon S3.

This GET action requires the * S3:GetBucketWebsite permission. By default, only the bucket owner * can read the bucket website configuration. However, bucket owners can allow * other users to read the website configuration by writing a bucket policy * granting them the S3:GetBucketWebsite permission.

The * following operations are related to DeleteBucketWebsite:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetBucketWebsiteAsync(const Model::GetBucketWebsiteRequest& request, const GetBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

* Retrieves an object from Amazon S3. To use GET you must have READ access
* to the object; if READ access is granted to the anonymous user, the
* object can be returned without an authorization header.
*
* Buckets have no directory hierarchy; a logical hierarchy is implied by
* key names (e.g. photos/2006/February/sample.jpg). Specify the full key
* name for the object in the GET operation.
*
* Objects stored in the S3 Glacier or S3 Glacier Deep Archive storage
* classes, or the S3 Intelligent-Tiering Archive / Deep Archive tiers,
* must first be restored with RestoreObject; otherwise this action returns
* an InvalidObjectStateError error.
*
* Encryption request headers (x-amz-server-side-encryption) should not be
* sent for objects using SSE-KMS or SSE-S3 — doing so yields an HTTP 400
* Bad Request. Objects encrypted with customer-provided keys (SSE-C)
* require the x-amz-server-side-encryption-customer-algorithm,
* -customer-key and -customer-key-MD5 headers on the GET.
*
* With permission to read object tags, the response also includes the
* x-amz-tagging-count header (count of tags on the object).
*
* Permissions: you need the relevant read object (or version) permission.
* If the object does not exist, the returned error depends on
* s3:ListBucket: with it Amazon S3 returns HTTP 404 ("no such key");
* without it, HTTP 403 ("access denied").
*
* Versioning: by default the current version is returned; use the
* versionId subresource (requires s3:GetObjectVersion) for a specific
* version. If the current version is a delete marker, S3 behaves as if the
* object was deleted and includes x-amz-delete-marker: true.
*
* Response header values (Content-Type, Content-Language, Expires,
* Cache-Control, Content-Disposition, Content-Encoding) can be overridden
* on a successful (200 OK) response via the response-* query parameters;
* the request must be signed (these cannot be used anonymously).
*
* Conditional requests: If-Match true with If-Unmodified-Since false
* returns 200 OK with data; If-None-Match false with If-Modified-Since
* true returns 304 Not Modified (see RFC 7232).
*
* @param request the GetObject request.
* @return the outcome of the call (result or error).
*/ virtual Model::GetObjectOutcome GetObject(const Model::GetObjectRequest& request) const; /** *

* Retrieves an object from Amazon S3. To use GET you must have READ access
* to the object; if READ access is granted to the anonymous user, the
* object can be returned without an authorization header.
*
* Archived objects (S3 Glacier / Glacier Deep Archive storage classes, or
* S3 Intelligent-Tiering Archive / Deep Archive tiers) must first be
* restored with RestoreObject, otherwise this action returns an
* InvalidObjectStateError error. Encryption request headers should not be
* sent for SSE-KMS / SSE-S3 objects (HTTP 400 results); SSE-C objects
* require the x-amz-server-side-encryption-customer-* headers.
*
* If the object does not exist, the error depends on s3:ListBucket: 404
* ("no such key") with it, 403 ("access denied") without. Use the
* versionId subresource (s3:GetObjectVersion) for a specific version, and
* the response-* query parameters on signed requests to override response
* headers. See the GetObject overload documentation for full details.
*
* Returns a future to the operation so that it can be executed in parallel
* to other requests.
*
* @param request the GetObject request.
* @return a callable future for the operation outcome.
*/ virtual Model::GetObjectOutcomeCallable GetObjectCallable(const Model::GetObjectRequest& request) const; /** *

Retrieves objects from Amazon S3. To use GET, you must have * READ access to the object. If you grant READ access to * the anonymous user, you can return the object without using an authorization * header.

An Amazon S3 bucket has no directory hierarchy such as you would * find in a typical computer file system. You can, however, create a logical * hierarchy by using object key names that imply a folder structure. For example, * instead of naming an object sample.jpg, you can name it * photos/2006/February/sample.jpg.

To get an object from such * a logical hierarchy, specify the full key name for the object in the * GET operation. For a virtual hosted-style request example, if you * have the object photos/2006/February/sample.jpg, specify the * resource as /photos/2006/February/sample.jpg. For a path-style * request example, if you have the object * photos/2006/February/sample.jpg in the bucket named * examplebucket, specify the resource as * /examplebucket/photos/2006/February/sample.jpg. For more * information about request types, see HTTP * Host Header Bucket Specification.

To distribute large files to many * people, you can save bandwidth costs by using BitTorrent. For more information, * see Amazon S3 * Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

*

If the object you are retrieving is stored in the S3 Glacier or S3 Glacier * Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 * Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you * must first restore a copy using RestoreObject. * Otherwise, this action returns an InvalidObjectStateError error. * For information about restoring archived objects, see Restoring * Archived Objects.

Encryption request headers, like * x-amz-server-side-encryption, should not be sent for GET requests * if your object uses server-side encryption with KMS keys (SSE-KMS) or * server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your * object does use these types of keys, you’ll get an HTTP 400 BadRequest * error.

If you encrypt an object by using server-side encryption with * customer-provided encryption keys (SSE-C) when you store the object in Amazon * S3, then when you GET the object, you must use the following headers:

    *
  • x-amz-server-side-encryption-customer-algorithm

  • *

    x-amz-server-side-encryption-customer-key

  • *

    x-amz-server-side-encryption-customer-key-MD5

For more * information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys).

Assuming you * have the relevant permission to read object tags, the response also returns the * x-amz-tagging-count header that provides the count of number of * tags associated with the object. You can use GetObjectTagging * to retrieve the tag set associated with an object.

Permissions *

You need the relevant read object (or version) permission for this * operation. For more information, see Specifying * Permissions in a Policy. If the object you request does not exist, the error * Amazon S3 returns depends on whether you also have the * s3:ListBucket permission.

  • If you have the * s3:ListBucket permission on the bucket, Amazon S3 will return an * HTTP status code 404 ("no such key") error.

  • If you don’t have * the s3:ListBucket permission, Amazon S3 will return an HTTP status * code 403 ("access denied") error.

Versioning

*

By default, the GET action returns the current version of an object. To * return a different version, use the versionId subresource.

*
  • You need the s3:GetObjectVersion permission to * access a specific version of an object.

  • If the current * version of the object is a delete marker, Amazon S3 behaves as if the object was * deleted and includes x-amz-delete-marker: true in the response.

    *

For more information about versioning, see PutBucketVersioning. *

Overriding Response Header Values

There are times when * you want to override certain response header values in a GET response. For * example, you might override the Content-Disposition response header value in * your GET request.

You can override values for a set of response headers * using the following query parameters. These response header values are sent only * on a successful request, that is, when status code 200 OK is returned. The set * of headers you can override using these parameters is a subset of the headers * that Amazon S3 accepts when you create an object. The response headers that you * can override for the GET response are Content-Type, * Content-Language, Expires, Cache-Control, * Content-Disposition, and Content-Encoding. To override * these header values in the GET response, you use the following request * parameters.

You must sign the request, either using an * Authorization header or a presigned URL, when using these parameters. They * cannot be used with an unsigned (anonymous) request.

  • * response-content-type

  • * response-content-language

  • * response-expires

  • * response-cache-control

  • * response-content-disposition

  • * response-content-encoding

Additional * Considerations about Request Headers

If both of the * If-Match and If-Unmodified-Since headers are present * in the request as follows: If-Match condition evaluates to * true, and; If-Unmodified-Since condition evaluates to * false; then, S3 returns 200 OK and the data requested.

If * both of the If-None-Match and If-Modified-Since * headers are present in the request as follows: If-None-Match * condition evaluates to false, and; If-Modified-Since * condition evaluates to true; then, S3 returns 304 Not Modified * response code.

For more information about conditional requests, see RFC 7232.

The following * operations are related to GetObject:

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetObjectAsync(const Model::GetObjectRequest& request, const GetObjectResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

* Returns the access control list (ACL) of an object. To use this
* operation you must have READ_ACP access to the object. This action is
* not supported by Amazon S3 on Outposts.
*
* Versioning: by default, GET returns ACL information about the current
* version of an object; use the versionId subresource for a different
* version.
*
* @param request the GetObjectAcl request.
* @return the outcome of the call (result or error).
*/ virtual Model::GetObjectAclOutcome GetObjectAcl(const Model::GetObjectAclRequest& request) const; /** *

* Returns the access control list (ACL) of an object. To use this
* operation you must have READ_ACP access to the object. This action is
* not supported by Amazon S3 on Outposts.
*
* Versioning: by default, GET returns ACL information about the current
* version of an object; use the versionId subresource for a different
* version.
*
* Returns a future to the operation so that it can be executed in parallel
* to other requests.
*
* @param request the GetObjectAcl request.
* @return a callable future for the operation outcome.
*/ virtual Model::GetObjectAclOutcomeCallable GetObjectAclCallable(const Model::GetObjectAclRequest& request) const; /** *

Returns the access control list (ACL) of an object. To use this operation, * you must have READ_ACP access to the object.

This action is * not supported by Amazon S3 on Outposts.

Versioning

By * default, GET returns ACL information about the current version of an object. To * return ACL information about a different version, use the versionId * subresource.

The following operations are related to * GetObjectAcl:

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetObjectAclAsync(const Model::GetObjectAclRequest& request, const GetObjectAclResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

* Gets an object's current Legal Hold status (see Locking Objects). This
* action is not supported by Amazon S3 on Outposts.
*
* @param request the GetObjectLegalHold request.
* @return the outcome of the call (result or error).
*/ virtual Model::GetObjectLegalHoldOutcome GetObjectLegalHold(const Model::GetObjectLegalHoldRequest& request) const; /** *

* Gets an object's current Legal Hold status (see Locking Objects). This
* action is not supported by Amazon S3 on Outposts.
*
* Returns a future to the operation so that it can be executed in parallel
* to other requests.
*
* @param request the GetObjectLegalHold request.
* @return a callable future for the operation outcome.
*/ virtual Model::GetObjectLegalHoldOutcomeCallable GetObjectLegalHoldCallable(const Model::GetObjectLegalHoldRequest& request) const; /** *

Gets an object's current Legal Hold status. For more information, see Locking * Objects.

This action is not supported by Amazon S3 on * Outposts.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetObjectLegalHoldAsync(const Model::GetObjectLegalHoldRequest& request, const GetObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

* Gets the Object Lock configuration for a bucket. The rule specified in
* the Object Lock configuration is applied by default to every new object
* placed in the specified bucket (see Locking Objects).
*
* @param request the GetObjectLockConfiguration request.
* @return the outcome of the call (result or error).
*/ virtual Model::GetObjectLockConfigurationOutcome GetObjectLockConfiguration(const Model::GetObjectLockConfigurationRequest& request) const; /** *

Gets the Object Lock configuration for a bucket. The rule specified in the * Object Lock configuration will be applied by default to every new object placed * in the specified bucket. For more information, see Locking * Objects.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetObjectLockConfigurationOutcomeCallable GetObjectLockConfigurationCallable(const Model::GetObjectLockConfigurationRequest& request) const; /** *

Gets the Object Lock configuration for a bucket. The rule specified in the * Object Lock configuration will be applied by default to every new object placed * in the specified bucket. For more information, see Locking * Objects.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetObjectLockConfigurationAsync(const Model::GetObjectLockConfigurationRequest& request, const GetObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Retrieves an object's retention settings. For more information, see Locking * Objects.

This action is not supported by Amazon S3 on * Outposts.

See Also:

AWS * API Reference

*/ virtual Model::GetObjectRetentionOutcome GetObjectRetention(const Model::GetObjectRetentionRequest& request) const; /** *

Retrieves an object's retention settings. For more information, see Locking * Objects.

This action is not supported by Amazon S3 on * Outposts.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetObjectRetentionOutcomeCallable GetObjectRetentionCallable(const Model::GetObjectRetentionRequest& request) const; /** *

Retrieves an object's retention settings. For more information, see Locking * Objects.

This action is not supported by Amazon S3 on * Outposts.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetObjectRetentionAsync(const Model::GetObjectRetentionRequest& request, const GetObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns the tag-set of an object. You send the GET request against the * tagging subresource associated with the object.

To use this operation, * you must have permission to perform the s3:GetObjectTagging action. * By default, the GET action returns information about current version of an * object. For a versioned bucket, you can have multiple versions of an object in * your bucket. To retrieve tags of any other version, use the versionId query * parameter. You also need permission for the * s3:GetObjectVersionTagging action.

By default, the bucket * owner has this permission and can grant this permission to others.

For * information about the Amazon S3 object tagging feature, see Object * Tagging.

The following action is related to * GetObjectTagging:

See Also:

AWS * API Reference

*/ virtual Model::GetObjectTaggingOutcome GetObjectTagging(const Model::GetObjectTaggingRequest& request) const; /** *

Returns the tag-set of an object. You send the GET request against the * tagging subresource associated with the object.

To use this operation, * you must have permission to perform the s3:GetObjectTagging action. * By default, the GET action returns information about current version of an * object. For a versioned bucket, you can have multiple versions of an object in * your bucket. To retrieve tags of any other version, use the versionId query * parameter. You also need permission for the * s3:GetObjectVersionTagging action.

By default, the bucket * owner has this permission and can grant this permission to others.

For * information about the Amazon S3 object tagging feature, see Object * Tagging.

The following action is related to * GetObjectTagging:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetObjectTaggingOutcomeCallable GetObjectTaggingCallable(const Model::GetObjectTaggingRequest& request) const; /** *

Returns the tag-set of an object. You send the GET request against the * tagging subresource associated with the object.

To use this operation, * you must have permission to perform the s3:GetObjectTagging action. * By default, the GET action returns information about current version of an * object. For a versioned bucket, you can have multiple versions of an object in * your bucket. To retrieve tags of any other version, use the versionId query * parameter. You also need permission for the * s3:GetObjectVersionTagging action.

By default, the bucket * owner has this permission and can grant this permission to others.

For * information about the Amazon S3 object tagging feature, see Object * Tagging.

The following action is related to * GetObjectTagging:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetObjectTaggingAsync(const Model::GetObjectTaggingRequest& request, const GetObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns torrent files from a bucket. BitTorrent can save you bandwidth when * you're distributing large files. For more information about BitTorrent, see Using * BitTorrent with Amazon S3.

You can get torrent only for * objects that are less than 5 GB in size, and that are not encrypted using * server-side encryption with a customer-provided encryption key.

*

To use GET, you must have READ access to the object.

This action is * not supported by Amazon S3 on Outposts.

The following action is related * to GetObjectTorrent:

See Also:

AWS * API Reference

*/ virtual Model::GetObjectTorrentOutcome GetObjectTorrent(const Model::GetObjectTorrentRequest& request) const; /** *

Returns torrent files from a bucket. BitTorrent can save you bandwidth when * you're distributing large files. For more information about BitTorrent, see Using * BitTorrent with Amazon S3.

You can get torrent only for * objects that are less than 5 GB in size, and that are not encrypted using * server-side encryption with a customer-provided encryption key.

*

To use GET, you must have READ access to the object.

This action is * not supported by Amazon S3 on Outposts.

The following action is related * to GetObjectTorrent:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetObjectTorrentOutcomeCallable GetObjectTorrentCallable(const Model::GetObjectTorrentRequest& request) const; /** *

Returns torrent files from a bucket. BitTorrent can save you bandwidth when * you're distributing large files. For more information about BitTorrent, see Using * BitTorrent with Amazon S3.

You can get torrent only for * objects that are less than 5 GB in size, and that are not encrypted using * server-side encryption with a customer-provided encryption key.

*

To use GET, you must have READ access to the object.

This action is * not supported by Amazon S3 on Outposts.

The following action is related * to GetObjectTorrent:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetObjectTorrentAsync(const Model::GetObjectTorrentRequest& request, const GetObjectTorrentResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Retrieves the PublicAccessBlock configuration for an Amazon S3 * bucket. To use this operation, you must have the * s3:GetBucketPublicAccessBlock permission. For more information * about Amazon S3 permissions, see Specifying * Permissions in a Policy.

When Amazon S3 evaluates the * PublicAccessBlock configuration for a bucket or an object, it * checks the PublicAccessBlock configuration for both the bucket (or * the bucket that contains the object) and the bucket owner's account. If the * PublicAccessBlock settings are different between the bucket and the * account, Amazon S3 uses the most restrictive combination of the bucket-level and * account-level settings.

For more information about when * Amazon S3 considers a bucket or an object public, see The * Meaning of "Public".

The following operations are related to * GetPublicAccessBlock:

See Also:

AWS * API Reference

*/ virtual Model::GetPublicAccessBlockOutcome GetPublicAccessBlock(const Model::GetPublicAccessBlockRequest& request) const; /** *

Retrieves the PublicAccessBlock configuration for an Amazon S3 * bucket. To use this operation, you must have the * s3:GetBucketPublicAccessBlock permission. For more information * about Amazon S3 permissions, see Specifying * Permissions in a Policy.

When Amazon S3 evaluates the * PublicAccessBlock configuration for a bucket or an object, it * checks the PublicAccessBlock configuration for both the bucket (or * the bucket that contains the object) and the bucket owner's account. If the * PublicAccessBlock settings are different between the bucket and the * account, Amazon S3 uses the most restrictive combination of the bucket-level and * account-level settings.

For more information about when * Amazon S3 considers a bucket or an object public, see The * Meaning of "Public".

The following operations are related to * GetPublicAccessBlock:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetPublicAccessBlockOutcomeCallable GetPublicAccessBlockCallable(const Model::GetPublicAccessBlockRequest& request) const; /** *

Retrieves the PublicAccessBlock configuration for an Amazon S3 * bucket. To use this operation, you must have the * s3:GetBucketPublicAccessBlock permission. For more information * about Amazon S3 permissions, see Specifying * Permissions in a Policy.

When Amazon S3 evaluates the * PublicAccessBlock configuration for a bucket or an object, it * checks the PublicAccessBlock configuration for both the bucket (or * the bucket that contains the object) and the bucket owner's account. If the * PublicAccessBlock settings are different between the bucket and the * account, Amazon S3 uses the most restrictive combination of the bucket-level and * account-level settings.

For more information about when * Amazon S3 considers a bucket or an object public, see The * Meaning of "Public".

The following operations are related to * GetPublicAccessBlock:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetPublicAccessBlockAsync(const Model::GetPublicAccessBlockRequest& request, const GetPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

This action is useful to determine if a bucket exists and you have permission * to access it. The action returns a 200 OK if the bucket exists and * you have permission to access it.

If the bucket does not exist or you do * not have permission to access it, the HEAD request returns a * generic 404 Not Found or 403 Forbidden code. A message * body is not included, so you cannot determine the exception beyond these error * codes.

To use this operation, you must have permissions to perform the * s3:ListBucket action. The bucket owner has this permission by * default and can grant this permission to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

To use this API * against an access point, you must provide the alias of the access point in place * of the bucket name or specify the access point ARN. When using the access point * ARN, you must direct requests to the access point hostname. The access point * hostname takes the form * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the * Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For * more information see, Using * access points.

See Also:

AWS API * Reference

*/ virtual Model::HeadBucketOutcome HeadBucket(const Model::HeadBucketRequest& request) const; /** *

This action is useful to determine if a bucket exists and you have permission * to access it. The action returns a 200 OK if the bucket exists and * you have permission to access it.

If the bucket does not exist or you do * not have permission to access it, the HEAD request returns a * generic 404 Not Found or 403 Forbidden code. A message * body is not included, so you cannot determine the exception beyond these error * codes.

To use this operation, you must have permissions to perform the * s3:ListBucket action. The bucket owner has this permission by * default and can grant this permission to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

To use this API * against an access point, you must provide the alias of the access point in place * of the bucket name or specify the access point ARN. When using the access point * ARN, you must direct requests to the access point hostname. The access point * hostname takes the form * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the * Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For * more information see, Using * access points.

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::HeadBucketOutcomeCallable HeadBucketCallable(const Model::HeadBucketRequest& request) const; /** *

This action is useful to determine if a bucket exists and you have permission * to access it. The action returns a 200 OK if the bucket exists and * you have permission to access it.

If the bucket does not exist or you do * not have permission to access it, the HEAD request returns a * generic 404 Not Found or 403 Forbidden code. A message * body is not included, so you cannot determine the exception beyond these error * codes.

To use this operation, you must have permissions to perform the * s3:ListBucket action. The bucket owner has this permission by * default and can grant this permission to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

To use this API * against an access point, you must provide the alias of the access point in place * of the bucket name or specify the access point ARN. When using the access point * ARN, you must direct requests to the access point hostname. The access point * hostname takes the form * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the * Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For * more information see, Using * access points.

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void HeadBucketAsync(const Model::HeadBucketRequest& request, const HeadBucketResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

The HEAD action retrieves metadata from an object without returning the * object itself. This action is useful if you're only interested in an object's * metadata. To use HEAD, you must have READ access to the object.

A * HEAD request has the same options as a GET action on * an object. The response is identical to the GET response except * that there is no response body. Because of this, if the HEAD * request generates an error, it returns a generic 404 Not Found or * 403 Forbidden code. It is not possible to retrieve the exact * exception beyond these error codes.

If you encrypt an object by using * server-side encryption with customer-provided encryption keys (SSE-C) when you * store the object in Amazon S3, then when you retrieve the metadata from the * object, you must use the following headers:

  • *

    x-amz-server-side-encryption-customer-algorithm

  • *

    x-amz-server-side-encryption-customer-key

  • *

    x-amz-server-side-encryption-customer-key-MD5

For more * information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys).

  • *

    Encryption request headers, like x-amz-server-side-encryption, * should not be sent for GET requests if your object uses server-side encryption * with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed * encryption keys (SSE-S3). If your object does use these types of keys, you’ll * get an HTTP 400 BadRequest error.

  • The last modified property * in this case is the creation date of the object.

*

Request headers are limited to 8 KB in size. For more information, see Common * Request Headers.

Consider the following when using request * headers:

  • Consideration 1 – If both of the * If-Match and If-Unmodified-Since headers are present * in the request as follows:

    • If-Match condition * evaluates to true, and;

    • * If-Unmodified-Since condition evaluates to false;

      *

    Then Amazon S3 returns 200 OK and the data * requested.

  • Consideration 2 – If both of the * If-None-Match and If-Modified-Since headers are * present in the request as follows:

    • If-None-Match * condition evaluates to false, and;

    • * If-Modified-Since condition evaluates to true;

      *

    Then Amazon S3 returns the 304 Not Modified response * code.

For more information about conditional requests, see RFC 7232.

* Permissions

You need the relevant read object (or version) * permission for this operation. For more information, see Specifying * Permissions in a Policy. If the object you request does not exist, the error * Amazon S3 returns depends on whether you also have the s3:ListBucket * permission.

  • If you have the s3:ListBucket * permission on the bucket, Amazon S3 returns an HTTP status code 404 ("no such * key") error.

  • If you don’t have the s3:ListBucket * permission, Amazon S3 returns an HTTP status code 403 ("access denied") * error.

The following action is related to * HeadObject:

See Also:

AWS API * Reference

*/ virtual Model::HeadObjectOutcome HeadObject(const Model::HeadObjectRequest& request) const; /** *

The HEAD action retrieves metadata from an object without returning the * object itself. This action is useful if you're only interested in an object's * metadata. To use HEAD, you must have READ access to the object.

A * HEAD request has the same options as a GET action on * an object. The response is identical to the GET response except * that there is no response body. Because of this, if the HEAD * request generates an error, it returns a generic 404 Not Found or * 403 Forbidden code. It is not possible to retrieve the exact * exception beyond these error codes.

If you encrypt an object by using * server-side encryption with customer-provided encryption keys (SSE-C) when you * store the object in Amazon S3, then when you retrieve the metadata from the * object, you must use the following headers:

  • *

    x-amz-server-side-encryption-customer-algorithm

  • *

    x-amz-server-side-encryption-customer-key

  • *

    x-amz-server-side-encryption-customer-key-MD5

For more * information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys).

  • *

    Encryption request headers, like x-amz-server-side-encryption, * should not be sent for GET requests if your object uses server-side encryption * with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed * encryption keys (SSE-S3). If your object does use these types of keys, you’ll * get an HTTP 400 BadRequest error.

  • The last modified property * in this case is the creation date of the object.

*

Request headers are limited to 8 KB in size. For more information, see Common * Request Headers.

Consider the following when using request * headers:

  • Consideration 1 – If both of the * If-Match and If-Unmodified-Since headers are present * in the request as follows:

    • If-Match condition * evaluates to true, and;

    • * If-Unmodified-Since condition evaluates to false;

      *

    Then Amazon S3 returns 200 OK and the data * requested.

  • Consideration 2 – If both of the * If-None-Match and If-Modified-Since headers are * present in the request as follows:

    • If-None-Match * condition evaluates to false, and;

    • * If-Modified-Since condition evaluates to true;

      *

    Then Amazon S3 returns the 304 Not Modified response * code.

For more information about conditional requests, see RFC 7232.

* Permissions

You need the relevant read object (or version) * permission for this operation. For more information, see Specifying * Permissions in a Policy. If the object you request does not exist, the error * Amazon S3 returns depends on whether you also have the s3:ListBucket * permission.

  • If you have the s3:ListBucket * permission on the bucket, Amazon S3 returns an HTTP status code 404 ("no such * key") error.

  • If you don’t have the s3:ListBucket * permission, Amazon S3 returns an HTTP status code 403 ("access denied") * error.

The following action is related to * HeadObject:

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::HeadObjectOutcomeCallable HeadObjectCallable(const Model::HeadObjectRequest& request) const; /** *

The HEAD action retrieves metadata from an object without returning the * object itself. This action is useful if you're only interested in an object's * metadata. To use HEAD, you must have READ access to the object.

A * HEAD request has the same options as a GET action on * an object. The response is identical to the GET response except * that there is no response body. Because of this, if the HEAD * request generates an error, it returns a generic 404 Not Found or * 403 Forbidden code. It is not possible to retrieve the exact * exception beyond these error codes.

If you encrypt an object by using * server-side encryption with customer-provided encryption keys (SSE-C) when you * store the object in Amazon S3, then when you retrieve the metadata from the * object, you must use the following headers:

  • *

    x-amz-server-side-encryption-customer-algorithm

  • *

    x-amz-server-side-encryption-customer-key

  • *

    x-amz-server-side-encryption-customer-key-MD5

For more * information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys).

  • *

    Encryption request headers, like x-amz-server-side-encryption, * should not be sent for GET requests if your object uses server-side encryption * with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed * encryption keys (SSE-S3). If your object does use these types of keys, you’ll * get an HTTP 400 BadRequest error.

  • The last modified property * in this case is the creation date of the object.

*

Request headers are limited to 8 KB in size. For more information, see Common * Request Headers.

Consider the following when using request * headers:

  • Consideration 1 – If both of the * If-Match and If-Unmodified-Since headers are present * in the request as follows:

    • If-Match condition * evaluates to true, and;

    • * If-Unmodified-Since condition evaluates to false;

      *

    Then Amazon S3 returns 200 OK and the data * requested.

  • Consideration 2 – If both of the * If-None-Match and If-Modified-Since headers are * present in the request as follows:

    • If-None-Match * condition evaluates to false, and;

    • * If-Modified-Since condition evaluates to true;

      *

    Then Amazon S3 returns the 304 Not Modified response * code.

For more information about conditional requests, see RFC 7232.

* Permissions

You need the relevant read object (or version) * permission for this operation. For more information, see Specifying * Permissions in a Policy. If the object you request does not exist, the error * Amazon S3 returns depends on whether you also have the s3:ListBucket * permission.

  • If you have the s3:ListBucket * permission on the bucket, Amazon S3 returns an HTTP status code 404 ("no such * key") error.

  • If you don’t have the s3:ListBucket * permission, Amazon S3 returns an HTTP status code 403 ("access denied") * error.

The following action is related to * HeadObject:

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void HeadObjectAsync(const Model::HeadObjectRequest& request, const HeadObjectResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Lists the analytics configurations for the bucket. You can have up to 1,000 * analytics configurations per bucket.

This action supports list pagination * and does not return more than 100 configurations at a time. You should always * check the IsTruncated element in the response. If there are no more * configurations to list, IsTruncated is set to false. If there are * more configurations to list, IsTruncated is set to true, and there * will be a value in NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list * by passing the value in continuation-token in the request to GET * the next page.

To use this operation, you must have permissions to * perform the s3:GetAnalyticsConfiguration action. The bucket owner * has this permission by default. The bucket owner can grant this permission to * others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * Amazon S3 analytics feature, see Amazon * S3 Analytics – Storage Class Analysis.

The following operations are * related to ListBucketAnalyticsConfigurations:

See Also:

AWS * API Reference

*/ virtual Model::ListBucketAnalyticsConfigurationsOutcome ListBucketAnalyticsConfigurations(const Model::ListBucketAnalyticsConfigurationsRequest& request) const; /** *

Lists the analytics configurations for the bucket. You can have up to 1,000 * analytics configurations per bucket.

This action supports list pagination * and does not return more than 100 configurations at a time. You should always * check the IsTruncated element in the response. If there are no more * configurations to list, IsTruncated is set to false. If there are * more configurations to list, IsTruncated is set to true, and there * will be a value in NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list * by passing the value in continuation-token in the request to GET * the next page.

To use this operation, you must have permissions to * perform the s3:GetAnalyticsConfiguration action. The bucket owner * has this permission by default. The bucket owner can grant this permission to * others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * Amazon S3 analytics feature, see Amazon * S3 Analytics – Storage Class Analysis.

The following operations are * related to ListBucketAnalyticsConfigurations:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListBucketAnalyticsConfigurationsOutcomeCallable ListBucketAnalyticsConfigurationsCallable(const Model::ListBucketAnalyticsConfigurationsRequest& request) const; /** *

Lists the analytics configurations for the bucket. You can have up to 1,000 * analytics configurations per bucket.

This action supports list pagination * and does not return more than 100 configurations at a time. You should always * check the IsTruncated element in the response. If there are no more * configurations to list, IsTruncated is set to false. If there are * more configurations to list, IsTruncated is set to true, and there * will be a value in NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list * by passing the value in continuation-token in the request to GET * the next page.

To use this operation, you must have permissions to * perform the s3:GetAnalyticsConfiguration action. The bucket owner * has this permission by default. The bucket owner can grant this permission to * others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * Amazon S3 analytics feature, see Amazon * S3 Analytics – Storage Class Analysis.

The following operations are * related to ListBucketAnalyticsConfigurations:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListBucketAnalyticsConfigurationsAsync(const Model::ListBucketAnalyticsConfigurationsRequest& request, const ListBucketAnalyticsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Lists the S3 Intelligent-Tiering configuration from the specified bucket.

*

The S3 Intelligent-Tiering storage class is designed to optimize storage * costs by automatically moving data to the most cost-effective storage access * tier, without performance impact or operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings in two low latency and high throughput access * tiers. For data that can be accessed asynchronously, you can choose to activate * automatic archiving capabilities within the S3 Intelligent-Tiering storage * class.

The S3 Intelligent-Tiering storage class is the ideal storage * class for data with unknown, changing, or unpredictable access patterns, * independent of object size or retention period. If the size of an object is less * than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, * but they are always charged at the Frequent Access tier rates in the S3 * Intelligent-Tiering storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * ListBucketIntelligentTieringConfigurations include:

See Also:

AWS * API Reference

*/ virtual Model::ListBucketIntelligentTieringConfigurationsOutcome ListBucketIntelligentTieringConfigurations(const Model::ListBucketIntelligentTieringConfigurationsRequest& request) const; /** *

Lists the S3 Intelligent-Tiering configuration from the specified bucket.

*

The S3 Intelligent-Tiering storage class is designed to optimize storage * costs by automatically moving data to the most cost-effective storage access * tier, without performance impact or operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings in two low latency and high throughput access * tiers. For data that can be accessed asynchronously, you can choose to activate * automatic archiving capabilities within the S3 Intelligent-Tiering storage * class.

The S3 Intelligent-Tiering storage class is the ideal storage * class for data with unknown, changing, or unpredictable access patterns, * independent of object size or retention period. If the size of an object is less * than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, * but they are always charged at the Frequent Access tier rates in the S3 * Intelligent-Tiering storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * ListBucketIntelligentTieringConfigurations include:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListBucketIntelligentTieringConfigurationsOutcomeCallable ListBucketIntelligentTieringConfigurationsCallable(const Model::ListBucketIntelligentTieringConfigurationsRequest& request) const; /** *

Lists the S3 Intelligent-Tiering configuration from the specified bucket.

*

The S3 Intelligent-Tiering storage class is designed to optimize storage * costs by automatically moving data to the most cost-effective storage access * tier, without performance impact or operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings in two low latency and high throughput access * tiers. For data that can be accessed asynchronously, you can choose to activate * automatic archiving capabilities within the S3 Intelligent-Tiering storage * class.

The S3 Intelligent-Tiering storage class is the ideal storage * class for data with unknown, changing, or unpredictable access patterns, * independent of object size or retention period. If the size of an object is less * than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, * but they are always charged at the Frequent Access tier rates in the S3 * Intelligent-Tiering storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * ListBucketIntelligentTieringConfigurations include:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListBucketIntelligentTieringConfigurationsAsync(const Model::ListBucketIntelligentTieringConfigurationsRequest& request, const ListBucketIntelligentTieringConfigurationsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns a list of inventory configurations for the bucket. You can have up to * 1,000 inventory configurations per bucket.

This action supports list * pagination and does not return more than 100 configurations at a time. Always * check the IsTruncated element in the response. If there are no more * configurations to list, IsTruncated is set to false. If there are * more configurations to list, IsTruncated is set to true, and there * is a value in NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list * by passing the value in continuation-token in the request to GET * the next page.

To use this operation, you must have permissions to * perform the s3:GetInventoryConfiguration action. The bucket owner * has this permission by default. The bucket owner can grant this permission to * others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 inventory feature, see Amazon * S3 Inventory

The following operations are related to * ListBucketInventoryConfigurations:

See Also:

AWS * API Reference

*/ virtual Model::ListBucketInventoryConfigurationsOutcome ListBucketInventoryConfigurations(const Model::ListBucketInventoryConfigurationsRequest& request) const; /** *

Returns a list of inventory configurations for the bucket. You can have up to * 1,000 inventory configurations per bucket.

This action supports list * pagination and does not return more than 100 configurations at a time. Always * check the IsTruncated element in the response. If there are no more * configurations to list, IsTruncated is set to false. If there are * more configurations to list, IsTruncated is set to true, and there * is a value in NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list * by passing the value in continuation-token in the request to GET * the next page.

To use this operation, you must have permissions to * perform the s3:GetInventoryConfiguration action. The bucket owner * has this permission by default. The bucket owner can grant this permission to * others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 inventory feature, see Amazon * S3 Inventory

The following operations are related to * ListBucketInventoryConfigurations:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListBucketInventoryConfigurationsOutcomeCallable ListBucketInventoryConfigurationsCallable(const Model::ListBucketInventoryConfigurationsRequest& request) const; /** *

Returns a list of inventory configurations for the bucket. You can have up to * 1,000 inventory configurations per bucket.

This action supports list * pagination and does not return more than 100 configurations at a time. Always * check the IsTruncated element in the response. If there are no more * configurations to list, IsTruncated is set to false. If there are * more configurations to list, IsTruncated is set to true, and there * is a value in NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list * by passing the value in continuation-token in the request to GET * the next page.

To use this operation, you must have permissions to * perform the s3:GetInventoryConfiguration action. The bucket owner * has this permission by default. The bucket owner can grant this permission to * others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * the Amazon S3 inventory feature, see Amazon * S3 Inventory

The following operations are related to * ListBucketInventoryConfigurations:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListBucketInventoryConfigurationsAsync(const Model::ListBucketInventoryConfigurationsRequest& request, const ListBucketInventoryConfigurationsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Lists the metrics configurations for the bucket. The metrics configurations * are only for the request metrics of the bucket and do not provide information on * daily storage metrics. You can have up to 1,000 configurations per bucket.

*

This action supports list pagination and does not return more than 100 * configurations at a time. Always check the IsTruncated element in * the response. If there are no more configurations to list, * IsTruncated is set to false. If there are more configurations to * list, IsTruncated is set to true, and there is a value in * NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list * by passing the value in continuation-token in the request to * GET the next page.

To use this operation, you must have * permissions to perform the s3:GetMetricsConfiguration action. The * bucket owner has this permission by default. The bucket owner can grant this * permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For more information * about metrics configurations and CloudWatch request metrics, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to ListBucketMetricsConfigurations:

See Also:

AWS * API Reference

*/ virtual Model::ListBucketMetricsConfigurationsOutcome ListBucketMetricsConfigurations(const Model::ListBucketMetricsConfigurationsRequest& request) const; /** *

Lists the metrics configurations for the bucket. The metrics configurations * are only for the request metrics of the bucket and do not provide information on * daily storage metrics. You can have up to 1,000 configurations per bucket.

*

This action supports list pagination and does not return more than 100 * configurations at a time. Always check the IsTruncated element in * the response. If there are no more configurations to list, * IsTruncated is set to false. If there are more configurations to * list, IsTruncated is set to true, and there is a value in * NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list * by passing the value in continuation-token in the request to * GET the next page.

To use this operation, you must have * permissions to perform the s3:GetMetricsConfiguration action. The * bucket owner has this permission by default. The bucket owner can grant this * permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For more information * about metrics configurations and CloudWatch request metrics, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to ListBucketMetricsConfigurations:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListBucketMetricsConfigurationsOutcomeCallable ListBucketMetricsConfigurationsCallable(const Model::ListBucketMetricsConfigurationsRequest& request) const; /** *

Lists the metrics configurations for the bucket. The metrics configurations * are only for the request metrics of the bucket and do not provide information on * daily storage metrics. You can have up to 1,000 configurations per bucket.

*

This action supports list pagination and does not return more than 100 * configurations at a time. Always check the IsTruncated element in * the response. If there are no more configurations to list, * IsTruncated is set to false. If there are more configurations to * list, IsTruncated is set to true, and there is a value in * NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list * by passing the value in continuation-token in the request to * GET the next page.

To use this operation, you must have * permissions to perform the s3:GetMetricsConfiguration action. The * bucket owner has this permission by default. The bucket owner can grant this * permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For more information * about metrics configurations and CloudWatch request metrics, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to ListBucketMetricsConfigurations:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListBucketMetricsConfigurationsAsync(const Model::ListBucketMetricsConfigurationsRequest& request, const ListBucketMetricsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns a list of all buckets owned by the authenticated sender of the * request.

See Also:

AWS API * Reference

*/ virtual Model::ListBucketsOutcome ListBuckets() const; /** *

Returns a list of all buckets owned by the authenticated sender of the * request.

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListBucketsOutcomeCallable ListBucketsCallable() const; /** *

Returns a list of all buckets owned by the authenticated sender of the * request.

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListBucketsAsync(const ListBucketsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

This action lists in-progress multipart uploads. An in-progress multipart * upload is a multipart upload that has been initiated using the Initiate * Multipart Upload request, but has not yet been completed or aborted.

This * action returns at most 1,000 multipart uploads in the response. 1,000 multipart * uploads is the maximum number of uploads a response can include, which is also * the default value. You can further limit the number of uploads in a response by * specifying the max-uploads parameter in the request. If additional * multipart uploads satisfy the list criteria, the response will contain an * IsTruncated element with the value true. To list the additional * multipart uploads, use the key-marker and * upload-id-marker request parameters.

In the response, the * uploads are sorted by key. If your application has initiated more than one * multipart upload using the same object key, then uploads in the response are * first sorted by key. Additionally, uploads are sorted in ascending order within * each key by the upload initiation time.

For more information on multipart * uploads, see Uploading * Objects Using Multipart Upload.

For information on permissions * required to use the multipart upload API, see Multipart * Upload and Permissions.

The following operations are related to * ListMultipartUploads:

See Also:

AWS * API Reference

*/ virtual Model::ListMultipartUploadsOutcome ListMultipartUploads(const Model::ListMultipartUploadsRequest& request) const; /** *

This action lists in-progress multipart uploads. An in-progress multipart * upload is a multipart upload that has been initiated using the Initiate * Multipart Upload request, but has not yet been completed or aborted.

This * action returns at most 1,000 multipart uploads in the response. 1,000 multipart * uploads is the maximum number of uploads a response can include, which is also * the default value. You can further limit the number of uploads in a response by * specifying the max-uploads parameter in the request. If additional * multipart uploads satisfy the list criteria, the response will contain an * IsTruncated element with the value true. To list the additional * multipart uploads, use the key-marker and * upload-id-marker request parameters.

In the response, the * uploads are sorted by key. If your application has initiated more than one * multipart upload using the same object key, then uploads in the response are * first sorted by key. Additionally, uploads are sorted in ascending order within * each key by the upload initiation time.

For more information on multipart * uploads, see Uploading * Objects Using Multipart Upload.

For information on permissions * required to use the multipart upload API, see Multipart * Upload and Permissions.

The following operations are related to * ListMultipartUploads:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListMultipartUploadsOutcomeCallable ListMultipartUploadsCallable(const Model::ListMultipartUploadsRequest& request) const; /** *

This action lists in-progress multipart uploads. An in-progress multipart * upload is a multipart upload that has been initiated using the Initiate * Multipart Upload request, but has not yet been completed or aborted.

This * action returns at most 1,000 multipart uploads in the response. 1,000 multipart * uploads is the maximum number of uploads a response can include, which is also * the default value. You can further limit the number of uploads in a response by * specifying the max-uploads parameter in the request. If additional * multipart uploads satisfy the list criteria, the response will contain an * IsTruncated element with the value true. To list the additional * multipart uploads, use the key-marker and * upload-id-marker request parameters.

In the response, the * uploads are sorted by key. If your application has initiated more than one * multipart upload using the same object key, then uploads in the response are * first sorted by key. Additionally, uploads are sorted in ascending order within * each key by the upload initiation time.

For more information on multipart * uploads, see Uploading * Objects Using Multipart Upload.

For information on permissions * required to use the multipart upload API, see Multipart * Upload and Permissions.

The following operations are related to * ListMultipartUploads:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListMultipartUploadsAsync(const Model::ListMultipartUploadsRequest& request, const ListMultipartUploadsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns metadata about all versions of the objects in a bucket. You can also * use request parameters as selection criteria to return metadata about a subset * of all the object versions.

To use this operation, you must * have permissions to perform the s3:ListBucketVersions action. Be * aware of the name difference.

A 200 OK response can * contain valid or invalid XML. Make sure to design your application to parse the * contents of the response and handle it appropriately.

To use this * operation, you must have READ access to the bucket.

This action is not * supported by Amazon S3 on Outposts.

The following operations are related * to ListObjectVersions:

See Also:

AWS * API Reference

*/ virtual Model::ListObjectVersionsOutcome ListObjectVersions(const Model::ListObjectVersionsRequest& request) const; /** *

Returns metadata about all versions of the objects in a bucket. You can also * use request parameters as selection criteria to return metadata about a subset * of all the object versions.

To use this operation, you must * have permissions to perform the s3:ListBucketVersions action. Be * aware of the name difference.

A 200 OK response can * contain valid or invalid XML. Make sure to design your application to parse the * contents of the response and handle it appropriately.

To use this * operation, you must have READ access to the bucket.

This action is not * supported by Amazon S3 on Outposts.

The following operations are related * to ListObjectVersions:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListObjectVersionsOutcomeCallable ListObjectVersionsCallable(const Model::ListObjectVersionsRequest& request) const; /** *

Returns metadata about all versions of the objects in a bucket. You can also * use request parameters as selection criteria to return metadata about a subset * of all the object versions.

To use this operation, you must * have permissions to perform the s3:ListBucketVersions action. Be * aware of the name difference.

A 200 OK response can * contain valid or invalid XML. Make sure to design your application to parse the * contents of the response and handle it appropriately.

To use this * operation, you must have READ access to the bucket.

This action is not * supported by Amazon S3 on Outposts.

The following operations are related * to ListObjectVersions:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListObjectVersionsAsync(const Model::ListObjectVersionsRequest& request, const ListObjectVersionsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns some or all (up to 1,000) of the objects in a bucket. You can use the * request parameters as selection criteria to return a subset of the objects in a * bucket. A 200 OK response can contain valid or invalid XML. Be sure to design * your application to parse the contents of the response and handle it * appropriately.

This action has been revised. We recommend * that you use the newer version, ListObjectsV2, * when developing applications. For backward compatibility, Amazon S3 continues to * support ListObjects.

The following operations * are related to ListObjects:

See Also:

AWS API * Reference

*/ virtual Model::ListObjectsOutcome ListObjects(const Model::ListObjectsRequest& request) const; /** *

Returns some or all (up to 1,000) of the objects in a bucket. You can use the * request parameters as selection criteria to return a subset of the objects in a * bucket. A 200 OK response can contain valid or invalid XML. Be sure to design * your application to parse the contents of the response and handle it * appropriately.

This action has been revised. We recommend * that you use the newer version, ListObjectsV2, * when developing applications. For backward compatibility, Amazon S3 continues to * support ListObjects.

The following operations * are related to ListObjects:

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListObjectsOutcomeCallable ListObjectsCallable(const Model::ListObjectsRequest& request) const; /** *

Returns some or all (up to 1,000) of the objects in a bucket. You can use the * request parameters as selection criteria to return a subset of the objects in a * bucket. A 200 OK response can contain valid or invalid XML. Be sure to design * your application to parse the contents of the response and handle it * appropriately.

This action has been revised. We recommend * that you use the newer version, ListObjectsV2, * when developing applications. For backward compatibility, Amazon S3 continues to * support ListObjects.

The following operations * are related to ListObjects:

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListObjectsAsync(const Model::ListObjectsRequest& request, const ListObjectsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns some or all (up to 1,000) of the objects in a bucket with each * request. You can use the request parameters as selection criteria to return a * subset of the objects in a bucket. A 200 OK response can contain * valid or invalid XML. Make sure to design your application to parse the contents * of the response and handle it appropriately. Objects are returned sorted in an * ascending order of the respective key names in the list. For more information * about listing objects, see Listing * object keys programmatically

To use this operation, you must have * READ access to the bucket.

To use this action in an Identity and Access * Management (IAM) policy, you must have permissions to perform the * s3:ListBucket action. The bucket owner has this permission by * default and can grant this permission to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

This * section describes the latest revision of this action. We recommend that you use * this revised API for application development. For backward compatibility, Amazon * S3 continues to support the prior version of this API, ListObjects.

*

To get a list of your buckets, see ListBuckets.

*

The following operations are related to ListObjectsV2:

See Also:

AWS * API Reference

*/ virtual Model::ListObjectsV2Outcome ListObjectsV2(const Model::ListObjectsV2Request& request) const; /** *

Returns some or all (up to 1,000) of the objects in a bucket with each * request. You can use the request parameters as selection criteria to return a * subset of the objects in a bucket. A 200 OK response can contain * valid or invalid XML. Make sure to design your application to parse the contents * of the response and handle it appropriately. Objects are returned sorted in an * ascending order of the respective key names in the list. For more information * about listing objects, see Listing * object keys programmatically

To use this operation, you must have * READ access to the bucket.

To use this action in an Identity and Access * Management (IAM) policy, you must have permissions to perform the * s3:ListBucket action. The bucket owner has this permission by * default and can grant this permission to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

This * section describes the latest revision of this action. We recommend that you use * this revised API for application development. For backward compatibility, Amazon * S3 continues to support the prior version of this API, ListObjects.

*

To get a list of your buckets, see ListBuckets.

*

The following operations are related to ListObjectsV2:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListObjectsV2OutcomeCallable ListObjectsV2Callable(const Model::ListObjectsV2Request& request) const; /** *

Returns some or all (up to 1,000) of the objects in a bucket with each * request. You can use the request parameters as selection criteria to return a * subset of the objects in a bucket. A 200 OK response can contain * valid or invalid XML. Make sure to design your application to parse the contents * of the response and handle it appropriately. Objects are returned sorted in an * ascending order of the respective key names in the list. For more information * about listing objects, see Listing * object keys programmatically

To use this operation, you must have * READ access to the bucket.

To use this action in an Identity and Access * Management (IAM) policy, you must have permissions to perform the * s3:ListBucket action. The bucket owner has this permission by * default and can grant this permission to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

This * section describes the latest revision of this action. We recommend that you use * this revised API for application development. For backward compatibility, Amazon * S3 continues to support the prior version of this API, ListObjects.

*

To get a list of your buckets, see ListBuckets.

*

The following operations are related to ListObjectsV2:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListObjectsV2Async(const Model::ListObjectsV2Request& request, const ListObjectsV2ResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Lists the parts that have been uploaded for a specific multipart upload. This * operation must include the upload ID, which you obtain by sending the initiate * multipart upload request (see CreateMultipartUpload). * This request returns a maximum of 1,000 uploaded parts. The default number of * parts returned is 1,000 parts. You can restrict the number of parts returned by * specifying the max-parts request parameter. If your multipart * upload consists of more than 1,000 parts, the response returns an * IsTruncated field with the value of true, and a * NextPartNumberMarker element. In subsequent ListParts * requests you can include the part-number-marker query string parameter and set * its value to the NextPartNumberMarker field value from the previous * response.

For more information on multipart uploads, see Uploading * Objects Using Multipart Upload.

For information on permissions * required to use the multipart upload API, see Multipart * Upload and Permissions.

The following operations are related to * ListParts:

See Also:

AWS API * Reference

*/ virtual Model::ListPartsOutcome ListParts(const Model::ListPartsRequest& request) const; /** *

Lists the parts that have been uploaded for a specific multipart upload. This * operation must include the upload ID, which you obtain by sending the initiate * multipart upload request (see CreateMultipartUpload). * This request returns a maximum of 1,000 uploaded parts. The default number of * parts returned is 1,000 parts. You can restrict the number of parts returned by * specifying the max-parts request parameter. If your multipart * upload consists of more than 1,000 parts, the response returns an * IsTruncated field with the value of true, and a * NextPartNumberMarker element. In subsequent ListParts * requests you can include the part-number-marker query string parameter and set * its value to the NextPartNumberMarker field value from the previous * response.

For more information on multipart uploads, see Uploading * Objects Using Multipart Upload.

For information on permissions * required to use the multipart upload API, see Multipart * Upload and Permissions.

The following operations are related to * ListParts:

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListPartsOutcomeCallable ListPartsCallable(const Model::ListPartsRequest& request) const; /** *

Lists the parts that have been uploaded for a specific multipart upload. This * operation must include the upload ID, which you obtain by sending the initiate * multipart upload request (see CreateMultipartUpload). * This request returns a maximum of 1,000 uploaded parts. The default number of * parts returned is 1,000 parts. You can restrict the number of parts returned by * specifying the max-parts request parameter. If your multipart * upload consists of more than 1,000 parts, the response returns an * IsTruncated field with the value of true, and a * NextPartNumberMarker element. In subsequent ListParts * requests you can include the part-number-marker query string parameter and set * its value to the NextPartNumberMarker field value from the previous * response.

For more information on multipart uploads, see Uploading * Objects Using Multipart Upload.

For information on permissions * required to use the multipart upload API, see Multipart * Upload and Permissions.

The following operations are related to * ListParts:

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListPartsAsync(const Model::ListPartsRequest& request, const ListPartsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer * Acceleration is a bucket-level feature that enables you to perform faster data * transfers to Amazon S3.

To use this operation, you must have permission * to perform the s3:PutAccelerateConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

The Transfer * Acceleration state of a bucket can be set to one of the following two * values:

  • Enabled – Enables accelerated data transfers to the * bucket.

  • Suspended – Disables accelerated data transfers to * the bucket.

The GetBucketAccelerateConfiguration * action returns the transfer acceleration state of a bucket.

After setting * the Transfer Acceleration state of a bucket to Enabled, it might take up to * thirty minutes before the data transfer rates to the bucket increase.

* The name of the bucket used for Transfer Acceleration must be DNS-compliant and * must not contain periods (".").

For more information about transfer * acceleration, see Transfer * Acceleration.

The following operations are related to * PutBucketAccelerateConfiguration:

See Also:

AWS * API Reference

*/ virtual Model::PutBucketAccelerateConfigurationOutcome PutBucketAccelerateConfiguration(const Model::PutBucketAccelerateConfigurationRequest& request) const; /** *

Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer * Acceleration is a bucket-level feature that enables you to perform faster data * transfers to Amazon S3.

To use this operation, you must have permission * to perform the s3:PutAccelerateConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

The Transfer * Acceleration state of a bucket can be set to one of the following two * values:

  • Enabled – Enables accelerated data transfers to the * bucket.

  • Suspended – Disables accelerated data transfers to * the bucket.

The GetBucketAccelerateConfiguration * action returns the transfer acceleration state of a bucket.

After setting * the Transfer Acceleration state of a bucket to Enabled, it might take up to * thirty minutes before the data transfer rates to the bucket increase.

* The name of the bucket used for Transfer Acceleration must be DNS-compliant and * must not contain periods (".").

For more information about transfer * acceleration, see Transfer * Acceleration.

The following operations are related to * PutBucketAccelerateConfiguration:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketAccelerateConfigurationOutcomeCallable PutBucketAccelerateConfigurationCallable(const Model::PutBucketAccelerateConfigurationRequest& request) const; /** *

Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer * Acceleration is a bucket-level feature that enables you to perform faster data * transfers to Amazon S3.

To use this operation, you must have permission * to perform the s3:PutAccelerateConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

The Transfer * Acceleration state of a bucket can be set to one of the following two * values:

  • Enabled – Enables accelerated data transfers to the * bucket.

  • Suspended – Disables accelerated data transfers to * the bucket.

The GetBucketAccelerateConfiguration * action returns the transfer acceleration state of a bucket.

After setting * the Transfer Acceleration state of a bucket to Enabled, it might take up to * thirty minutes before the data transfer rates to the bucket increase.

* The name of the bucket used for Transfer Acceleration must be DNS-compliant and * must not contain periods (".").

For more information about transfer * acceleration, see Transfer * Acceleration.

The following operations are related to * PutBucketAccelerateConfiguration:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketAccelerateConfigurationAsync(const Model::PutBucketAccelerateConfigurationRequest& request, const PutBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Sets the permissions on an existing bucket using access control lists (ACL). * For more information, see Using * ACLs. To set the ACL of a bucket, you must have WRITE_ACP * permission.

You can use one of the following two ways to set a bucket's * permissions:

  • Specify the ACL in the request body

  • Specify permissions using request headers

You * cannot specify access permission using both the body and the request * headers.

Depending on your application needs, you may choose to * set the ACL on a bucket using either the request body or the headers. For * example, if you have an existing application that updates a bucket ACL using the * request body, then you can continue to use that approach.

Access * Permissions

You can set access permissions using one of the * following methods:

  • Specify a canned ACL with the * x-amz-acl request header. Amazon S3 supports a set of predefined * ACLs, known as canned ACLs. Each canned ACL has a predefined set of * grantees and permissions. Specify the canned ACL name as the value of * x-amz-acl. If you use this header, you cannot use other access * control-specific headers in your request. For more information, see Canned * ACL.

  • Specify access permissions explicitly with the * x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control * headers. When using these headers, you specify explicit access permissions and * grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the * permission. If you use these ACL-specific headers, you cannot use the * x-amz-acl header to set a canned ACL. These parameters map to the * set of permissions that Amazon S3 supports in an ACL. For more information, see * Access * Control List (ACL) Overview.

    You specify each grantee as a type=value * pair, where the type is one of the following:

    • id * – if the value specified is the canonical user ID of an Amazon Web Services * account

    • uri – if you are granting permissions to * a predefined group

    • emailAddress – if the value * specified is the email address of an Amazon Web Services account

      *

      Using email addresses to specify a grantee is only supported in the following * Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the * Amazon S3 supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

      *

    For example, the following x-amz-grant-write header * grants create, overwrite, and delete objects permission to LogDelivery group * predefined by Amazon S3 and two Amazon Web Services accounts identified by their * email addresses.

    x-amz-grant-write: * uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", * id="555566667777"

You can use either a canned ACL or * specify access permissions explicitly. You cannot do both.

Grantee * Values

You can specify the person (grantee) to whom you're assigning * access rights (using request elements) in the following ways:

  • By the person's ID:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> * </Grantee>

    DisplayName is optional and ignored in the * request

  • By URI:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

  • By Email address:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee> *

    The grantee is resolved to the CanonicalUser and, in a response to a GET * Object acl request, appears as the CanonicalUser.

    Using email * addresses to specify a grantee is only supported in the following Amazon Web * Services Regions:

    • US East (N. Virginia)

    • US * West (N. California)

    • US West (Oregon)

    • Asia * Pacific (Singapore)

    • Asia Pacific (Sydney)

    • Asia Pacific (Tokyo)

    • Europe (Ireland)

    • South America (São Paulo)

    For a list of all the Amazon S3 * supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

    *

Related Resources

See Also:

AWS API * Reference

*/ virtual Model::PutBucketAclOutcome PutBucketAcl(const Model::PutBucketAclRequest& request) const; /** *

Sets the permissions on an existing bucket using access control lists (ACL). * For more information, see Using * ACLs. To set the ACL of a bucket, you must have WRITE_ACP * permission.

You can use one of the following two ways to set a bucket's * permissions:

  • Specify the ACL in the request body

  • Specify permissions using request headers

You * cannot specify access permission using both the body and the request * headers.

Depending on your application needs, you may choose to * set the ACL on a bucket using either the request body or the headers. For * example, if you have an existing application that updates a bucket ACL using the * request body, then you can continue to use that approach.

Access * Permissions

You can set access permissions using one of the * following methods:

  • Specify a canned ACL with the * x-amz-acl request header. Amazon S3 supports a set of predefined * ACLs, known as canned ACLs. Each canned ACL has a predefined set of * grantees and permissions. Specify the canned ACL name as the value of * x-amz-acl. If you use this header, you cannot use other access * control-specific headers in your request. For more information, see Canned * ACL.

  • Specify access permissions explicitly with the * x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control * headers. When using these headers, you specify explicit access permissions and * grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the * permission. If you use these ACL-specific headers, you cannot use the * x-amz-acl header to set a canned ACL. These parameters map to the * set of permissions that Amazon S3 supports in an ACL. For more information, see * Access * Control List (ACL) Overview.

    You specify each grantee as a type=value * pair, where the type is one of the following:

    • id * – if the value specified is the canonical user ID of an Amazon Web Services * account

    • uri – if you are granting permissions to * a predefined group

    • emailAddress – if the value * specified is the email address of an Amazon Web Services account

      *

      Using email addresses to specify a grantee is only supported in the following * Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the * Amazon S3 supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

      *

    For example, the following x-amz-grant-write header * grants create, overwrite, and delete objects permission to LogDelivery group * predefined by Amazon S3 and two Amazon Web Services accounts identified by their * email addresses.

    x-amz-grant-write: * uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", * id="555566667777"

You can use either a canned ACL or * specify access permissions explicitly. You cannot do both.

Grantee * Values

You can specify the person (grantee) to whom you're assigning * access rights (using request elements) in the following ways:

  • By the person's ID:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> * </Grantee>

    DisplayName is optional and ignored in the * request

  • By URI:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

  • By Email address:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee> *

    The grantee is resolved to the CanonicalUser and, in a response to a GET * Object acl request, appears as the CanonicalUser.

    Using email * addresses to specify a grantee is only supported in the following Amazon Web * Services Regions:

    • US East (N. Virginia)

    • US * West (N. California)

    • US West (Oregon)

    • Asia * Pacific (Singapore)

    • Asia Pacific (Sydney)

    • Asia Pacific (Tokyo)

    • Europe (Ireland)

    • South America (São Paulo)

    For a list of all the Amazon S3 * supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

    *

Related Resources

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketAclOutcomeCallable PutBucketAclCallable(const Model::PutBucketAclRequest& request) const; /** *

Sets the permissions on an existing bucket using access control lists (ACL). * For more information, see Using * ACLs. To set the ACL of a bucket, you must have WRITE_ACP * permission.

You can use one of the following two ways to set a bucket's * permissions:

  • Specify the ACL in the request body

  • Specify permissions using request headers

You * cannot specify access permission using both the body and the request * headers.

Depending on your application needs, you may choose to * set the ACL on a bucket using either the request body or the headers. For * example, if you have an existing application that updates a bucket ACL using the * request body, then you can continue to use that approach.

Access * Permissions

You can set access permissions using one of the * following methods:

  • Specify a canned ACL with the * x-amz-acl request header. Amazon S3 supports a set of predefined * ACLs, known as canned ACLs. Each canned ACL has a predefined set of * grantees and permissions. Specify the canned ACL name as the value of * x-amz-acl. If you use this header, you cannot use other access * control-specific headers in your request. For more information, see Canned * ACL.

  • Specify access permissions explicitly with the * x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control * headers. When using these headers, you specify explicit access permissions and * grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the * permission. If you use these ACL-specific headers, you cannot use the * x-amz-acl header to set a canned ACL. These parameters map to the * set of permissions that Amazon S3 supports in an ACL. For more information, see * Access * Control List (ACL) Overview.

    You specify each grantee as a type=value * pair, where the type is one of the following:

    • id * – if the value specified is the canonical user ID of an Amazon Web Services * account

    • uri – if you are granting permissions to * a predefined group

    • emailAddress – if the value * specified is the email address of an Amazon Web Services account

      *

      Using email addresses to specify a grantee is only supported in the following * Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the * Amazon S3 supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

      *

    For example, the following x-amz-grant-write header * grants create, overwrite, and delete objects permission to LogDelivery group * predefined by Amazon S3 and two Amazon Web Services accounts identified by their * email addresses.

    x-amz-grant-write: * uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", * id="555566667777"

You can use either a canned ACL or * specify access permissions explicitly. You cannot do both.

Grantee * Values

You can specify the person (grantee) to whom you're assigning * access rights (using request elements) in the following ways:

  • By the person's ID:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> * </Grantee>

    DisplayName is optional and ignored in the * request

  • By URI:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

  • By Email address:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee> *

    The grantee is resolved to the CanonicalUser and, in a response to a GET * Object acl request, appears as the CanonicalUser.

    Using email * addresses to specify a grantee is only supported in the following Amazon Web * Services Regions:

    • US East (N. Virginia)

    • US * West (N. California)

    • US West (Oregon)

    • Asia * Pacific (Singapore)

    • Asia Pacific (Sydney)

    • Asia Pacific (Tokyo)

    • Europe (Ireland)

    • South America (São Paulo)

    For a list of all the Amazon S3 * supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

    *

Related Resources

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketAclAsync(const Model::PutBucketAclRequest& request, const PutBucketAclResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Sets an analytics configuration for the bucket (specified by the analytics * configuration ID). You can have up to 1,000 analytics configurations per * bucket.

You can choose to have storage class analysis export analysis * reports sent to a comma-separated values (CSV) flat file. See the * DataExport request element. Reports are updated daily and are based * on the object filters that you configure. When selecting data export, you * specify a destination bucket and an optional destination prefix where the file * is written. You can export the data to a destination bucket in a different * account. However, the destination bucket must be in the same Region as the * bucket that you are making the PUT analytics configuration to. For more * information, see Amazon * S3 Analytics – Storage Class Analysis.

You must create a * bucket policy on the destination bucket where the exported file is written to * grant permissions to Amazon S3 to write objects to the bucket. For an example * policy, see Granting * Permissions for Amazon S3 Inventory and Storage Class Analysis.

*

To use this operation, you must have permissions to perform the * s3:PutAnalyticsConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

* Special Errors

    • HTTP Error: HTTP 400 Bad * Request

    • Code: InvalidArgument

    • * Cause: Invalid argument.

    • * HTTP Error: HTTP 400 Bad Request

    • Code: * TooManyConfigurations

    • Cause: You are attempting to * create a new configuration but have already reached the 1,000-configuration * limit.

    • HTTP Error: HTTP 403 * Forbidden

    • Code: AccessDenied

    • * Cause: You are not the owner of the specified bucket, or you do not have the * s3:PutAnalyticsConfiguration bucket permission to set the configuration on the * bucket.

Related * Resources

See Also:

AWS * API Reference

*/ virtual Model::PutBucketAnalyticsConfigurationOutcome PutBucketAnalyticsConfiguration(const Model::PutBucketAnalyticsConfigurationRequest& request) const; /** *

Sets an analytics configuration for the bucket (specified by the analytics * configuration ID). You can have up to 1,000 analytics configurations per * bucket.

You can choose to have storage class analysis export analysis * reports sent to a comma-separated values (CSV) flat file. See the * DataExport request element. Reports are updated daily and are based * on the object filters that you configure. When selecting data export, you * specify a destination bucket and an optional destination prefix where the file * is written. You can export the data to a destination bucket in a different * account. However, the destination bucket must be in the same Region as the * bucket that you are making the PUT analytics configuration to. For more * information, see Amazon * S3 Analytics – Storage Class Analysis.

You must create a * bucket policy on the destination bucket where the exported file is written to * grant permissions to Amazon S3 to write objects to the bucket. For an example * policy, see Granting * Permissions for Amazon S3 Inventory and Storage Class Analysis.

*

To use this operation, you must have permissions to perform the * s3:PutAnalyticsConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

* Special Errors

    • HTTP Error: HTTP 400 Bad * Request

    • Code: InvalidArgument

    • * Cause: Invalid argument.

    • * HTTP Error: HTTP 400 Bad Request

    • Code: * TooManyConfigurations

    • Cause: You are attempting to * create a new configuration but have already reached the 1,000-configuration * limit.

    • HTTP Error: HTTP 403 * Forbidden

    • Code: AccessDenied

    • * Cause: You are not the owner of the specified bucket, or you do not have the * s3:PutAnalyticsConfiguration bucket permission to set the configuration on the * bucket.

Related * Resources

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketAnalyticsConfigurationOutcomeCallable PutBucketAnalyticsConfigurationCallable(const Model::PutBucketAnalyticsConfigurationRequest& request) const; /** *

Sets an analytics configuration for the bucket (specified by the analytics * configuration ID). You can have up to 1,000 analytics configurations per * bucket.

You can choose to have storage class analysis export analysis * reports sent to a comma-separated values (CSV) flat file. See the * DataExport request element. Reports are updated daily and are based * on the object filters that you configure. When selecting data export, you * specify a destination bucket and an optional destination prefix where the file * is written. You can export the data to a destination bucket in a different * account. However, the destination bucket must be in the same Region as the * bucket that you are making the PUT analytics configuration to. For more * information, see Amazon * S3 Analytics – Storage Class Analysis.

You must create a * bucket policy on the destination bucket where the exported file is written to * grant permissions to Amazon S3 to write objects to the bucket. For an example * policy, see Granting * Permissions for Amazon S3 Inventory and Storage Class Analysis.

*

To use this operation, you must have permissions to perform the * s3:PutAnalyticsConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

* Special Errors

    • HTTP Error: HTTP 400 Bad * Request

    • Code: InvalidArgument

    • * Cause: Invalid argument.

    • * HTTP Error: HTTP 400 Bad Request

    • Code: * TooManyConfigurations

    • Cause: You are attempting to * create a new configuration but have already reached the 1,000-configuration * limit.

    • HTTP Error: HTTP 403 * Forbidden

    • Code: AccessDenied

    • * Cause: You are not the owner of the specified bucket, or you do not have the * s3:PutAnalyticsConfiguration bucket permission to set the configuration on the * bucket.

Related * Resources

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketAnalyticsConfigurationAsync(const Model::PutBucketAnalyticsConfigurationRequest& request, const PutBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Sets the cors configuration for your bucket. If the * configuration exists, Amazon S3 replaces it.

To use this operation, you * must be allowed to perform the s3:PutBucketCORS action. By default, * the bucket owner has this permission and can grant it to others.

You set * this configuration on a bucket so that the bucket can service cross-origin * requests. For example, you might want to enable a request whose origin is * http://www.example.com to access your Amazon S3 bucket at * my.example.bucket.com by using the browser's * XMLHttpRequest capability.

To enable cross-origin resource * sharing (CORS) on a bucket, you add the cors subresource to the * bucket. The cors subresource is an XML document in which you * configure rules that identify origins and the HTTP methods that can be executed * on your bucket. The document is limited to 64 KB in size.

When Amazon S3 * receives a cross-origin request (or a pre-flight OPTIONS request) against a * bucket, it evaluates the cors configuration on the bucket and uses * the first CORSRule rule that matches the incoming browser request * to enable a cross-origin request. For a rule to match, the following conditions * must be met:

  • The request's Origin header must * match AllowedOrigin elements.

  • The request method * (for example, GET, PUT, HEAD, and so on) or the * Access-Control-Request-Method header in case of a pre-flight * OPTIONS request must be one of the AllowedMethod * elements.

  • Every header specified in the * Access-Control-Request-Headers request header of a pre-flight * request must match an AllowedHeader element.

* For more information about CORS, go to Enabling * Cross-Origin Resource Sharing in the Amazon S3 User Guide.

Related Resources

See Also:

AWS * API Reference

*/ virtual Model::PutBucketCorsOutcome PutBucketCors(const Model::PutBucketCorsRequest& request) const; /** *

Sets the cors configuration for your bucket. If the * configuration exists, Amazon S3 replaces it.

To use this operation, you * must be allowed to perform the s3:PutBucketCORS action. By default, * the bucket owner has this permission and can grant it to others.

You set * this configuration on a bucket so that the bucket can service cross-origin * requests. For example, you might want to enable a request whose origin is * http://www.example.com to access your Amazon S3 bucket at * my.example.bucket.com by using the browser's * XMLHttpRequest capability.

To enable cross-origin resource * sharing (CORS) on a bucket, you add the cors subresource to the * bucket. The cors subresource is an XML document in which you * configure rules that identify origins and the HTTP methods that can be executed * on your bucket. The document is limited to 64 KB in size.

When Amazon S3 * receives a cross-origin request (or a pre-flight OPTIONS request) against a * bucket, it evaluates the cors configuration on the bucket and uses * the first CORSRule rule that matches the incoming browser request * to enable a cross-origin request. For a rule to match, the following conditions * must be met:

  • The request's Origin header must * match AllowedOrigin elements.

  • The request method * (for example, GET, PUT, HEAD, and so on) or the * Access-Control-Request-Method header in case of a pre-flight * OPTIONS request must be one of the AllowedMethod * elements.

  • Every header specified in the * Access-Control-Request-Headers request header of a pre-flight * request must match an AllowedHeader element.

* For more information about CORS, go to Enabling * Cross-Origin Resource Sharing in the Amazon S3 User Guide.

Related Resources

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketCorsOutcomeCallable PutBucketCorsCallable(const Model::PutBucketCorsRequest& request) const; /** *

Sets the cors configuration for your bucket. If the * configuration exists, Amazon S3 replaces it.

To use this operation, you * must be allowed to perform the s3:PutBucketCORS action. By default, * the bucket owner has this permission and can grant it to others.

You set * this configuration on a bucket so that the bucket can service cross-origin * requests. For example, you might want to enable a request whose origin is * http://www.example.com to access your Amazon S3 bucket at * my.example.bucket.com by using the browser's * XMLHttpRequest capability.

To enable cross-origin resource * sharing (CORS) on a bucket, you add the cors subresource to the * bucket. The cors subresource is an XML document in which you * configure rules that identify origins and the HTTP methods that can be executed * on your bucket. The document is limited to 64 KB in size.

When Amazon S3 * receives a cross-origin request (or a pre-flight OPTIONS request) against a * bucket, it evaluates the cors configuration on the bucket and uses * the first CORSRule rule that matches the incoming browser request * to enable a cross-origin request. For a rule to match, the following conditions * must be met:

  • The request's Origin header must * match AllowedOrigin elements.

  • The request method * (for example, GET, PUT, HEAD, and so on) or the * Access-Control-Request-Method header in case of a pre-flight * OPTIONS request must be one of the AllowedMethod * elements.

  • Every header specified in the * Access-Control-Request-Headers request header of a pre-flight * request must match an AllowedHeader element.

* For more information about CORS, go to Enabling * Cross-Origin Resource Sharing in the Amazon S3 User Guide.

Related Resources

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketCorsAsync(const Model::PutBucketCorsRequest& request, const PutBucketCorsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

This action uses the encryption subresource to configure default * encryption and Amazon S3 Bucket Key for an existing bucket.

Default * encryption for a bucket can use server-side encryption with Amazon S3-managed * keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify default * encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For * information about default encryption, see Amazon * S3 default bucket encryption in the Amazon S3 User Guide. For more * information about S3 Bucket Keys, see Amazon S3 * Bucket Keys in the Amazon S3 User Guide.

This * action requires Amazon Web Services Signature Version 4. For more information, * see * Authenticating Requests (Amazon Web Services Signature Version 4).

*

To use this operation, you must have permissions to perform the * s3:PutEncryptionConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. *

Related Resources

See Also:

AWS * API Reference

*/
// Synchronous call: blocks until Amazon S3 responds; the returned Outcome carries
// either the success result or the service error.
virtual Model::PutBucketEncryptionOutcome PutBucketEncryption(const Model::PutBucketEncryptionRequest& request) const;
/** *

This action uses the encryption subresource to configure default * encryption and Amazon S3 Bucket Key for an existing bucket.

Default * encryption for a bucket can use server-side encryption with Amazon S3-managed * keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify default * encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For * information about default encryption, see Amazon * S3 default bucket encryption in the Amazon S3 User Guide. For more * information about S3 Bucket Keys, see Amazon S3 * Bucket Keys in the Amazon S3 User Guide.

This * action requires Amazon Web Services Signature Version 4. For more information, * see * Authenticating Requests (Amazon Web Services Signature Version 4).

*

To use this operation, you must have permissions to perform the * s3:PutEncryptionConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. *

Related Resources

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */
// Callable variant: dispatches the request and returns a future-style Outcome so the
// caller can overlap this operation with other requests.
virtual Model::PutBucketEncryptionOutcomeCallable PutBucketEncryptionCallable(const Model::PutBucketEncryptionRequest& request) const;
/** *

This action uses the encryption subresource to configure default * encryption and Amazon S3 Bucket Key for an existing bucket.

Default * encryption for a bucket can use server-side encryption with Amazon S3-managed * keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify default * encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For * information about default encryption, see Amazon * S3 default bucket encryption in the Amazon S3 User Guide. For more * information about S3 Bucket Keys, see Amazon S3 * Bucket Keys in the Amazon S3 User Guide.

This * action requires Amazon Web Services Signature Version 4. For more information, * see * Authenticating Requests (Amazon Web Services Signature Version 4).

*

To use this operation, you must have permissions to perform the * s3:PutEncryptionConfiguration action. The bucket owner has this * permission by default. The bucket owner can grant this permission to others. For * more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. *

Related Resources

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */
// Async variant: returns immediately; `handler` is invoked on an executor thread with the
// operation outcome. NOTE(review): restored the `<const Aws::Client::AsyncCallerContext>`
// template argument that extraction stripped from `std::shared_ptr&`.
virtual void PutBucketEncryptionAsync(const Model::PutBucketEncryptionRequest& request,
                                      const PutBucketEncryptionResponseReceivedHandler& handler,
                                      const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/** *

Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can * have up to 1,000 S3 Intelligent-Tiering configurations per bucket.

The S3 * Intelligent-Tiering storage class is designed to optimize storage costs by * automatically moving data to the most cost-effective storage access tier, * without performance impact or operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings in two low latency and high throughput access * tiers. For data that can be accessed asynchronously, you can choose to activate * automatic archiving capabilities within the S3 Intelligent-Tiering storage * class.

The S3 Intelligent-Tiering storage class is the ideal storage * class for data with unknown, changing, or unpredictable access patterns, * independent of object size or retention period. If the size of an object is less * than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, * but they are always charged at the Frequent Access tier rates in the S3 * Intelligent-Tiering storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * PutBucketIntelligentTieringConfiguration include:

You only need S3 Intelligent-Tiering enabled on a * bucket if you want to automatically move objects stored in the S3 * Intelligent-Tiering storage class to the Archive Access or Deep Archive Access * tier.

Special Errors

  • HTTP 400 Bad Request Error

    • Code: * InvalidArgument

    • Cause: Invalid Argument

    • *
  • HTTP 400 Bad Request Error

      *
    • Code: TooManyConfigurations

    • Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

  • HTTP * 403 Forbidden Error

    • Code: AccessDenied

    • *
    • Cause: You are not the owner of the specified bucket, or you do * not have the s3:PutIntelligentTieringConfiguration bucket * permission to set the configuration on the bucket.

  • *

See Also:

AWS * API Reference

*/
// Synchronous call: blocks until Amazon S3 responds; the returned Outcome carries
// either the success result or the service error.
virtual Model::PutBucketIntelligentTieringConfigurationOutcome PutBucketIntelligentTieringConfiguration(const Model::PutBucketIntelligentTieringConfigurationRequest& request) const;
/** *

Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can * have up to 1,000 S3 Intelligent-Tiering configurations per bucket.

The S3 * Intelligent-Tiering storage class is designed to optimize storage costs by * automatically moving data to the most cost-effective storage access tier, * without performance impact or operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings in two low latency and high throughput access * tiers. For data that can be accessed asynchronously, you can choose to activate * automatic archiving capabilities within the S3 Intelligent-Tiering storage * class.

The S3 Intelligent-Tiering storage class is the ideal storage * class for data with unknown, changing, or unpredictable access patterns, * independent of object size or retention period. If the size of an object is less * than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, * but they are always charged at the Frequent Access tier rates in the S3 * Intelligent-Tiering storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * PutBucketIntelligentTieringConfiguration include:

You only need S3 Intelligent-Tiering enabled on a * bucket if you want to automatically move objects stored in the S3 * Intelligent-Tiering storage class to the Archive Access or Deep Archive Access * tier.

Special Errors

  • HTTP 400 Bad Request Error

    • Code: * InvalidArgument

    • Cause: Invalid Argument

    • *
  • HTTP 400 Bad Request Error

      *
    • Code: TooManyConfigurations

    • Cause: You * are attempting to create a new configuration but have already reached the * 1,000-configuration limit.

  • HTTP * 403 Forbidden Error

    • Code: AccessDenied

    • *
    • Cause: You are not the owner of the specified bucket, or you do * not have the s3:PutIntelligentTieringConfiguration bucket * permission to set the configuration on the bucket.

  • *

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */
// Callable variant: dispatches the request and returns a future-style Outcome so the
// caller can overlap this operation with other requests.
virtual Model::PutBucketIntelligentTieringConfigurationOutcomeCallable PutBucketIntelligentTieringConfigurationCallable(const Model::PutBucketIntelligentTieringConfigurationRequest& request) const;
/** *

Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can * have up to 1,000 S3 Intelligent-Tiering configurations per bucket.

The S3 * Intelligent-Tiering storage class is designed to optimize storage costs by * automatically moving data to the most cost-effective storage access tier, * without performance impact or operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings in two low latency and high throughput access * tiers. For data that can be accessed asynchronously, you can choose to activate * automatic archiving capabilities within the S3 Intelligent-Tiering storage * class.

The S3 Intelligent-Tiering storage class is the ideal storage * class for data with unknown, changing, or unpredictable access patterns, * independent of object size or retention period. If the size of an object is less * than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, * but they are always charged at the Frequent Access tier rates in the S3 * Intelligent-Tiering storage class.

For more information, see Storage * class for automatically optimizing frequently and infrequently accessed * objects.

Operations related to * PutBucketIntelligentTieringConfiguration include:

You only need S3 Intelligent-Tiering enabled on a * bucket if you want to automatically move objects stored in the S3 * Intelligent-Tiering storage class to the Archive Access or Deep Archive Access * tier.

Special Errors

  • HTTP 400 Bad Request Error

    • Code: * InvalidArgument

    • Cause: Invalid Argument

    • *
  • HTTP 400 Bad Request Error

      *
    • Code: TooManyConfigurations

    • Cause: You * are attempting to create a new configuration but have already reached the * 1,000-configuration limit.

  • HTTP * 403 Forbidden Error

    • Code: AccessDenied

    • *
    • Cause: You are not the owner of the specified bucket, or you do * not have the s3:PutIntelligentTieringConfiguration bucket * permission to set the configuration on the bucket.

  • *

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */
// Async variant: returns immediately; `handler` is invoked on an executor thread with the
// operation outcome. NOTE(review): restored the `<const Aws::Client::AsyncCallerContext>`
// template argument that extraction stripped from `std::shared_ptr&`.
virtual void PutBucketIntelligentTieringConfigurationAsync(const Model::PutBucketIntelligentTieringConfigurationRequest& request,
                                                           const PutBucketIntelligentTieringConfigurationResponseReceivedHandler& handler,
                                                           const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/** *

This implementation of the PUT action adds an inventory * configuration (identified by the inventory ID) to the bucket. You can have up to * 1,000 inventory configurations per bucket.

Amazon S3 inventory generates * inventories of the objects in the bucket on a daily or weekly basis, and the * results are published to a flat file. The bucket that is inventoried is called * the source bucket, and the bucket where the inventory flat file is stored * is called the destination bucket. The destination bucket must be * in the same Amazon Web Services Region as the source bucket.

When * you configure an inventory for a source bucket, you specify the * destination bucket where you want the inventory to be stored, and whether * to generate the inventory daily or weekly. You can also configure what object * metadata to include and whether to inventory all object versions or only current * versions. For more information, see Amazon * S3 Inventory in the Amazon S3 User Guide.

You must create * a bucket policy on the destination bucket to grant permissions to Amazon * S3 to write objects to the bucket in the defined location. For an example * policy, see * Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

*

To use this operation, you must have permissions to perform the * s3:PutInventoryConfiguration action. The bucket owner has this * permission by default and can grant this permission to others. For more * information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources in the Amazon S3 User * Guide.

Special Errors

  • HTTP 400 Bad Request Error

    • Code: * InvalidArgument

    • Cause: Invalid Argument

    • *
  • HTTP 400 Bad Request Error

      *
    • Code: TooManyConfigurations

    • Cause: You * are attempting to create a new configuration but have already reached the * 1,000-configuration limit.

  • HTTP * 403 Forbidden Error

    • Code: AccessDenied

    • *
    • Cause: You are not the owner of the specified bucket, or you do * not have the s3:PutInventoryConfiguration bucket permission to set * the configuration on the bucket.

* Related Resources

See Also:

AWS * API Reference

*/
// Synchronous call: blocks until Amazon S3 responds; the returned Outcome carries
// either the success result or the service error.
virtual Model::PutBucketInventoryConfigurationOutcome PutBucketInventoryConfiguration(const Model::PutBucketInventoryConfigurationRequest& request) const;
/** *

This implementation of the PUT action adds an inventory * configuration (identified by the inventory ID) to the bucket. You can have up to * 1,000 inventory configurations per bucket.

Amazon S3 inventory generates * inventories of the objects in the bucket on a daily or weekly basis, and the * results are published to a flat file. The bucket that is inventoried is called * the source bucket, and the bucket where the inventory flat file is stored * is called the destination bucket. The destination bucket must be * in the same Amazon Web Services Region as the source bucket.

When * you configure an inventory for a source bucket, you specify the * destination bucket where you want the inventory to be stored, and whether * to generate the inventory daily or weekly. You can also configure what object * metadata to include and whether to inventory all object versions or only current * versions. For more information, see Amazon * S3 Inventory in the Amazon S3 User Guide.

You must create * a bucket policy on the destination bucket to grant permissions to Amazon * S3 to write objects to the bucket in the defined location. For an example * policy, see * Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

*

To use this operation, you must have permissions to perform the * s3:PutInventoryConfiguration action. The bucket owner has this * permission by default and can grant this permission to others. For more * information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources in the Amazon S3 User * Guide.

Special Errors

  • HTTP 400 Bad Request Error

    • Code: * InvalidArgument

    • Cause: Invalid Argument

    • *
  • HTTP 400 Bad Request Error

      *
    • Code: TooManyConfigurations

    • Cause: You * are attempting to create a new configuration but have already reached the * 1,000-configuration limit.

  • HTTP * 403 Forbidden Error

    • Code: AccessDenied

    • *
    • Cause: You are not the owner of the specified bucket, or you do * not have the s3:PutInventoryConfiguration bucket permission to set * the configuration on the bucket.

* Related Resources

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */
// Callable variant: dispatches the request and returns a future-style Outcome so the
// caller can overlap this operation with other requests.
virtual Model::PutBucketInventoryConfigurationOutcomeCallable PutBucketInventoryConfigurationCallable(const Model::PutBucketInventoryConfigurationRequest& request) const;
/** *

This implementation of the PUT action adds an inventory * configuration (identified by the inventory ID) to the bucket. You can have up to * 1,000 inventory configurations per bucket.

Amazon S3 inventory generates * inventories of the objects in the bucket on a daily or weekly basis, and the * results are published to a flat file. The bucket that is inventoried is called * the source bucket, and the bucket where the inventory flat file is stored * is called the destination bucket. The destination bucket must be * in the same Amazon Web Services Region as the source bucket.

When * you configure an inventory for a source bucket, you specify the * destination bucket where you want the inventory to be stored, and whether * to generate the inventory daily or weekly. You can also configure what object * metadata to include and whether to inventory all object versions or only current * versions. For more information, see Amazon * S3 Inventory in the Amazon S3 User Guide.

You must create * a bucket policy on the destination bucket to grant permissions to Amazon * S3 to write objects to the bucket in the defined location. For an example * policy, see * Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

*

To use this operation, you must have permissions to perform the * s3:PutInventoryConfiguration action. The bucket owner has this * permission by default and can grant this permission to others. For more * information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources in the Amazon S3 User * Guide.

Special Errors

  • HTTP 400 Bad Request Error

    • Code: * InvalidArgument

    • Cause: Invalid Argument

    • *
  • HTTP 400 Bad Request Error

      *
    • Code: TooManyConfigurations

    • Cause: You * are attempting to create a new configuration but have already reached the * 1,000-configuration limit.

  • HTTP * 403 Forbidden Error

    • Code: AccessDenied

    • *
    • Cause: You are not the owner of the specified bucket, or you do * not have the s3:PutInventoryConfiguration bucket permission to set * the configuration on the bucket.

* Related Resources

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */
// Async variant: returns immediately; `handler` is invoked on an executor thread with the
// operation outcome. NOTE(review): restored the `<const Aws::Client::AsyncCallerContext>`
// template argument that extraction stripped from `std::shared_ptr&`.
virtual void PutBucketInventoryConfigurationAsync(const Model::PutBucketInventoryConfigurationRequest& request,
                                                  const PutBucketInventoryConfigurationResponseReceivedHandler& handler,
                                                  const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/** *

Creates a new lifecycle configuration for the bucket or replaces an existing * lifecycle configuration. For information about lifecycle configuration, see Managing * your storage lifecycle.

Bucket lifecycle configuration now * supports specifying a lifecycle rule using an object key name prefix, one or * more object tags, or a combination of both. Accordingly, this section describes * the latest API. The previous version of the API supported filtering based only * on an object key name prefix, which is supported for backward compatibility. For * the related API description, see PutBucketLifecycle.

*

Rules

You specify the lifecycle configuration in your * request body. The lifecycle configuration is specified as XML consisting of one * or more rules. Each rule consists of the following:

  • Filter * identifying a subset of objects to which the rule applies. The filter can be * based on a key name prefix, object tags, or a combination of both.

  • *
  • Status whether the rule is in effect.

  • One or more * lifecycle transition and expiration actions that you want Amazon S3 to perform * on the objects identified by the filter. If the state of your bucket is * versioning-enabled or versioning-suspended, you can have many versions of the * same object (one current version and zero or more noncurrent versions). Amazon * S3 provides predefined actions that you can specify for current and noncurrent * object versions.

For more information, see Object * Lifecycle Management and Lifecycle * Configuration Elements.

Permissions

By default, all * Amazon S3 resources are private, including buckets, objects, and related * subresources (for example, lifecycle configuration and website configuration). * Only the resource owner (that is, the Amazon Web Services account that created * it) can access the resource. The resource owner can optionally grant access * permissions to others by writing an access policy. For this operation, a user * must get the s3:PutLifecycleConfiguration permission.

You can also * explicitly deny permissions. Explicit deny also supersedes any other * permissions. If you want to block users or accounts from removing or deleting * objects from your bucket, you must deny them permissions for the following * actions:

  • s3:DeleteObject

  • *

    s3:DeleteObjectVersion

  • s3:PutLifecycleConfiguration

    *

For more information about permissions, see Managing * Access Permissions to Your Amazon S3 Resources.

The following are * related to PutBucketLifecycleConfiguration:

See Also:

AWS * API Reference

*/
// Synchronous call: blocks until Amazon S3 responds; the returned Outcome carries
// either the success result or the service error.
virtual Model::PutBucketLifecycleConfigurationOutcome PutBucketLifecycleConfiguration(const Model::PutBucketLifecycleConfigurationRequest& request) const;
/** *

Creates a new lifecycle configuration for the bucket or replaces an existing * lifecycle configuration. For information about lifecycle configuration, see Managing * your storage lifecycle.

Bucket lifecycle configuration now * supports specifying a lifecycle rule using an object key name prefix, one or * more object tags, or a combination of both. Accordingly, this section describes * the latest API. The previous version of the API supported filtering based only * on an object key name prefix, which is supported for backward compatibility. For * the related API description, see PutBucketLifecycle.

*

Rules

You specify the lifecycle configuration in your * request body. The lifecycle configuration is specified as XML consisting of one * or more rules. Each rule consists of the following:

  • Filter * identifying a subset of objects to which the rule applies. The filter can be * based on a key name prefix, object tags, or a combination of both.

  • *
  • Status whether the rule is in effect.

  • One or more * lifecycle transition and expiration actions that you want Amazon S3 to perform * on the objects identified by the filter. If the state of your bucket is * versioning-enabled or versioning-suspended, you can have many versions of the * same object (one current version and zero or more noncurrent versions). Amazon * S3 provides predefined actions that you can specify for current and noncurrent * object versions.

For more information, see Object * Lifecycle Management and Lifecycle * Configuration Elements.

Permissions

By default, all * Amazon S3 resources are private, including buckets, objects, and related * subresources (for example, lifecycle configuration and website configuration). * Only the resource owner (that is, the Amazon Web Services account that created * it) can access the resource. The resource owner can optionally grant access * permissions to others by writing an access policy. For this operation, a user * must get the s3:PutLifecycleConfiguration permission.

You can also * explicitly deny permissions. Explicit deny also supersedes any other * permissions. If you want to block users or accounts from removing or deleting * objects from your bucket, you must deny them permissions for the following * actions:

  • s3:DeleteObject

  • *

    s3:DeleteObjectVersion

  • s3:PutLifecycleConfiguration

    *

For more information about permissions, see Managing * Access Permissions to Your Amazon S3 Resources.

The following are * related to PutBucketLifecycleConfiguration:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */
// Callable variant: dispatches the request and returns a future-style Outcome so the
// caller can overlap this operation with other requests.
virtual Model::PutBucketLifecycleConfigurationOutcomeCallable PutBucketLifecycleConfigurationCallable(const Model::PutBucketLifecycleConfigurationRequest& request) const;
/** *

Creates a new lifecycle configuration for the bucket or replaces an existing * lifecycle configuration. For information about lifecycle configuration, see Managing * your storage lifecycle.

Bucket lifecycle configuration now * supports specifying a lifecycle rule using an object key name prefix, one or * more object tags, or a combination of both. Accordingly, this section describes * the latest API. The previous version of the API supported filtering based only * on an object key name prefix, which is supported for backward compatibility. For * the related API description, see PutBucketLifecycle.

*

Rules

You specify the lifecycle configuration in your * request body. The lifecycle configuration is specified as XML consisting of one * or more rules. Each rule consists of the following:

  • Filter * identifying a subset of objects to which the rule applies. The filter can be * based on a key name prefix, object tags, or a combination of both.

  • *
  • Status whether the rule is in effect.

  • One or more * lifecycle transition and expiration actions that you want Amazon S3 to perform * on the objects identified by the filter. If the state of your bucket is * versioning-enabled or versioning-suspended, you can have many versions of the * same object (one current version and zero or more noncurrent versions). Amazon * S3 provides predefined actions that you can specify for current and noncurrent * object versions.

For more information, see Object * Lifecycle Management and Lifecycle * Configuration Elements.

Permissions

By default, all * Amazon S3 resources are private, including buckets, objects, and related * subresources (for example, lifecycle configuration and website configuration). * Only the resource owner (that is, the Amazon Web Services account that created * it) can access the resource. The resource owner can optionally grant access * permissions to others by writing an access policy. For this operation, a user * must get the s3:PutLifecycleConfiguration permission.

You can also * explicitly deny permissions. Explicit deny also supersedes any other * permissions. If you want to block users or accounts from removing or deleting * objects from your bucket, you must deny them permissions for the following * actions:

  • s3:DeleteObject

  • *

    s3:DeleteObjectVersion

  • s3:PutLifecycleConfiguration

    *

For more information about permissions, see Managing * Access Permissions to Your Amazon S3 Resources.

The following are * related to PutBucketLifecycleConfiguration:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */
// Async variant: returns immediately; `handler` is invoked on an executor thread with the
// operation outcome. NOTE(review): restored the `<const Aws::Client::AsyncCallerContext>`
// template argument that extraction stripped from `std::shared_ptr&`.
virtual void PutBucketLifecycleConfigurationAsync(const Model::PutBucketLifecycleConfigurationRequest& request,
                                                  const PutBucketLifecycleConfigurationResponseReceivedHandler& handler,
                                                  const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
/** *

Set the logging parameters for a bucket and to specify permissions for who * can view and modify the logging parameters. All logs are saved to buckets in the * same Amazon Web Services Region as the source bucket. To set the logging status * of a bucket, you must be the bucket owner.

The bucket owner is * automatically granted FULL_CONTROL to all logs. You use the Grantee * request element to grant access to other people. The Permissions * request element specifies the kind of access the grantee has to the logs.

*

Grantee Values

You can specify the person (grantee) to whom * you're assigning access rights (using request elements) in the following * ways:

  • By the person's ID:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> * </Grantee>

    DisplayName is optional and ignored in the * request.

  • By Email address:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee> *

    The grantee is resolved to the CanonicalUser and, in a response to a GET * Object acl request, appears as the CanonicalUser.

  • By URI:

    *

    <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

To enable logging, you use LoggingEnabled and its children * request elements. To disable logging, you use an empty BucketLoggingStatus * request element:

<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />

For more * information about server access logging, see Server * Access Logging.

For more information about creating a bucket, see CreateBucket. * For more information about returning the logging status of a bucket, see GetBucketLogging.

*

The following operations are related to PutBucketLogging:

*

See Also:

AWS * API Reference

*/
// Synchronous call: blocks until Amazon S3 responds; the returned Outcome carries
// either the success result or the service error.
virtual Model::PutBucketLoggingOutcome PutBucketLogging(const Model::PutBucketLoggingRequest& request) const;
/** *

Set the logging parameters for a bucket and to specify permissions for who * can view and modify the logging parameters. All logs are saved to buckets in the * same Amazon Web Services Region as the source bucket. To set the logging status * of a bucket, you must be the bucket owner.

The bucket owner is * automatically granted FULL_CONTROL to all logs. You use the Grantee * request element to grant access to other people. The Permissions * request element specifies the kind of access the grantee has to the logs.

*

Grantee Values

You can specify the person (grantee) to whom * you're assigning access rights (using request elements) in the following * ways:

  • By the person's ID:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> * </Grantee>

    DisplayName is optional and ignored in the * request.

  • By Email address:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee> *

    The grantee is resolved to the CanonicalUser and, in a response to a GET * Object acl request, appears as the CanonicalUser.

  • By URI:

    *

    <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

To enable logging, you use LoggingEnabled and its children * request elements. To disable logging, you use an empty BucketLoggingStatus * request element:

<BucketLoggingStatus * xmlns="http://doc.s3.amazonaws.com/2006-03-01" />

For more * information about server access logging, see Server * Access Logging.

For more information about creating a bucket, see CreateBucket. * For more information about returning the logging status of a bucket, see GetBucketLogging.

*

The following operations are related to PutBucketLogging:

*

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */
// Callable variant: dispatches the request and returns a future-style Outcome so the
// caller can overlap this operation with other requests.
virtual Model::PutBucketLoggingOutcomeCallable PutBucketLoggingCallable(const Model::PutBucketLoggingRequest& request) const;
/** *

Set the logging parameters for a bucket and to specify permissions for who * can view and modify the logging parameters. All logs are saved to buckets in the * same Amazon Web Services Region as the source bucket. To set the logging status * of a bucket, you must be the bucket owner.

The bucket owner is * automatically granted FULL_CONTROL to all logs. You use the Grantee * request element to grant access to other people. The Permissions * request element specifies the kind of access the grantee has to the logs.

*

Grantee Values

You can specify the person (grantee) to whom * you're assigning access rights (using request elements) in the following * ways:

  • By the person's ID:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> * </Grantee>

    DisplayName is optional and ignored in the * request.

  • By Email address:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee> *

    The grantee is resolved to the CanonicalUser and, in a response to a GET * Object acl request, appears as the CanonicalUser.

  • By URI:

    *

    <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee> *

To enable logging, you use LoggingEnabled and its children * request elements. To disable logging, you use an empty BucketLoggingStatus * request element:

<BucketLoggingStatus * xmlns="http://doc.s3.amazonaws.com/2006-03-01" />

For more * information about server access logging, see Server * Access Logging.

For more information about creating a bucket, see CreateBucket. * For more information about returning the logging status of a bucket, see GetBucketLogging.

*

The following operations are related to PutBucketLogging:

*

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketLoggingAsync(const Model::PutBucketLoggingRequest& request, const PutBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /* NOTE(review): restored the AsyncCallerContext template argument stripped from std::shared_ptr; the bare std::shared_ptr& was ill-formed. */ /** *

Sets a metrics configuration (specified by the metrics configuration ID) for * the bucket. You can have up to 1,000 metrics configurations per bucket. If * you're updating an existing metrics configuration, note that this is a full * replacement of the existing metrics configuration. If you don't include the * elements you want to keep, they are erased.

To use this operation, you * must have permissions to perform the s3:PutMetricsConfiguration * action. The bucket owner has this permission by default. The bucket owner can * grant this permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * CloudWatch request metrics for Amazon S3, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to PutBucketMetricsConfiguration:

PutBucketMetricsConfiguration has the following special * error:

  • Error code: TooManyConfigurations

      *
    • Description: You are attempting to create a new configuration but have * already reached the 1,000-configuration limit.

    • HTTP Status * Code: HTTP 400 Bad Request

See Also:

AWS * API Reference

*/ virtual Model::PutBucketMetricsConfigurationOutcome PutBucketMetricsConfiguration(const Model::PutBucketMetricsConfigurationRequest& request) const; /* Returns the outcome directly; Callable/Async overloads provide future/callback forms. */ /** *

Sets a metrics configuration (specified by the metrics configuration ID) for * the bucket. You can have up to 1,000 metrics configurations per bucket. If * you're updating an existing metrics configuration, note that this is a full * replacement of the existing metrics configuration. If you don't include the * elements you want to keep, they are erased.

To use this operation, you * must have permissions to perform the s3:PutMetricsConfiguration * action. The bucket owner has this permission by default. The bucket owner can * grant this permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * CloudWatch request metrics for Amazon S3, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to PutBucketMetricsConfiguration:

PutBucketMetricsConfiguration has the following special * error:

  • Error code: TooManyConfigurations

      *
    • Description: You are attempting to create a new configuration but have * already reached the 1,000-configuration limit.

    • HTTP Status * Code: HTTP 400 Bad Request

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketMetricsConfigurationOutcomeCallable PutBucketMetricsConfigurationCallable(const Model::PutBucketMetricsConfigurationRequest& request) const; /* Non-blocking: returns a waitable OutcomeCallable for this request. */ /** *

Sets a metrics configuration (specified by the metrics configuration ID) for * the bucket. You can have up to 1,000 metrics configurations per bucket. If * you're updating an existing metrics configuration, note that this is a full * replacement of the existing metrics configuration. If you don't include the * elements you want to keep, they are erased.

To use this operation, you * must have permissions to perform the s3:PutMetricsConfiguration * action. The bucket owner has this permission by default. The bucket owner can * grant this permission to others. For more information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

For information about * CloudWatch request metrics for Amazon S3, see Monitoring * Metrics with Amazon CloudWatch.

The following operations are related * to PutBucketMetricsConfiguration:

PutBucketMetricsConfiguration has the following special * error:

  • Error code: TooManyConfigurations

      *
    • Description: You are attempting to create a new configuration but have * already reached the 1,000-configuration limit.

    • HTTP Status * Code: HTTP 400 Bad Request

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketMetricsConfigurationAsync(const Model::PutBucketMetricsConfigurationRequest& request, const PutBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /* NOTE(review): restored the AsyncCallerContext template argument stripped from std::shared_ptr; the bare std::shared_ptr& was ill-formed. */ /** *

Enables notifications of specified events for a bucket. For more information * about event notifications, see Configuring * Event Notifications.

Using this API, you can replace an existing * notification configuration. The configuration is an XML file that defines the * event types that you want Amazon S3 to publish and the destination where you * want Amazon S3 to publish an event notification when it detects an event of the * specified type.

By default, your bucket has no event notifications * configured. That is, the notification configuration will be an empty * NotificationConfiguration.

* <NotificationConfiguration>

* </NotificationConfiguration>

This action replaces the * existing notification configuration with the configuration you include in the * request body.

After Amazon S3 receives this request, it first verifies * that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue * Service (Amazon SQS) destination exists, and that the bucket owner has * permission to publish to it by sending a test notification. In the case of * Lambda destinations, Amazon S3 verifies that the Lambda function permissions * grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For * more information, see Configuring * Notifications for Amazon S3 Events.

You can disable notifications by * adding the empty NotificationConfiguration element.

By default, only the * bucket owner can configure notifications on a bucket. However, bucket owners can * use a bucket policy to grant permission to other users to set this configuration * with s3:PutBucketNotification permission.

The PUT * notification is an atomic operation. For example, suppose your notification * configuration includes SNS topic, SQS queue, and Lambda function configurations. * When you send a PUT request with this configuration, Amazon S3 sends test * messages to your SNS topic. If the message fails, the entire PUT action will * fail, and Amazon S3 will not add the configuration to your bucket.

*

Responses

If the configuration in the request body includes * only one TopicConfiguration specifying only the * s3:ReducedRedundancyLostObject event type, the response will also * include the x-amz-sns-test-message-id header containing the message * ID of the test notification sent to the topic.

The following action is * related to PutBucketNotificationConfiguration:

See Also:

AWS * API Reference

*/ virtual Model::PutBucketNotificationConfigurationOutcome PutBucketNotificationConfiguration(const Model::PutBucketNotificationConfigurationRequest& request) const; /* Returns the outcome directly; Callable/Async overloads provide future/callback forms. */ /** *

Enables notifications of specified events for a bucket. For more information * about event notifications, see Configuring * Event Notifications.

Using this API, you can replace an existing * notification configuration. The configuration is an XML file that defines the * event types that you want Amazon S3 to publish and the destination where you * want Amazon S3 to publish an event notification when it detects an event of the * specified type.

By default, your bucket has no event notifications * configured. That is, the notification configuration will be an empty * NotificationConfiguration.

* <NotificationConfiguration>

* </NotificationConfiguration>

This action replaces the * existing notification configuration with the configuration you include in the * request body.

After Amazon S3 receives this request, it first verifies * that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue * Service (Amazon SQS) destination exists, and that the bucket owner has * permission to publish to it by sending a test notification. In the case of * Lambda destinations, Amazon S3 verifies that the Lambda function permissions * grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For * more information, see Configuring * Notifications for Amazon S3 Events.

You can disable notifications by * adding the empty NotificationConfiguration element.

By default, only the * bucket owner can configure notifications on a bucket. However, bucket owners can * use a bucket policy to grant permission to other users to set this configuration * with s3:PutBucketNotification permission.

The PUT * notification is an atomic operation. For example, suppose your notification * configuration includes SNS topic, SQS queue, and Lambda function configurations. * When you send a PUT request with this configuration, Amazon S3 sends test * messages to your SNS topic. If the message fails, the entire PUT action will * fail, and Amazon S3 will not add the configuration to your bucket.

*

Responses

If the configuration in the request body includes * only one TopicConfiguration specifying only the * s3:ReducedRedundancyLostObject event type, the response will also * include the x-amz-sns-test-message-id header containing the message * ID of the test notification sent to the topic.

The following action is * related to PutBucketNotificationConfiguration:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketNotificationConfigurationOutcomeCallable PutBucketNotificationConfigurationCallable(const Model::PutBucketNotificationConfigurationRequest& request) const; /* Non-blocking: returns a waitable OutcomeCallable for this request. */ /** *

Enables notifications of specified events for a bucket. For more information * about event notifications, see Configuring * Event Notifications.

Using this API, you can replace an existing * notification configuration. The configuration is an XML file that defines the * event types that you want Amazon S3 to publish and the destination where you * want Amazon S3 to publish an event notification when it detects an event of the * specified type.

By default, your bucket has no event notifications * configured. That is, the notification configuration will be an empty * NotificationConfiguration.

* <NotificationConfiguration>

* </NotificationConfiguration>

This action replaces the * existing notification configuration with the configuration you include in the * request body.

After Amazon S3 receives this request, it first verifies * that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue * Service (Amazon SQS) destination exists, and that the bucket owner has * permission to publish to it by sending a test notification. In the case of * Lambda destinations, Amazon S3 verifies that the Lambda function permissions * grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For * more information, see Configuring * Notifications for Amazon S3 Events.

You can disable notifications by * adding the empty NotificationConfiguration element.

By default, only the * bucket owner can configure notifications on a bucket. However, bucket owners can * use a bucket policy to grant permission to other users to set this configuration * with s3:PutBucketNotification permission.

The PUT * notification is an atomic operation. For example, suppose your notification * configuration includes SNS topic, SQS queue, and Lambda function configurations. * When you send a PUT request with this configuration, Amazon S3 sends test * messages to your SNS topic. If the message fails, the entire PUT action will * fail, and Amazon S3 will not add the configuration to your bucket.

*

Responses

If the configuration in the request body includes * only one TopicConfiguration specifying only the * s3:ReducedRedundancyLostObject event type, the response will also * include the x-amz-sns-test-message-id header containing the message * ID of the test notification sent to the topic.

The following action is * related to PutBucketNotificationConfiguration:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketNotificationConfigurationAsync(const Model::PutBucketNotificationConfigurationRequest& request, const PutBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /* NOTE(review): restored the AsyncCallerContext template argument stripped from std::shared_ptr; the bare std::shared_ptr& was ill-formed. */ /** *

Creates or modifies OwnershipControls for an Amazon S3 bucket. * To use this operation, you must have the * s3:PutBucketOwnershipControls permission. For more information * about Amazon S3 permissions, see Specifying * Permissions in a Policy.

For information about Amazon S3 Object * Ownership, see Using * Object Ownership.

The following operations are related to * PutBucketOwnershipControls:

See Also:

AWS * API Reference

*/ virtual Model::PutBucketOwnershipControlsOutcome PutBucketOwnershipControls(const Model::PutBucketOwnershipControlsRequest& request) const; /* Returns the outcome directly; Callable/Async overloads provide future/callback forms. */ /** *

Creates or modifies OwnershipControls for an Amazon S3 bucket. * To use this operation, you must have the * s3:PutBucketOwnershipControls permission. For more information * about Amazon S3 permissions, see Specifying * Permissions in a Policy.

For information about Amazon S3 Object * Ownership, see Using * Object Ownership.

The following operations are related to * PutBucketOwnershipControls:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketOwnershipControlsOutcomeCallable PutBucketOwnershipControlsCallable(const Model::PutBucketOwnershipControlsRequest& request) const; /* Non-blocking: returns a waitable OutcomeCallable for this request. */ /** *

Creates or modifies OwnershipControls for an Amazon S3 bucket. * To use this operation, you must have the * s3:PutBucketOwnershipControls permission. For more information * about Amazon S3 permissions, see Specifying * Permissions in a Policy.

For information about Amazon S3 Object * Ownership, see Using * Object Ownership.

The following operations are related to * PutBucketOwnershipControls:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketOwnershipControlsAsync(const Model::PutBucketOwnershipControlsRequest& request, const PutBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /* NOTE(review): restored the AsyncCallerContext template argument stripped from std::shared_ptr; the bare std::shared_ptr& was ill-formed. */ /** *

Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using * an identity other than the root user of the Amazon Web Services account that * owns the bucket, the calling identity must have the PutBucketPolicy * permissions on the specified bucket and belong to the bucket owner's account in * order to use this operation.

If you don't have * PutBucketPolicy permissions, Amazon S3 returns a 403 Access * Denied error. If you have the correct permissions, but you're not using * an identity that belongs to the bucket owner's account, Amazon S3 returns a * 405 Method Not Allowed error.

As a security * precaution, the root user of the Amazon Web Services account that owns a bucket * can always use this operation, even if the policy explicitly denies the root * user the ability to perform this action.

For more * information, see Bucket * policy examples.

The following operations are related to * PutBucketPolicy:

See Also:

AWS * API Reference

*/ virtual Model::PutBucketPolicyOutcome PutBucketPolicy(const Model::PutBucketPolicyRequest& request) const; /* Returns the outcome directly; Callable/Async overloads provide future/callback forms. */ /** *

Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using * an identity other than the root user of the Amazon Web Services account that * owns the bucket, the calling identity must have the PutBucketPolicy * permissions on the specified bucket and belong to the bucket owner's account in * order to use this operation.

If you don't have * PutBucketPolicy permissions, Amazon S3 returns a 403 Access * Denied error. If you have the correct permissions, but you're not using * an identity that belongs to the bucket owner's account, Amazon S3 returns a * 405 Method Not Allowed error.

As a security * precaution, the root user of the Amazon Web Services account that owns a bucket * can always use this operation, even if the policy explicitly denies the root * user the ability to perform this action.

For more * information, see Bucket * policy examples.

The following operations are related to * PutBucketPolicy:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketPolicyOutcomeCallable PutBucketPolicyCallable(const Model::PutBucketPolicyRequest& request) const; /* Non-blocking: returns a waitable OutcomeCallable for this request. */ /** *

Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using * an identity other than the root user of the Amazon Web Services account that * owns the bucket, the calling identity must have the PutBucketPolicy * permissions on the specified bucket and belong to the bucket owner's account in * order to use this operation.

If you don't have * PutBucketPolicy permissions, Amazon S3 returns a 403 Access * Denied error. If you have the correct permissions, but you're not using * an identity that belongs to the bucket owner's account, Amazon S3 returns a * 405 Method Not Allowed error.

As a security * precaution, the root user of the Amazon Web Services account that owns a bucket * can always use this operation, even if the policy explicitly denies the root * user the ability to perform this action.

For more * information, see Bucket * policy examples.

The following operations are related to * PutBucketPolicy:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketPolicyAsync(const Model::PutBucketPolicyRequest& request, const PutBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /* NOTE(review): restored the AsyncCallerContext template argument stripped from std::shared_ptr; the bare std::shared_ptr& was ill-formed. */ /** *

Creates a replication configuration or replaces an existing one. For more * information, see Replication * in the Amazon S3 User Guide.

Specify the replication * configuration in the request body. In the replication configuration, you provide * the name of the destination bucket or buckets where you want Amazon S3 to * replicate objects, the IAM role that Amazon S3 can assume to replicate objects * on your behalf, and other relevant information.

A replication * configuration must include at least one rule, and can contain a maximum of * 1,000. Each rule identifies a subset of objects to replicate by filtering the * objects in the source bucket. To choose additional subsets of objects to * replicate, add a rule for each subset.

To specify a subset of the objects * in the source bucket to apply a replication rule to, add the Filter element as a * child of the Rule element. You can filter objects based on an object key prefix, * one or more object tags, or both. When you add the Filter element in the * configuration, you must also add the following elements: * DeleteMarkerReplication, Status, and * Priority.

If you are using an earlier version of the * replication configuration, Amazon S3 handles replication of delete markers * differently. For more information, see Backward * Compatibility.

For information about enabling versioning on a * bucket, see Using * Versioning.

Handling Replication of Encrypted Objects

*

By default, Amazon S3 doesn't replicate objects that are stored at rest using * server-side encryption with KMS keys. To replicate Amazon Web Services * KMS-encrypted objects, add the following: SourceSelectionCriteria, * SseKmsEncryptedObjects, Status, * EncryptionConfiguration, and ReplicaKmsKeyID. For * information about replication configuration, see Replicating * Objects Created with SSE Using KMS keys.

For information on * PutBucketReplication errors, see List * of replication-related error codes

Permissions

To * create a PutBucketReplication request, you must have * s3:PutReplicationConfiguration permissions for the bucket.

*

By default, a resource owner, in this case the Amazon Web Services account * that created the bucket, can perform this operation. The resource owner can also * grant others permissions to perform the operation. For more information about * permissions, see Specifying * Permissions in a Policy and Managing * Access Permissions to Your Amazon S3 Resources.

To perform * this operation, the user or role performing the action must have the iam:PassRole * permission.

The following operations are related to * PutBucketReplication:

See Also:

AWS * API Reference

*/ virtual Model::PutBucketReplicationOutcome PutBucketReplication(const Model::PutBucketReplicationRequest& request) const; /* Returns the outcome directly; Callable/Async overloads provide future/callback forms. */ /** *

Creates a replication configuration or replaces an existing one. For more * information, see Replication * in the Amazon S3 User Guide.

Specify the replication * configuration in the request body. In the replication configuration, you provide * the name of the destination bucket or buckets where you want Amazon S3 to * replicate objects, the IAM role that Amazon S3 can assume to replicate objects * on your behalf, and other relevant information.

A replication * configuration must include at least one rule, and can contain a maximum of * 1,000. Each rule identifies a subset of objects to replicate by filtering the * objects in the source bucket. To choose additional subsets of objects to * replicate, add a rule for each subset.

To specify a subset of the objects * in the source bucket to apply a replication rule to, add the Filter element as a * child of the Rule element. You can filter objects based on an object key prefix, * one or more object tags, or both. When you add the Filter element in the * configuration, you must also add the following elements: * DeleteMarkerReplication, Status, and * Priority.

If you are using an earlier version of the * replication configuration, Amazon S3 handles replication of delete markers * differently. For more information, see Backward * Compatibility.

For information about enabling versioning on a * bucket, see Using * Versioning.

Handling Replication of Encrypted Objects

*

By default, Amazon S3 doesn't replicate objects that are stored at rest using * server-side encryption with KMS keys. To replicate Amazon Web Services * KMS-encrypted objects, add the following: SourceSelectionCriteria, * SseKmsEncryptedObjects, Status, * EncryptionConfiguration, and ReplicaKmsKeyID. For * information about replication configuration, see Replicating * Objects Created with SSE Using KMS keys.

For information on * PutBucketReplication errors, see List * of replication-related error codes

Permissions

To * create a PutBucketReplication request, you must have * s3:PutReplicationConfiguration permissions for the bucket.

*

By default, a resource owner, in this case the Amazon Web Services account * that created the bucket, can perform this operation. The resource owner can also * grant others permissions to perform the operation. For more information about * permissions, see Specifying * Permissions in a Policy and Managing * Access Permissions to Your Amazon S3 Resources.

To perform * this operation, the user or role performing the action must have the iam:PassRole * permission.

The following operations are related to * PutBucketReplication:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketReplicationOutcomeCallable PutBucketReplicationCallable(const Model::PutBucketReplicationRequest& request) const; /* Non-blocking: returns a waitable OutcomeCallable for this request. */ /** *

Creates a replication configuration or replaces an existing one. For more * information, see Replication * in the Amazon S3 User Guide.

Specify the replication * configuration in the request body. In the replication configuration, you provide * the name of the destination bucket or buckets where you want Amazon S3 to * replicate objects, the IAM role that Amazon S3 can assume to replicate objects * on your behalf, and other relevant information.

A replication * configuration must include at least one rule, and can contain a maximum of * 1,000. Each rule identifies a subset of objects to replicate by filtering the * objects in the source bucket. To choose additional subsets of objects to * replicate, add a rule for each subset.

To specify a subset of the objects * in the source bucket to apply a replication rule to, add the Filter element as a * child of the Rule element. You can filter objects based on an object key prefix, * one or more object tags, or both. When you add the Filter element in the * configuration, you must also add the following elements: * DeleteMarkerReplication, Status, and * Priority.

If you are using an earlier version of the * replication configuration, Amazon S3 handles replication of delete markers * differently. For more information, see Backward * Compatibility.

For information about enabling versioning on a * bucket, see Using * Versioning.

Handling Replication of Encrypted Objects

*

By default, Amazon S3 doesn't replicate objects that are stored at rest using * server-side encryption with KMS keys. To replicate Amazon Web Services * KMS-encrypted objects, add the following: SourceSelectionCriteria, * SseKmsEncryptedObjects, Status, * EncryptionConfiguration, and ReplicaKmsKeyID. For * information about replication configuration, see Replicating * Objects Created with SSE Using KMS keys.

For information on * PutBucketReplication errors, see List * of replication-related error codes

Permissions

To * create a PutBucketReplication request, you must have * s3:PutReplicationConfiguration permissions for the bucket.

*

By default, a resource owner, in this case the Amazon Web Services account * that created the bucket, can perform this operation. The resource owner can also * grant others permissions to perform the operation. For more information about * permissions, see Specifying * Permissions in a Policy and Managing * Access Permissions to Your Amazon S3 Resources.

To perform * this operation, the user or role performing the action must have the iam:PassRole * permission.

The following operations are related to * PutBucketReplication:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketReplicationAsync(const Model::PutBucketReplicationRequest& request, const PutBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /* NOTE(review): restored the AsyncCallerContext template argument stripped from std::shared_ptr; the bare std::shared_ptr& was ill-formed. */ /** *

Sets the request payment configuration for a bucket. By default, the bucket * owner pays for downloads from the bucket. This configuration parameter enables * the bucket owner (only) to specify that the person requesting the download will * be charged for the download. For more information, see Requester * Pays Buckets.

The following operations are related to * PutBucketRequestPayment:

See Also:

AWS * API Reference

*/ virtual Model::PutBucketRequestPaymentOutcome PutBucketRequestPayment(const Model::PutBucketRequestPaymentRequest& request) const; /* Returns the outcome directly; Callable/Async overloads provide future/callback forms. */ /** *

Sets the request payment configuration for a bucket. By default, the bucket * owner pays for downloads from the bucket. This configuration parameter enables * the bucket owner (only) to specify that the person requesting the download will * be charged for the download. For more information, see Requester * Pays Buckets.

The following operations are related to * PutBucketRequestPayment:

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketRequestPaymentOutcomeCallable PutBucketRequestPaymentCallable(const Model::PutBucketRequestPaymentRequest& request) const; /* Non-blocking: returns a waitable OutcomeCallable for this request. */ /** *

Sets the request payment configuration for a bucket. By default, the bucket * owner pays for downloads from the bucket. This configuration parameter enables * the bucket owner (only) to specify that the person requesting the download will * be charged for the download. For more information, see Requester * Pays Buckets.

The following operations are related to * PutBucketRequestPayment:

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketRequestPaymentAsync(const Model::PutBucketRequestPaymentRequest& request, const PutBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /* NOTE(review): restored the AsyncCallerContext template argument stripped from std::shared_ptr; the bare std::shared_ptr& was ill-formed. */ /** *

Sets the tags for a bucket.

Use tags to organize your Amazon Web * Services bill to reflect your own cost structure. To do this, sign up to get * your Amazon Web Services account bill with tag key values included. Then, to see * the cost of combined resources, organize your billing information according to * resources with the same tag key values. For example, you can tag several * resources with a specific application name, and then organize your billing * information to see the total cost of that application across several services. * For more information, see Cost * Allocation and Tagging and Using * Cost Allocation in Amazon S3 Bucket Tags.

When this operation * sets the tags for a bucket, it will overwrite any current tags the bucket * already has. You cannot use this operation to add tags to an existing list of * tags.

To use this operation, you must have permissions to perform * the s3:PutBucketTagging action. The bucket owner has this * permission by default and can grant this permission to others. For more * information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

* PutBucketTagging has the following special errors:

  • *

    Error code: InvalidTagError

    *
  • Error code: MalformedXMLError

    • *

      Description: The XML provided does not match the schema.

    *
  • Error code: OperationAbortedError

    • *

      Description: A conflicting conditional action is currently in progress * against this resource. Please try again.

  • Error * code: InternalError

    • Description: The service was * unable to apply the provided tag to the bucket.

*

The following operations are related to PutBucketTagging:

*

See Also:

AWS * API Reference

*/ virtual Model::PutBucketTaggingOutcome PutBucketTagging(const Model::PutBucketTaggingRequest& request) const; /* Returns the outcome directly; Callable/Async overloads provide future/callback forms. */ /** *

Sets the tags for a bucket.

Use tags to organize your Amazon Web * Services bill to reflect your own cost structure. To do this, sign up to get * your Amazon Web Services account bill with tag key values included. Then, to see * the cost of combined resources, organize your billing information according to * resources with the same tag key values. For example, you can tag several * resources with a specific application name, and then organize your billing * information to see the total cost of that application across several services. * For more information, see Cost * Allocation and Tagging and Using * Cost Allocation in Amazon S3 Bucket Tags.

When this operation * sets the tags for a bucket, it will overwrite any current tags the bucket * already has. You cannot use this operation to add tags to an existing list of * tags.

To use this operation, you must have permissions to perform * the s3:PutBucketTagging action. The bucket owner has this * permission by default and can grant this permission to others. For more * information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

* PutBucketTagging has the following special errors:

  • *

    Error code: InvalidTagError

    Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation.
  • Error code: MalformedXMLError

    • *

      Description: The XML provided does not match the schema.

    *
  • Error code: OperationAbortedError

    • *

      Description: A conflicting conditional action is currently in progress * against this resource. Please try again.

  • Error * code: InternalError

    • Description: The service was * unable to apply the provided tag to the bucket.

*

The following operations are related to PutBucketTagging:

*

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketTaggingOutcomeCallable PutBucketTaggingCallable(const Model::PutBucketTaggingRequest& request) const; /** *

Sets the tags for a bucket.

Use tags to organize your Amazon Web * Services bill to reflect your own cost structure. To do this, sign up to get * your Amazon Web Services account bill with tag key values included. Then, to see * the cost of combined resources, organize your billing information according to * resources with the same tag key values. For example, you can tag several * resources with a specific application name, and then organize your billing * information to see the total cost of that application across several services. * For more information, see Cost * Allocation and Tagging and Using * Cost Allocation in Amazon S3 Bucket Tags.

When this operation * sets the tags for a bucket, it will overwrite any current tags the bucket * already has. You cannot use this operation to add tags to an existing list of * tags.

To use this operation, you must have permissions to perform * the s3:PutBucketTagging action. The bucket owner has this * permission by default and can grant this permission to others. For more * information about permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources.

* PutBucketTagging has the following special errors:

  • *

    Error code: InvalidTagError

    Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation.
  • Error code: MalformedXMLError

    • *

      Description: The XML provided does not match the schema.

    *
  • Error code: OperationAbortedError

    • *

      Description: A conflicting conditional action is currently in progress * against this resource. Please try again.

  • Error * code: InternalError

    • Description: The service was * unable to apply the provided tag to the bucket.

*

The following operations are related to PutBucketTagging:

*

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketTaggingAsync(const Model::PutBucketTaggingRequest& request, const PutBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Sets the versioning state of an existing bucket. To set the versioning state, * you must be the bucket owner.

You can set the versioning state with one * of the following values:

Enabled—Enables versioning for the * objects in the bucket. All objects added to the bucket receive a unique version * ID.

Suspended—Disables versioning for the objects in the bucket. * All objects added to the bucket receive the version ID null.

If the * versioning state has never been set on a bucket, it has no versioning state; a * GetBucketVersioning * request does not return a versioning state value.

If the bucket owner * enables MFA Delete in the bucket versioning configuration, the bucket owner must * include the x-amz-mfa request header and the Status * and the MfaDelete request elements in a request to set the * versioning state of the bucket.

If you have an object * expiration lifecycle policy in your non-versioned bucket and you want to * maintain the same permanent delete behavior when you enable versioning, you must * add a noncurrent expiration policy. The noncurrent expiration lifecycle policy * will manage the deletes of the noncurrent object versions in the version-enabled * bucket. (A version-enabled bucket maintains one current and zero or more * noncurrent object versions.) For more information, see Lifecycle * and Versioning.

Related Resources *

See Also:

AWS * API Reference

*/ virtual Model::PutBucketVersioningOutcome PutBucketVersioning(const Model::PutBucketVersioningRequest& request) const; /** *

Sets the versioning state of an existing bucket. To set the versioning state, * you must be the bucket owner.

You can set the versioning state with one * of the following values:

Enabled—Enables versioning for the * objects in the bucket. All objects added to the bucket receive a unique version * ID.

Suspended—Disables versioning for the objects in the bucket. * All objects added to the bucket receive the version ID null.

If the * versioning state has never been set on a bucket, it has no versioning state; a * GetBucketVersioning * request does not return a versioning state value.

If the bucket owner * enables MFA Delete in the bucket versioning configuration, the bucket owner must * include the x-amz-mfa request header and the Status * and the MfaDelete request elements in a request to set the * versioning state of the bucket.

If you have an object * expiration lifecycle policy in your non-versioned bucket and you want to * maintain the same permanent delete behavior when you enable versioning, you must * add a noncurrent expiration policy. The noncurrent expiration lifecycle policy * will manage the deletes of the noncurrent object versions in the version-enabled * bucket. (A version-enabled bucket maintains one current and zero or more * noncurrent object versions.) For more information, see Lifecycle * and Versioning.

Related Resources *

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketVersioningOutcomeCallable PutBucketVersioningCallable(const Model::PutBucketVersioningRequest& request) const; /** *

Sets the versioning state of an existing bucket. To set the versioning state, * you must be the bucket owner.

You can set the versioning state with one * of the following values:

Enabled—Enables versioning for the * objects in the bucket. All objects added to the bucket receive a unique version * ID.

Suspended—Disables versioning for the objects in the bucket. * All objects added to the bucket receive the version ID null.

If the * versioning state has never been set on a bucket, it has no versioning state; a * GetBucketVersioning * request does not return a versioning state value.

If the bucket owner * enables MFA Delete in the bucket versioning configuration, the bucket owner must * include the x-amz-mfa request header and the Status * and the MfaDelete request elements in a request to set the * versioning state of the bucket.

If you have an object * expiration lifecycle policy in your non-versioned bucket and you want to * maintain the same permanent delete behavior when you enable versioning, you must * add a noncurrent expiration policy. The noncurrent expiration lifecycle policy * will manage the deletes of the noncurrent object versions in the version-enabled * bucket. (A version-enabled bucket maintains one current and zero or more * noncurrent object versions.) For more information, see Lifecycle * and Versioning.

Related Resources *

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketVersioningAsync(const Model::PutBucketVersioningRequest& request, const PutBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Sets the configuration of the website that is specified in the * website subresource. To configure a bucket as a website, you can * add this subresource on the bucket with website configuration information such * as the file name of the index document and any redirect rules. For more * information, see Hosting * Websites on Amazon S3.

This PUT action requires the * S3:PutBucketWebsite permission. By default, only the bucket owner * can configure the website attached to a bucket; however, bucket owners can allow * other users to set the website configuration by writing a bucket policy that * grants them the S3:PutBucketWebsite permission.

To redirect * all website requests sent to the bucket's website endpoint, you add a website * configuration with the following elements. Because all requests are sent to * another website, you don't need to provide index document name for the * bucket.

  • WebsiteConfiguration

  • * RedirectAllRequestsTo

  • HostName *

  • Protocol

If you want * granular control over redirects, you can use the following elements to add * routing rules that describe conditions for redirecting requests and information * about the redirect destination. In this case, the website configuration must * provide an index document for the bucket, because some requests might not be * redirected.

  • WebsiteConfiguration

  • *

    IndexDocument

  • Suffix

    *
  • ErrorDocument

  • Key *

  • RoutingRules

  • * RoutingRule

  • Condition

  • *
  • HttpErrorCodeReturnedEquals

  • * KeyPrefixEquals

  • Redirect

    *
  • Protocol

  • HostName *

  • ReplaceKeyPrefixWith

  • * ReplaceKeyWith

  • HttpRedirectCode *

Amazon S3 has a limitation of 50 routing rules per website * configuration. If you require more than 50 routing rules, you can use object * redirect. For more information, see Configuring * an Object Redirect in the Amazon S3 User Guide.

See * Also:

AWS * API Reference

*/ virtual Model::PutBucketWebsiteOutcome PutBucketWebsite(const Model::PutBucketWebsiteRequest& request) const; /** *

Sets the configuration of the website that is specified in the * website subresource. To configure a bucket as a website, you can * add this subresource on the bucket with website configuration information such * as the file name of the index document and any redirect rules. For more * information, see Hosting * Websites on Amazon S3.

This PUT action requires the * S3:PutBucketWebsite permission. By default, only the bucket owner * can configure the website attached to a bucket; however, bucket owners can allow * other users to set the website configuration by writing a bucket policy that * grants them the S3:PutBucketWebsite permission.

To redirect * all website requests sent to the bucket's website endpoint, you add a website * configuration with the following elements. Because all requests are sent to * another website, you don't need to provide index document name for the * bucket.

  • WebsiteConfiguration

  • * RedirectAllRequestsTo

  • HostName *

  • Protocol

If you want * granular control over redirects, you can use the following elements to add * routing rules that describe conditions for redirecting requests and information * about the redirect destination. In this case, the website configuration must * provide an index document for the bucket, because some requests might not be * redirected.

  • WebsiteConfiguration

  • *

    IndexDocument

  • Suffix

    *
  • ErrorDocument

  • Key *

  • RoutingRules

  • * RoutingRule

  • Condition

  • *
  • HttpErrorCodeReturnedEquals

  • * KeyPrefixEquals

  • Redirect

    *
  • Protocol

  • HostName *

  • ReplaceKeyPrefixWith

  • * ReplaceKeyWith

  • HttpRedirectCode *

Amazon S3 has a limitation of 50 routing rules per website * configuration. If you require more than 50 routing rules, you can use object * redirect. For more information, see Configuring * an Object Redirect in the Amazon S3 User Guide.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutBucketWebsiteOutcomeCallable PutBucketWebsiteCallable(const Model::PutBucketWebsiteRequest& request) const; /** *

Sets the configuration of the website that is specified in the * website subresource. To configure a bucket as a website, you can * add this subresource on the bucket with website configuration information such * as the file name of the index document and any redirect rules. For more * information, see Hosting * Websites on Amazon S3.

This PUT action requires the * S3:PutBucketWebsite permission. By default, only the bucket owner * can configure the website attached to a bucket; however, bucket owners can allow * other users to set the website configuration by writing a bucket policy that * grants them the S3:PutBucketWebsite permission.

To redirect * all website requests sent to the bucket's website endpoint, you add a website * configuration with the following elements. Because all requests are sent to * another website, you don't need to provide index document name for the * bucket.

  • WebsiteConfiguration

  • * RedirectAllRequestsTo

  • HostName *

  • Protocol

If you want * granular control over redirects, you can use the following elements to add * routing rules that describe conditions for redirecting requests and information * about the redirect destination. In this case, the website configuration must * provide an index document for the bucket, because some requests might not be * redirected.

  • WebsiteConfiguration

  • *

    IndexDocument

  • Suffix

    *
  • ErrorDocument

  • Key *

  • RoutingRules

  • * RoutingRule

  • Condition

  • *
  • HttpErrorCodeReturnedEquals

  • * KeyPrefixEquals

  • Redirect

    *
  • Protocol

  • HostName *

  • ReplaceKeyPrefixWith

  • * ReplaceKeyWith

  • HttpRedirectCode *

Amazon S3 has a limitation of 50 routing rules per website * configuration. If you require more than 50 routing rules, you can use object * redirect. For more information, see Configuring * an Object Redirect in the Amazon S3 User Guide.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutBucketWebsiteAsync(const Model::PutBucketWebsiteRequest& request, const PutBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Adds an object to a bucket. You must have WRITE permissions on a bucket to * add an object to it.

Amazon S3 never adds partial objects; if you receive * a success response, Amazon S3 added the entire object to the bucket.

*

Amazon S3 is a distributed system. If it receives multiple write requests for * the same object simultaneously, it overwrites all but the last object written. * Amazon S3 does not provide object locking; if you need this, make sure to build * it into your application layer or use versioning instead.

To ensure that * data is not corrupted traversing the network, use the Content-MD5 * header. When you use this header, Amazon S3 checks the object against the * provided MD5 value and, if they do not match, returns an error. Additionally, * you can calculate the MD5 while putting an object to Amazon S3 and compare the * returned ETag to the calculated MD5 value.

  • To * successfully complete the PutObject request, you must have the * s3:PutObject in your IAM permissions.

  • To * successfully change the objects acl of your PutObject request, you * must have the s3:PutObjectAcl in your IAM permissions.

  • *
  • The Content-MD5 header is required for any request to * upload an object with a retention period configured using Amazon S3 Object Lock. * For more information about Amazon S3 Object Lock, see Amazon * S3 Object Lock Overview in the Amazon S3 User Guide.

*

Server-side Encryption

You can optionally request * server-side encryption. With server-side encryption, Amazon S3 encrypts your * data as it writes it to disks in its data centers and decrypts the data when you * access it. You have the option to provide your own encryption key or use Amazon * Web Services managed encryption keys (SSE-S3 or SSE-KMS). For more information, * see Using * Server-Side Encryption.

If you request server-side encryption using * Amazon Web Services Key Management Service (SSE-KMS), you can enable an S3 * Bucket Key at the object-level. For more information, see Amazon S3 * Bucket Keys in the Amazon S3 User Guide.

Access Control * List (ACL)-Specific Request Headers

You can use headers to grant * ACL- based permissions. By default, all objects are private. Only the owner has * full access control. When adding a new object, you can grant permissions to * individual Amazon Web Services accounts or to predefined groups defined by * Amazon S3. These permissions are then added to the ACL on the object. For more * information, see Access * Control List (ACL) Overview and Managing * ACLs Using the REST API.

Storage Class Options

By * default, Amazon S3 uses the STANDARD Storage Class to store newly created * objects. The STANDARD storage class provides high durability and high * availability. Depending on performance needs, you can specify a different * Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For * more information, see Storage * Classes in the Amazon S3 User Guide.

Versioning

*

If you enable versioning for a bucket, Amazon S3 automatically generates a * unique version ID for the object being stored. Amazon S3 returns this ID in the * response. When you enable versioning for a bucket, if Amazon S3 receives * multiple write requests for the same object simultaneously, it stores all of the * objects.

For more information about versioning, see Adding * Objects to Versioning Enabled Buckets. For information about returning the * versioning state of a bucket, see GetBucketVersioning. *

Related Resources

See Also:

AWS API * Reference

*/ virtual Model::PutObjectOutcome PutObject(const Model::PutObjectRequest& request) const; /** *

Adds an object to a bucket. You must have WRITE permissions on a bucket to * add an object to it.

Amazon S3 never adds partial objects; if you receive * a success response, Amazon S3 added the entire object to the bucket.

*

Amazon S3 is a distributed system. If it receives multiple write requests for * the same object simultaneously, it overwrites all but the last object written. * Amazon S3 does not provide object locking; if you need this, make sure to build * it into your application layer or use versioning instead.

To ensure that * data is not corrupted traversing the network, use the Content-MD5 * header. When you use this header, Amazon S3 checks the object against the * provided MD5 value and, if they do not match, returns an error. Additionally, * you can calculate the MD5 while putting an object to Amazon S3 and compare the * returned ETag to the calculated MD5 value.

  • To * successfully complete the PutObject request, you must have the * s3:PutObject in your IAM permissions.

  • To * successfully change the objects acl of your PutObject request, you * must have the s3:PutObjectAcl in your IAM permissions.

  • *
  • The Content-MD5 header is required for any request to * upload an object with a retention period configured using Amazon S3 Object Lock. * For more information about Amazon S3 Object Lock, see Amazon * S3 Object Lock Overview in the Amazon S3 User Guide.

*

Server-side Encryption

You can optionally request * server-side encryption. With server-side encryption, Amazon S3 encrypts your * data as it writes it to disks in its data centers and decrypts the data when you * access it. You have the option to provide your own encryption key or use Amazon * Web Services managed encryption keys (SSE-S3 or SSE-KMS). For more information, * see Using * Server-Side Encryption.

If you request server-side encryption using * Amazon Web Services Key Management Service (SSE-KMS), you can enable an S3 * Bucket Key at the object-level. For more information, see Amazon S3 * Bucket Keys in the Amazon S3 User Guide.

Access Control * List (ACL)-Specific Request Headers

You can use headers to grant * ACL- based permissions. By default, all objects are private. Only the owner has * full access control. When adding a new object, you can grant permissions to * individual Amazon Web Services accounts or to predefined groups defined by * Amazon S3. These permissions are then added to the ACL on the object. For more * information, see Access * Control List (ACL) Overview and Managing * ACLs Using the REST API.

Storage Class Options

By * default, Amazon S3 uses the STANDARD Storage Class to store newly created * objects. The STANDARD storage class provides high durability and high * availability. Depending on performance needs, you can specify a different * Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For * more information, see Storage * Classes in the Amazon S3 User Guide.

Versioning

*

If you enable versioning for a bucket, Amazon S3 automatically generates a * unique version ID for the object being stored. Amazon S3 returns this ID in the * response. When you enable versioning for a bucket, if Amazon S3 receives * multiple write requests for the same object simultaneously, it stores all of the * objects.

For more information about versioning, see Adding * Objects to Versioning Enabled Buckets. For information about returning the * versioning state of a bucket, see GetBucketVersioning. *

Related Resources

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutObjectOutcomeCallable PutObjectCallable(const Model::PutObjectRequest& request) const; /** *

Adds an object to a bucket. You must have WRITE permissions on a bucket to * add an object to it.

Amazon S3 never adds partial objects; if you receive * a success response, Amazon S3 added the entire object to the bucket.

*

Amazon S3 is a distributed system. If it receives multiple write requests for * the same object simultaneously, it overwrites all but the last object written. * Amazon S3 does not provide object locking; if you need this, make sure to build * it into your application layer or use versioning instead.

To ensure that * data is not corrupted traversing the network, use the Content-MD5 * header. When you use this header, Amazon S3 checks the object against the * provided MD5 value and, if they do not match, returns an error. Additionally, * you can calculate the MD5 while putting an object to Amazon S3 and compare the * returned ETag to the calculated MD5 value.

  • To * successfully complete the PutObject request, you must have the * s3:PutObject in your IAM permissions.

  • To * successfully change the objects acl of your PutObject request, you * must have the s3:PutObjectAcl in your IAM permissions.

  • *
  • The Content-MD5 header is required for any request to * upload an object with a retention period configured using Amazon S3 Object Lock. * For more information about Amazon S3 Object Lock, see Amazon * S3 Object Lock Overview in the Amazon S3 User Guide.

*

Server-side Encryption

You can optionally request * server-side encryption. With server-side encryption, Amazon S3 encrypts your * data as it writes it to disks in its data centers and decrypts the data when you * access it. You have the option to provide your own encryption key or use Amazon * Web Services managed encryption keys (SSE-S3 or SSE-KMS). For more information, * see Using * Server-Side Encryption.

If you request server-side encryption using * Amazon Web Services Key Management Service (SSE-KMS), you can enable an S3 * Bucket Key at the object-level. For more information, see Amazon S3 * Bucket Keys in the Amazon S3 User Guide.

Access Control * List (ACL)-Specific Request Headers

You can use headers to grant * ACL- based permissions. By default, all objects are private. Only the owner has * full access control. When adding a new object, you can grant permissions to * individual Amazon Web Services accounts or to predefined groups defined by * Amazon S3. These permissions are then added to the ACL on the object. For more * information, see Access * Control List (ACL) Overview and Managing * ACLs Using the REST API.

Storage Class Options

By * default, Amazon S3 uses the STANDARD Storage Class to store newly created * objects. The STANDARD storage class provides high durability and high * availability. Depending on performance needs, you can specify a different * Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For * more information, see Storage * Classes in the Amazon S3 User Guide.

Versioning

*

If you enable versioning for a bucket, Amazon S3 automatically generates a * unique version ID for the object being stored. Amazon S3 returns this ID in the * response. When you enable versioning for a bucket, if Amazon S3 receives * multiple write requests for the same object simultaneously, it stores all of the * objects.

For more information about versioning, see Adding * Objects to Versioning Enabled Buckets. For information about returning the * versioning state of a bucket, see GetBucketVersioning. *

Related Resources

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutObjectAsync(const Model::PutObjectRequest& request, const PutObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Uses the acl subresource to set the access control list (ACL) * permissions for a new or existing object in an S3 bucket. You must have * WRITE_ACP permission to set the ACL of an object. For more * information, see What * permissions can I grant? in the Amazon S3 User Guide.

This * action is not supported by Amazon S3 on Outposts.

Depending on your * application needs, you can choose to set the ACL on an object using either the * request body or the headers. For example, if you have an existing application * that updates a bucket ACL using the request body, you can continue to use that * approach. For more information, see Access * Control List (ACL) Overview in the Amazon S3 User Guide.

* Access Permissions

You can set access permissions using one of * the following methods:

  • Specify a canned ACL with the * x-amz-acl request header. Amazon S3 supports a set of predefined * ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and * permissions. Specify the canned ACL name as the value of x-amz-acl. * If you use this header, you cannot use other access control-specific headers in * your request. For more information, see Canned * ACL.

  • Specify access permissions explicitly with the * x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control * headers. When using these headers, you specify explicit access permissions and * grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the * permission. If you use these ACL-specific headers, you cannot use * x-amz-acl header to set a canned ACL. These parameters map to the * set of permissions that Amazon S3 supports in an ACL. For more information, see * Access * Control List (ACL) Overview.

    You specify each grantee as a type=value * pair, where the type is one of the following:

    • id * – if the value specified is the canonical user ID of an Amazon Web Services * account

    • uri – if you are granting permissions to * a predefined group

    • emailAddress – if the value * specified is the email address of an Amazon Web Services account

      *

      Using email addresses to specify a grantee is only supported in the following * Amazon Web Services Regions:

      • US East (N. Virginia)

      • *
      • US West (N. California)

      • US West (Oregon)

      • *
      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

        *
      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • *
      • South America (São Paulo)

      For a list of all the * Amazon S3 supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

      *

    For example, the following x-amz-grant-read header * grants list objects permission to the two Amazon Web Services accounts * identified by their email addresses.

    x-amz-grant-read: * emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"

  • *

You can use either a canned ACL or specify access permissions * explicitly. You cannot do both.

Grantee Values

You can * specify the person (grantee) to whom you're assigning access rights (using * request elements) in the following ways:

  • By the person's * ID:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> * </Grantee>

    DisplayName is optional and ignored in the * request.

  • By URI:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

  • By Email address:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee> *

    The grantee is resolved to the CanonicalUser and, in a response to a GET * Object acl request, appears as the CanonicalUser.

    Using email * addresses to specify a grantee is only supported in the following Amazon Web * Services Regions:

    • US East (N. Virginia)

    • US * West (N. California)

    • US West (Oregon)

    • Asia * Pacific (Singapore)

    • Asia Pacific (Sydney)

    • *

      Asia Pacific (Tokyo)

    • Europe (Ireland)

    • *

      South America (São Paulo)

    For a list of all the Amazon S3 * supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

    *

Versioning

The ACL of an object is set at the * object version level. By default, PUT sets the ACL of the current version of an * object. To set the ACL of a different version, use the versionId * subresource.

Related Resources

See Also:

AWS API * Reference

*/ virtual Model::PutObjectAclOutcome PutObjectAcl(const Model::PutObjectAclRequest& request) const; /** *

Uses the acl subresource to set the access control list (ACL) * permissions for a new or existing object in an S3 bucket. You must have * WRITE_ACP permission to set the ACL of an object. For more * information, see What * permissions can I grant? in the Amazon S3 User Guide.

This * action is not supported by Amazon S3 on Outposts.

Depending on your * application needs, you can choose to set the ACL on an object using either the * request body or the headers. For example, if you have an existing application * that updates a bucket ACL using the request body, you can continue to use that * approach. For more information, see Access * Control List (ACL) Overview in the Amazon S3 User Guide.

* Access Permissions

You can set access permissions using one of * the following methods:

  • Specify a canned ACL with the * x-amz-acl request header. Amazon S3 supports a set of predefined * ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and * permissions. Specify the canned ACL name as the value of x-amz-acl. * If you use this header, you cannot use other access control-specific headers in * your request. For more information, see Canned * ACL.

  • Specify access permissions explicitly with the * x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control * headers. When using these headers, you specify explicit access permissions and * grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the * permission. If you use these ACL-specific headers, you cannot use * x-amz-acl header to set a canned ACL. These parameters map to the * set of permissions that Amazon S3 supports in an ACL. For more information, see * Access * Control List (ACL) Overview.

    You specify each grantee as a type=value * pair, where the type is one of the following:

    • id * – if the value specified is the canonical user ID of an Amazon Web Services * account

    • uri – if you are granting permissions to * a predefined group

    • emailAddress – if the value * specified is the email address of an Amazon Web Services account

      *

      Using email addresses to specify a grantee is only supported in the following * Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the * Amazon S3 supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

      *

    For example, the following x-amz-grant-read header * grants list objects permission to the two Amazon Web Services accounts * identified by their email addresses.

    x-amz-grant-read: * emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"

  • *

You can use either a canned ACL or specify access permissions * explicitly. You cannot do both.

Grantee Values

You can * specify the person (grantee) to whom you're assigning access rights (using * request elements) in the following ways:

  • By the person's * ID:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> * </Grantee>

    DisplayName is optional and ignored in the * request.

  • By URI:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

  • By Email address:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee> *

    The grantee is resolved to the CanonicalUser and, in a response to a GET * Object acl request, appears as the CanonicalUser.

    Using email * addresses to specify a grantee is only supported in the following Amazon Web * Services Regions:

    • US East (N. Virginia)

    • US * West (N. California)

    • US West (Oregon)

    • Asia * Pacific (Singapore)

    • Asia Pacific (Sydney)

    • Asia Pacific (Tokyo)

    • Europe (Ireland)

    • South America (São Paulo)

    For a list of all the Amazon S3 * supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

    *

Versioning

The ACL of an object is set at the * object version level. By default, PUT sets the ACL of the current version of an * object. To set the ACL of a different version, use the versionId * subresource.

Related Resources

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutObjectAclOutcomeCallable PutObjectAclCallable(const Model::PutObjectAclRequest& request) const; /** *

Uses the acl subresource to set the access control list (ACL) * permissions for a new or existing object in an S3 bucket. You must have * WRITE_ACP permission to set the ACL of an object. For more * information, see What * permissions can I grant? in the Amazon S3 User Guide.

This * action is not supported by Amazon S3 on Outposts.

Depending on your * application needs, you can choose to set the ACL on an object using either the * request body or the headers. For example, if you have an existing application * that updates a bucket ACL using the request body, you can continue to use that * approach. For more information, see Access * Control List (ACL) Overview in the Amazon S3 User Guide.

* Access Permissions

You can set access permissions using one of * the following methods:

  • Specify a canned ACL with the * x-amz-acl request header. Amazon S3 supports a set of predefined * ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and * permissions. Specify the canned ACL name as the value of x-amz-acl. * If you use this header, you cannot use other access control-specific headers in * your request. For more information, see Canned * ACL.

  • Specify access permissions explicitly with the * x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control * headers. When using these headers, you specify explicit access permissions and * grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the * permission. If you use these ACL-specific headers, you cannot use * x-amz-acl header to set a canned ACL. These parameters map to the * set of permissions that Amazon S3 supports in an ACL. For more information, see * Access * Control List (ACL) Overview.

    You specify each grantee as a type=value * pair, where the type is one of the following:

    • id * – if the value specified is the canonical user ID of an Amazon Web Services * account

    • uri – if you are granting permissions to * a predefined group

    • emailAddress – if the value * specified is the email address of an Amazon Web Services account

      *

      Using email addresses to specify a grantee is only supported in the following * Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the * Amazon S3 supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

      *

    For example, the following x-amz-grant-read header * grants list objects permission to the two Amazon Web Services accounts * identified by their email addresses.

    x-amz-grant-read: * emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"

  • *

You can use either a canned ACL or specify access permissions * explicitly. You cannot do both.

Grantee Values

You can * specify the person (grantee) to whom you're assigning access rights (using * request elements) in the following ways:

  • By the person's * ID:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> * </Grantee>

    DisplayName is optional and ignored in the * request.

  • By URI:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

  • By Email address:

    <Grantee * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" * xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee> *

    The grantee is resolved to the CanonicalUser and, in a response to a GET * Object acl request, appears as the CanonicalUser.

    Using email * addresses to specify a grantee is only supported in the following Amazon Web * Services Regions:

    • US East (N. Virginia)

    • US * West (N. California)

    • US West (Oregon)

    • Asia * Pacific (Singapore)

    • Asia Pacific (Sydney)

    • Asia Pacific (Tokyo)

    • Europe (Ireland)

    • South America (São Paulo)

    For a list of all the Amazon S3 * supported Regions and endpoints, see Regions * and Endpoints in the Amazon Web Services General Reference.

    *

Versioning

The ACL of an object is set at the * object version level. By default, PUT sets the ACL of the current version of an * object. To set the ACL of a different version, use the versionId * subresource.

Related Resources

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutObjectAclAsync(const Model::PutObjectAclRequest& request, const PutObjectAclResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Applies a Legal Hold configuration to the specified object. For more * information, see Locking * Objects.

This action is not supported by Amazon S3 on * Outposts.

See Also:

AWS * API Reference

*/ virtual Model::PutObjectLegalHoldOutcome PutObjectLegalHold(const Model::PutObjectLegalHoldRequest& request) const; /** *

Applies a Legal Hold configuration to the specified object. For more * information, see Locking * Objects.

This action is not supported by Amazon S3 on * Outposts.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutObjectLegalHoldOutcomeCallable PutObjectLegalHoldCallable(const Model::PutObjectLegalHoldRequest& request) const; /** *

Applies a Legal Hold configuration to the specified object. For more * information, see Locking * Objects.

This action is not supported by Amazon S3 on * Outposts.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutObjectLegalHoldAsync(const Model::PutObjectLegalHoldRequest& request, const PutObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Places an Object Lock configuration on the specified bucket. The rule * specified in the Object Lock configuration will be applied by default to every * new object placed in the specified bucket. For more information, see Locking * Objects.

  • The DefaultRetention settings * require both a mode and a period.

  • The * DefaultRetention period can be either Days or * Years but you must select one. You cannot specify Days * and Years at the same time.

  • You can only enable * Object Lock for new buckets. If you want to turn on Object Lock for an existing * bucket, contact Amazon Web Services Support.

See * Also:

AWS * API Reference

*/ virtual Model::PutObjectLockConfigurationOutcome PutObjectLockConfiguration(const Model::PutObjectLockConfigurationRequest& request) const; /** *

Places an Object Lock configuration on the specified bucket. The rule * specified in the Object Lock configuration will be applied by default to every * new object placed in the specified bucket. For more information, see Locking * Objects.

  • The DefaultRetention settings * require both a mode and a period.

  • The * DefaultRetention period can be either Days or * Years but you must select one. You cannot specify Days * and Years at the same time.

  • You can only enable * Object Lock for new buckets. If you want to turn on Object Lock for an existing * bucket, contact Amazon Web Services Support.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutObjectLockConfigurationOutcomeCallable PutObjectLockConfigurationCallable(const Model::PutObjectLockConfigurationRequest& request) const; /** *

Places an Object Lock configuration on the specified bucket. The rule * specified in the Object Lock configuration will be applied by default to every * new object placed in the specified bucket. For more information, see Locking * Objects.

  • The DefaultRetention settings * require both a mode and a period.

  • The * DefaultRetention period can be either Days or * Years but you must select one. You cannot specify Days * and Years at the same time.

  • You can only enable * Object Lock for new buckets. If you want to turn on Object Lock for an existing * bucket, contact Amazon Web Services Support.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutObjectLockConfigurationAsync(const Model::PutObjectLockConfigurationRequest& request, const PutObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Places an Object Retention configuration on an object. For more information, * see Locking * Objects. Users or accounts require the s3:PutObjectRetention * permission in order to place an Object Retention configuration on objects. * Bypassing a Governance Retention configuration requires the * s3:BypassGovernanceRetention permission.

This action is not * supported by Amazon S3 on Outposts.

Permissions

When the * Object Lock retention mode is set to compliance, you need * s3:PutObjectRetention and s3:BypassGovernanceRetention * permissions. For other requests to PutObjectRetention, only * s3:PutObjectRetention permissions are required.

See * Also:

AWS * API Reference

*/ virtual Model::PutObjectRetentionOutcome PutObjectRetention(const Model::PutObjectRetentionRequest& request) const; /** *

Places an Object Retention configuration on an object. For more information, * see Locking * Objects. Users or accounts require the s3:PutObjectRetention * permission in order to place an Object Retention configuration on objects. * Bypassing a Governance Retention configuration requires the * s3:BypassGovernanceRetention permission.

This action is not * supported by Amazon S3 on Outposts.

Permissions

When the * Object Lock retention mode is set to compliance, you need * s3:PutObjectRetention and s3:BypassGovernanceRetention * permissions. For other requests to PutObjectRetention, only * s3:PutObjectRetention permissions are required.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutObjectRetentionOutcomeCallable PutObjectRetentionCallable(const Model::PutObjectRetentionRequest& request) const; /** *

Places an Object Retention configuration on an object. For more information, * see Locking * Objects. Users or accounts require the s3:PutObjectRetention * permission in order to place an Object Retention configuration on objects. * Bypassing a Governance Retention configuration requires the * s3:BypassGovernanceRetention permission.

This action is not * supported by Amazon S3 on Outposts.

Permissions

When the * Object Lock retention mode is set to compliance, you need * s3:PutObjectRetention and s3:BypassGovernanceRetention * permissions. For other requests to PutObjectRetention, only * s3:PutObjectRetention permissions are required.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutObjectRetentionAsync(const Model::PutObjectRetentionRequest& request, const PutObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Sets the supplied tag-set to an object that already exists in a bucket.

*

A tag is a key-value pair. You can associate tags with an object by sending a * PUT request against the tagging subresource that is associated with the object. * You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

*

For tagging-related restrictions related to characters and encodings, see Tag * Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 * tags per object.

To use this operation, you must have permission to * perform the s3:PutObjectTagging action. By default, the bucket * owner has this permission and can grant this permission to others.

To put * tags of any other version, use the versionId query parameter. You * also need permission for the s3:PutObjectVersionTagging action.

*

For information about the Amazon S3 object tagging feature, see Object * Tagging.

Special Errors

    • *
    • Code: InvalidTagError

    • Cause: The tag * provided was not a valid tag. This error can occur if the tag did not pass input * validation. For more information, see Object * Tagging.

    • Code: * MalformedXMLError

    • Cause: The XML provided does not * match the schema.

    • Code: * OperationAbortedError

    • Cause: A conflicting * conditional action is currently in progress against this resource. Please try * again.

    • Code: InternalError *

    • Cause: The service was unable to apply the provided tag * to the object.

Related * Resources

See Also:

AWS * API Reference

*/ virtual Model::PutObjectTaggingOutcome PutObjectTagging(const Model::PutObjectTaggingRequest& request) const; /** *

Sets the supplied tag-set to an object that already exists in a bucket.

*

A tag is a key-value pair. You can associate tags with an object by sending a * PUT request against the tagging subresource that is associated with the object. * You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

*

For tagging-related restrictions related to characters and encodings, see Tag * Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 * tags per object.

To use this operation, you must have permission to * perform the s3:PutObjectTagging action. By default, the bucket * owner has this permission and can grant this permission to others.

To put * tags of any other version, use the versionId query parameter. You * also need permission for the s3:PutObjectVersionTagging action.

*

For information about the Amazon S3 object tagging feature, see Object * Tagging.

Special Errors

    • *
    • Code: InvalidTagError

    • Cause: The tag * provided was not a valid tag. This error can occur if the tag did not pass input * validation. For more information, see Object * Tagging.

    • Code: * MalformedXMLError

    • Cause: The XML provided does not * match the schema.

    • Code: * OperationAbortedError

    • Cause: A conflicting * conditional action is currently in progress against this resource. Please try * again.

    • Code: InternalError *

    • Cause: The service was unable to apply the provided tag * to the object.

Related * Resources

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutObjectTaggingOutcomeCallable PutObjectTaggingCallable(const Model::PutObjectTaggingRequest& request) const; /** *

Sets the supplied tag-set to an object that already exists in a bucket.

*

A tag is a key-value pair. You can associate tags with an object by sending a * PUT request against the tagging subresource that is associated with the object. * You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

*

For tagging-related restrictions related to characters and encodings, see Tag * Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 * tags per object.

To use this operation, you must have permission to * perform the s3:PutObjectTagging action. By default, the bucket * owner has this permission and can grant this permission to others.

To put * tags of any other version, use the versionId query parameter. You * also need permission for the s3:PutObjectVersionTagging action.

*

For information about the Amazon S3 object tagging feature, see Object * Tagging.

Special Errors

    • *
    • Code: InvalidTagError

    • Cause: The tag * provided was not a valid tag. This error can occur if the tag did not pass input * validation. For more information, see Object * Tagging.

    • Code: * MalformedXMLError

    • Cause: The XML provided does not * match the schema.

    • Code: * OperationAbortedError

    • Cause: A conflicting * conditional action is currently in progress against this resource. Please try * again.

    • Code: InternalError *

    • Cause: The service was unable to apply the provided tag * to the object.

Related * Resources

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutObjectTaggingAsync(const Model::PutObjectTaggingRequest& request, const PutObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates or modifies the PublicAccessBlock configuration for an * Amazon S3 bucket. To use this operation, you must have the * s3:PutBucketPublicAccessBlock permission. For more information * about Amazon S3 permissions, see Specifying * Permissions in a Policy.

When Amazon S3 evaluates the * PublicAccessBlock configuration for a bucket or an object, it * checks the PublicAccessBlock configuration for both the bucket (or * the bucket that contains the object) and the bucket owner's account. If the * PublicAccessBlock configurations are different between the bucket * and the account, Amazon S3 uses the most restrictive combination of the * bucket-level and account-level settings.

For more * information about when Amazon S3 considers a bucket or an object public, see The * Meaning of "Public".

Related Resources

*

See Also:

AWS * API Reference

*/ virtual Model::PutPublicAccessBlockOutcome PutPublicAccessBlock(const Model::PutPublicAccessBlockRequest& request) const; /** *

Creates or modifies the PublicAccessBlock configuration for an * Amazon S3 bucket. To use this operation, you must have the * s3:PutBucketPublicAccessBlock permission. For more information * about Amazon S3 permissions, see Specifying * Permissions in a Policy.

When Amazon S3 evaluates the * PublicAccessBlock configuration for a bucket or an object, it * checks the PublicAccessBlock configuration for both the bucket (or * the bucket that contains the object) and the bucket owner's account. If the * PublicAccessBlock configurations are different between the bucket * and the account, Amazon S3 uses the most restrictive combination of the * bucket-level and account-level settings.

For more * information about when Amazon S3 considers a bucket or an object public, see The * Meaning of "Public".

Related Resources

*

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::PutPublicAccessBlockOutcomeCallable PutPublicAccessBlockCallable(const Model::PutPublicAccessBlockRequest& request) const; /** *

Creates or modifies the PublicAccessBlock configuration for an * Amazon S3 bucket. To use this operation, you must have the * s3:PutBucketPublicAccessBlock permission. For more information * about Amazon S3 permissions, see Specifying * Permissions in a Policy.

When Amazon S3 evaluates the * PublicAccessBlock configuration for a bucket or an object, it * checks the PublicAccessBlock configuration for both the bucket (or * the bucket that contains the object) and the bucket owner's account. If the * PublicAccessBlock configurations are different between the bucket * and the account, Amazon S3 uses the most restrictive combination of the * bucket-level and account-level settings.

For more * information about when Amazon S3 considers a bucket or an object public, see The * Meaning of "Public".

Related Resources

*

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void PutPublicAccessBlockAsync(const Model::PutPublicAccessBlockRequest& request, const PutPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Restores an archived copy of an object back into Amazon S3

This action * is not supported by Amazon S3 on Outposts.

This action performs the * following types of requests:

  • select - Perform a * select query on an archived object

  • restore an * archive - Restore an archived object

To use this * operation, you must have permissions to perform the * s3:RestoreObject action. The bucket owner has this permission by * default and can grant this permission to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources in the Amazon S3 User * Guide.

Querying Archives with Select Requests

You use * a select type of request to perform SQL queries on archived objects. The * archived objects that are being queried by the select request must be formatted * as uncompressed comma-separated values (CSV) files. You can run queries and * custom analytics on your archived data without having to restore your data to a * hotter Amazon S3 tier. For an overview about select requests, see Querying * Archived Objects in the Amazon S3 User Guide.

When making a * select request, do the following:

  • Define an output location for * the select query's output. This must be an Amazon S3 bucket in the same Amazon * Web Services Region as the bucket that contains the archive object that is being * queried. The Amazon Web Services account that initiates the job must have * permissions to write to the S3 bucket. You can specify the storage class and * encryption for the output objects stored in the bucket. For more information * about output, see Querying * Archived Objects in the Amazon S3 User Guide.

    For more * information about the S3 structure in the request body, see the * following:

  • Define the SQL expression for the SELECT * type of restoration for your query in the request body's * SelectParameters structure. You can use expressions like the * following examples.

    • The following expression returns all * records from the specified object.

      SELECT * FROM Object *

    • Assuming that you are not using any headers for data stored * in the object, you can specify columns with positional headers.

      * SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

    • *

      If you have headers and you set the fileHeaderInfo in the * CSV structure in the request body to USE, you can * specify headers in the query. (If you set the fileHeaderInfo field * to IGNORE, the first row is skipped for the query.) You cannot mix * ordinal positions with header column names.

      SELECT s.Id, * s.FirstName, s.SSN FROM S3Object s

For * more information about using SQL with S3 Glacier Select restore, see SQL * Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 * User Guide.

When making a select request, you can also do the * following:

  • To expedite your queries, specify the * Expedited tier. For more information about tiers, see "Restoring * Archives," later in this topic.

  • Specify details about the data * serialization format of both the input object that is being queried and the * serialization of the CSV-encoded query results.

The following * are additional important facts about the select feature:

  • The * output results are new Amazon S3 objects. Unlike archive retrievals, they are * stored until explicitly deleted-manually or through a lifecycle policy.

    *
  • You can issue more than one select request on the same Amazon S3 * object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate * requests.

  • Amazon S3 accepts a select request even if the * object has already been restored. A select request doesn’t return error response * 409.

Restoring objects

Objects * that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and * S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are * not accessible in real time. For objects in Archive Access or Deep Archive * Access tiers you must first initiate a restore request, and then wait until the * object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 * Glacier Deep Archive storage classes you must first initiate a restore request, * and then wait until a temporary copy of the object is available. To access an * archived object, you must restore the object for the duration (number of days) * that you specify.

To restore a specific object version, you can provide a * version ID. If you don't provide a version ID, Amazon S3 restores the current * version.

When restoring an archived object (or using a select request), * you can specify one of the following data access tier options in the * Tier element of the request body:

  • * Expedited - Expedited retrievals allow you to quickly access * your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering * Archive tier when occasional urgent requests for a subset of archives are * required. For all but the largest archived objects (250 MB+), data accessed * using Expedited retrievals is typically made available within 1–5 minutes. * Provisioned capacity ensures that retrieval capacity for Expedited retrievals is * available when you need it. Expedited retrievals and provisioned capacity are * not available for objects stored in the S3 Glacier Deep Archive storage class or * S3 Intelligent-Tiering Deep Archive tier.

  • * Standard - Standard retrievals allow you to access any of your * archived objects within several hours. This is the default option for retrieval * requests that do not specify the retrieval option. Standard retrievals typically * finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3 * Intelligent-Tiering Archive tier. They typically finish within 12 hours for * objects stored in the S3 Glacier Deep Archive storage class or S3 * Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects * stored in S3 Intelligent-Tiering.

  • Bulk * - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling * you to retrieve large amounts, even petabytes, of data inexpensively. Bulk * retrievals typically finish within 5–12 hours for objects stored in the S3 * Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically * finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage * class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for * objects stored in S3 Intelligent-Tiering.

For more * information about archive retrieval options and provisioned capacity for * Expedited data access, see Restoring * Archived Objects in the Amazon S3 User Guide.

You can use * Amazon S3 restore speed upgrade to change the restore speed to a faster speed * while it is in progress. For more information, see * Upgrading the speed of an in-progress restore in the Amazon S3 User * Guide.

To get the status of object restoration, you can send a * HEAD request. Operations return the x-amz-restore * header, which provides information about the restoration status, in the * response. You can use Amazon S3 event notifications to notify you when a restore * is initiated or completed. For more information, see Configuring * Amazon S3 Event Notifications in the Amazon S3 User Guide.

*

After restoring an archived object, you can update the restoration period by * reissuing the request with a new period. Amazon S3 updates the restoration * period relative to the current time and charges only for the request — there are * no data transfer charges. You cannot update the restoration period when Amazon * S3 is actively processing your current restore request for the object.

If * your bucket has a lifecycle configuration with a rule that includes an * expiration action, the object expiration overrides the life span that you * specify in a restore request. For example, if you restore an object copy for 10 * days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the * object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration * and Object * Lifecycle Management in Amazon S3 User Guide.

* Responses

A successful action returns either the 200 * OK or 202 Accepted status code.

  • If the * object is not previously restored, then Amazon S3 returns 202 * Accepted in the response.

  • If the object is previously * restored, Amazon S3 returns 200 OK in the response.

  • *

Special Errors

    • * Code: RestoreAlreadyInProgress

    • Cause: Object * restore is already in progress. (This error does not apply to SELECT type * requests.)

    • HTTP Status Code: 409 Conflict

      *
    • SOAP Fault Code Prefix: Client

  • *
    • Code: GlacierExpeditedRetrievalNotAvailable

    • *

      Cause: expedited retrievals are currently not available. Try again later. * (Returned if there is insufficient capacity to process the Expedited request. * This error applies only to Expedited retrievals and not to S3 Standard or Bulk * retrievals.)

    • HTTP Status Code: 503

    • *
    • SOAP Fault Code Prefix: N/A

Related Resources

See Also:

AWS * API Reference

*/ /* Plain form returning a Model::RestoreObjectOutcome directly; the Callable/Async variants below run the same operation off the calling thread (per their own comments: future-based and executor-queued respectively). */ virtual Model::RestoreObjectOutcome RestoreObject(const Model::RestoreObjectRequest& request) const; /** *

Restores an archived copy of an object back into Amazon S3

This action * is not supported by Amazon S3 on Outposts.

This action performs the * following types of requests:

  • select - Perform a * select query on an archived object

  • restore an * archive - Restore an archived object

To use this * operation, you must have permissions to perform the * s3:RestoreObject action. The bucket owner has this permission by * default and can grant this permission to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources in the Amazon S3 User * Guide.

Querying Archives with Select Requests

You use * a select type of request to perform SQL queries on archived objects. The * archived objects that are being queried by the select request must be formatted * as uncompressed comma-separated values (CSV) files. You can run queries and * custom analytics on your archived data without having to restore your data to a * hotter Amazon S3 tier. For an overview about select requests, see Querying * Archived Objects in the Amazon S3 User Guide.

When making a * select request, do the following:

  • Define an output location for * the select query's output. This must be an Amazon S3 bucket in the same Amazon * Web Services Region as the bucket that contains the archive object that is being * queried. The Amazon Web Services account that initiates the job must have * permissions to write to the S3 bucket. You can specify the storage class and * encryption for the output objects stored in the bucket. For more information * about output, see Querying * Archived Objects in the Amazon S3 User Guide.

    For more * information about the S3 structure in the request body, see the * following:

  • Define the SQL expression for the SELECT * type of restoration for your query in the request body's * SelectParameters structure. You can use expressions like the * following examples.

    • The following expression returns all * records from the specified object.

      SELECT * FROM Object *

    • Assuming that you are not using any headers for data stored * in the object, you can specify columns with positional headers.

      * SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

    • *

      If you have headers and you set the fileHeaderInfo in the * CSV structure in the request body to USE, you can * specify headers in the query. (If you set the fileHeaderInfo field * to IGNORE, the first row is skipped for the query.) You cannot mix * ordinal positions with header column names.

      SELECT s.Id, * s.FirstName, s.SSN FROM S3Object s

For * more information about using SQL with S3 Glacier Select restore, see SQL * Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 * User Guide.

When making a select request, you can also do the * following:

  • To expedite your queries, specify the * Expedited tier. For more information about tiers, see "Restoring * Archives," later in this topic.

  • Specify details about the data * serialization format of both the input object that is being queried and the * serialization of the CSV-encoded query results.

The following * are additional important facts about the select feature:

  • The * output results are new Amazon S3 objects. Unlike archive retrievals, they are * stored until explicitly deleted — manually or through a lifecycle policy.

    *
  • You can issue more than one select request on the same Amazon S3 * object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate * requests.

  • Amazon S3 accepts a select request even if the * object has already been restored. A select request doesn’t return error response * 409.

Restoring objects

Objects * that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and * S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are * not accessible in real time. For objects in Archive Access or Deep Archive * Access tiers you must first initiate a restore request, and then wait until the * object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 * Glacier Deep Archive storage classes you must first initiate a restore request, * and then wait until a temporary copy of the object is available. To access an * archived object, you must restore the object for the duration (number of days) * that you specify.

To restore a specific object version, you can provide a * version ID. If you don't provide a version ID, Amazon S3 restores the current * version.

When restoring an archived object (or using a select request), * you can specify one of the following data access tier options in the * Tier element of the request body:

  • * Expedited - Expedited retrievals allow you to quickly access * your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering * Archive tier when occasional urgent requests for a subset of archives are * required. For all but the largest archived objects (250 MB+), data accessed * using Expedited retrievals is typically made available within 1–5 minutes. * Provisioned capacity ensures that retrieval capacity for Expedited retrievals is * available when you need it. Expedited retrievals and provisioned capacity are * not available for objects stored in the S3 Glacier Deep Archive storage class or * S3 Intelligent-Tiering Deep Archive tier.

  • * Standard - Standard retrievals allow you to access any of your * archived objects within several hours. This is the default option for retrieval * requests that do not specify the retrieval option. Standard retrievals typically * finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3 * Intelligent-Tiering Archive tier. They typically finish within 12 hours for * objects stored in the S3 Glacier Deep Archive storage class or S3 * Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects * stored in S3 Intelligent-Tiering.

  • Bulk * - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling * you to retrieve large amounts, even petabytes, of data inexpensively. Bulk * retrievals typically finish within 5–12 hours for objects stored in the S3 * Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically * finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage * class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for * objects stored in S3 Intelligent-Tiering.

For more * information about archive retrieval options and provisioned capacity for * Expedited data access, see Restoring * Archived Objects in the Amazon S3 User Guide.

You can use * Amazon S3 restore speed upgrade to change the restore speed to a faster speed * while it is in progress. For more information, see * Upgrading the speed of an in-progress restore in the Amazon S3 User * Guide.

To get the status of object restoration, you can send a * HEAD request. Operations return the x-amz-restore * header, which provides information about the restoration status, in the * response. You can use Amazon S3 event notifications to notify you when a restore * is initiated or completed. For more information, see Configuring * Amazon S3 Event Notifications in the Amazon S3 User Guide.

*

After restoring an archived object, you can update the restoration period by * reissuing the request with a new period. Amazon S3 updates the restoration * period relative to the current time and charges only for the request — there are * no data transfer charges. You cannot update the restoration period when Amazon * S3 is actively processing your current restore request for the object.

If * your bucket has a lifecycle configuration with a rule that includes an * expiration action, the object expiration overrides the life span that you * specify in a restore request. For example, if you restore an object copy for 10 * days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the * object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration * and Object * Lifecycle Management in Amazon S3 User Guide.

* Responses

A successful action returns either the 200 * OK or 202 Accepted status code.

  • If the * object is not previously restored, then Amazon S3 returns 202 * Accepted in the response.

  • If the object is previously * restored, Amazon S3 returns 200 OK in the response.

  • *

Special Errors

    • * Code: RestoreAlreadyInProgress

    • Cause: Object * restore is already in progress. (This error does not apply to SELECT type * requests.)

    • HTTP Status Code: 409 Conflict

      *
    • SOAP Fault Code Prefix: Client

  • *
    • Code: GlacierExpeditedRetrievalNotAvailable

    • *

      Cause: expedited retrievals are currently not available. Try again later. * (Returned if there is insufficient capacity to process the Expedited request. * This error applies only to Expedited retrievals and not to S3 Standard or Bulk * retrievals.)

    • HTTP Status Code: 503

    • *
    • SOAP Fault Code Prefix: N/A

Related Resources

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ /* Future-based variant of RestoreObject; same request semantics, result retrieved via the returned callable/future. */ virtual Model::RestoreObjectOutcomeCallable RestoreObjectCallable(const Model::RestoreObjectRequest& request) const; /** *

Restores an archived copy of an object back into Amazon S3

This action * is not supported by Amazon S3 on Outposts.

This action performs the * following types of requests:

  • select - Perform a * select query on an archived object

  • restore an * archive - Restore an archived object

To use this * operation, you must have permissions to perform the * s3:RestoreObject action. The bucket owner has this permission by * default and can grant this permission to others. For more information about * permissions, see Permissions * Related to Bucket Subresource Operations and Managing * Access Permissions to Your Amazon S3 Resources in the Amazon S3 User * Guide.

Querying Archives with Select Requests

You use * a select type of request to perform SQL queries on archived objects. The * archived objects that are being queried by the select request must be formatted * as uncompressed comma-separated values (CSV) files. You can run queries and * custom analytics on your archived data without having to restore your data to a * hotter Amazon S3 tier. For an overview about select requests, see Querying * Archived Objects in the Amazon S3 User Guide.

When making a * select request, do the following:

  • Define an output location for * the select query's output. This must be an Amazon S3 bucket in the same Amazon * Web Services Region as the bucket that contains the archive object that is being * queried. The Amazon Web Services account that initiates the job must have * permissions to write to the S3 bucket. You can specify the storage class and * encryption for the output objects stored in the bucket. For more information * about output, see Querying * Archived Objects in the Amazon S3 User Guide.

    For more * information about the S3 structure in the request body, see the * following:

  • Define the SQL expression for the SELECT * type of restoration for your query in the request body's * SelectParameters structure. You can use expressions like the * following examples.

    • The following expression returns all * records from the specified object.

      SELECT * FROM Object *

    • Assuming that you are not using any headers for data stored * in the object, you can specify columns with positional headers.

      * SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

    • *

      If you have headers and you set the fileHeaderInfo in the * CSV structure in the request body to USE, you can * specify headers in the query. (If you set the fileHeaderInfo field * to IGNORE, the first row is skipped for the query.) You cannot mix * ordinal positions with header column names.

      SELECT s.Id, * s.FirstName, s.SSN FROM S3Object s

For * more information about using SQL with S3 Glacier Select restore, see SQL * Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 * User Guide.

When making a select request, you can also do the * following:

  • To expedite your queries, specify the * Expedited tier. For more information about tiers, see "Restoring * Archives," later in this topic.

  • Specify details about the data * serialization format of both the input object that is being queried and the * serialization of the CSV-encoded query results.

The following * are additional important facts about the select feature:

  • The * output results are new Amazon S3 objects. Unlike archive retrievals, they are * stored until explicitly deleted — manually or through a lifecycle policy.

    *
  • You can issue more than one select request on the same Amazon S3 * object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate * requests.

  • Amazon S3 accepts a select request even if the * object has already been restored. A select request doesn’t return error response * 409.

Restoring objects

Objects * that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and * S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are * not accessible in real time. For objects in Archive Access or Deep Archive * Access tiers you must first initiate a restore request, and then wait until the * object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 * Glacier Deep Archive storage classes you must first initiate a restore request, * and then wait until a temporary copy of the object is available. To access an * archived object, you must restore the object for the duration (number of days) * that you specify.

To restore a specific object version, you can provide a * version ID. If you don't provide a version ID, Amazon S3 restores the current * version.

When restoring an archived object (or using a select request), * you can specify one of the following data access tier options in the * Tier element of the request body:

  • * Expedited - Expedited retrievals allow you to quickly access * your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering * Archive tier when occasional urgent requests for a subset of archives are * required. For all but the largest archived objects (250 MB+), data accessed * using Expedited retrievals is typically made available within 1–5 minutes. * Provisioned capacity ensures that retrieval capacity for Expedited retrievals is * available when you need it. Expedited retrievals and provisioned capacity are * not available for objects stored in the S3 Glacier Deep Archive storage class or * S3 Intelligent-Tiering Deep Archive tier.

  • * Standard - Standard retrievals allow you to access any of your * archived objects within several hours. This is the default option for retrieval * requests that do not specify the retrieval option. Standard retrievals typically * finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3 * Intelligent-Tiering Archive tier. They typically finish within 12 hours for * objects stored in the S3 Glacier Deep Archive storage class or S3 * Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects * stored in S3 Intelligent-Tiering.

  • Bulk * - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling * you to retrieve large amounts, even petabytes, of data inexpensively. Bulk * retrievals typically finish within 5–12 hours for objects stored in the S3 * Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically * finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage * class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for * objects stored in S3 Intelligent-Tiering.

For more * information about archive retrieval options and provisioned capacity for * Expedited data access, see Restoring * Archived Objects in the Amazon S3 User Guide.

You can use * Amazon S3 restore speed upgrade to change the restore speed to a faster speed * while it is in progress. For more information, see * Upgrading the speed of an in-progress restore in the Amazon S3 User * Guide.

To get the status of object restoration, you can send a * HEAD request. Operations return the x-amz-restore * header, which provides information about the restoration status, in the * response. You can use Amazon S3 event notifications to notify you when a restore * is initiated or completed. For more information, see Configuring * Amazon S3 Event Notifications in the Amazon S3 User Guide.

*

After restoring an archived object, you can update the restoration period by * reissuing the request with a new period. Amazon S3 updates the restoration * period relative to the current time and charges only for the request — there are * no data transfer charges. You cannot update the restoration period when Amazon * S3 is actively processing your current restore request for the object.

If * your bucket has a lifecycle configuration with a rule that includes an * expiration action, the object expiration overrides the life span that you * specify in a restore request. For example, if you restore an object copy for 10 * days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the * object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration * and Object * Lifecycle Management in Amazon S3 User Guide.

* Responses

A successful action returns either the 200 * OK or 202 Accepted status code.

  • If the * object is not previously restored, then Amazon S3 returns 202 * Accepted in the response.

  • If the object is previously * restored, Amazon S3 returns 200 OK in the response.

  • *

Special Errors

    • * Code: RestoreAlreadyInProgress

    • Cause: Object * restore is already in progress. (This error does not apply to SELECT type * requests.)

    • HTTP Status Code: 409 Conflict

      *
    • SOAP Fault Code Prefix: Client

  • *
    • Code: GlacierExpeditedRetrievalNotAvailable

    • *

      Cause: expedited retrievals are currently not available. Try again later. * (Returned if there is insufficient capacity to process the Expedited request. * This error applies only to Expedited retrievals and not to S3 Standard or Bulk * retrievals.)

    • HTTP Status Code: 503

    • *
    • SOAP Fault Code Prefix: N/A

Related Resources

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ /* Async variant of RestoreObject: handler is invoked on an executor thread when the operation completes; context is an optional caller token forwarded to the handler (defaults to none). NOTE: the shared_ptr template argument <const Aws::Client::AsyncCallerContext> was restored here — it had been stripped, leaving an ill-formed `std::shared_ptr&`. */ virtual void RestoreObjectAsync(const Model::RestoreObjectRequest& request, const RestoreObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

This action filters the contents of an Amazon S3 object based on a simple * structured query language (SQL) statement. In the request, along with the SQL * expression, you must also specify a data serialization format (JSON, CSV, or * Apache Parquet) of the object. Amazon S3 uses this format to parse object data * into records, and returns only records that match the specified SQL expression. * You must also specify the data serialization format for the response.

*

This action is not supported by Amazon S3 on Outposts.

For more * information about Amazon S3 Select, see Selecting * Content from Objects in the Amazon S3 User Guide.

For more * information about using SQL with Amazon S3 Select, see * SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 * User Guide.

Permissions

You must have * s3:GetObject permission for this operation. Amazon S3 Select does * not support anonymous access. For more information about permissions, see Specifying * Permissions in a Policy in the Amazon S3 User Guide.

* Object Data Formats

You can use Amazon S3 Select to query objects * that have the following format properties:

  • CSV, JSON, and * Parquet - Objects must be in CSV, JSON, or Parquet format.

  • *

    UTF-8 - UTF-8 is the only encoding type Amazon S3 Select * supports.

  • GZIP or BZIP2 - CSV and JSON files can be * compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats * that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports * columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not * support whole-object compression for Parquet objects.

  • * Server-side encryption - Amazon S3 Select supports querying objects that * are protected with server-side encryption.

    For objects that are encrypted * with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must * use the headers that are documented in the GetObject. * For more information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 * User Guide.

    For objects that are encrypted with Amazon S3 managed * encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side * encryption is handled transparently, so you don't need to specify anything. For * more information about server-side encryption, including SSE-S3 and SSE-KMS, see * Protecting * Data Using Server-Side Encryption in the Amazon S3 User Guide.

    *

Working with the Response Body

Given the response * size is unknown, Amazon S3 Select streams the response as a series of messages * and includes a Transfer-Encoding header with chunked * as its value in the response. For more information, see Appendix: * SelectObjectContent Response.

GetObject Support

*

The SelectObjectContent action does not support the following * GetObject functionality. For more information, see GetObject.

*
  • Range: Although you can specify a scan range for an * Amazon S3 Select request (see SelectObjectContentRequest * - ScanRange in the request parameters), you cannot specify the range of * bytes of an object to return.

  • GLACIER, DEEP_ARCHIVE and * REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, * DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more * information about storage classes, see Storage * Classes in the Amazon S3 User Guide.

* Special Errors

For a list of special errors for this operation, * see List * of SELECT Object Content Error Codes

Related * Resources

See Also:

AWS * API Reference

*/ /* NOTE(review): unlike the other operations in this header, the request is taken by non-const reference — presumably it is mutated to wire up response event streaming; confirm against SelectObjectContentRequest before changing. */ virtual Model::SelectObjectContentOutcome SelectObjectContent(Model::SelectObjectContentRequest& request) const; /** *

This action filters the contents of an Amazon S3 object based on a simple * structured query language (SQL) statement. In the request, along with the SQL * expression, you must also specify a data serialization format (JSON, CSV, or * Apache Parquet) of the object. Amazon S3 uses this format to parse object data * into records, and returns only records that match the specified SQL expression. * You must also specify the data serialization format for the response.

*

This action is not supported by Amazon S3 on Outposts.

For more * information about Amazon S3 Select, see Selecting * Content from Objects in the Amazon S3 User Guide.

For more * information about using SQL with Amazon S3 Select, see * SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 * User Guide.

Permissions

You must have * s3:GetObject permission for this operation. Amazon S3 Select does * not support anonymous access. For more information about permissions, see Specifying * Permissions in a Policy in the Amazon S3 User Guide.

* Object Data Formats

You can use Amazon S3 Select to query objects * that have the following format properties:

  • CSV, JSON, and * Parquet - Objects must be in CSV, JSON, or Parquet format.

  • *

    UTF-8 - UTF-8 is the only encoding type Amazon S3 Select * supports.

  • GZIP or BZIP2 - CSV and JSON files can be * compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats * that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports * columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not * support whole-object compression for Parquet objects.

  • * Server-side encryption - Amazon S3 Select supports querying objects that * are protected with server-side encryption.

    For objects that are encrypted * with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must * use the headers that are documented in the GetObject. * For more information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 * User Guide.

    For objects that are encrypted with Amazon S3 managed * encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side * encryption is handled transparently, so you don't need to specify anything. For * more information about server-side encryption, including SSE-S3 and SSE-KMS, see * Protecting * Data Using Server-Side Encryption in the Amazon S3 User Guide.

    *

Working with the Response Body

Given the response * size is unknown, Amazon S3 Select streams the response as a series of messages * and includes a Transfer-Encoding header with chunked * as its value in the response. For more information, see Appendix: * SelectObjectContent Response.

GetObject Support

*

The SelectObjectContent action does not support the following * GetObject functionality. For more information, see GetObject.

*
  • Range: Although you can specify a scan range for an * Amazon S3 Select request (see SelectObjectContentRequest * - ScanRange in the request parameters), you cannot specify the range of * bytes of an object to return.

  • GLACIER, DEEP_ARCHIVE and * REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, * DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more * information about storage classes, see Storage * Classes in the Amazon S3 User Guide.

* Special Errors

For a list of special errors for this operation, * see List * of SELECT Object Content Error Codes

Related * Resources

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ /* Future-based variant of SelectObjectContent; the request parameter is non-const by reference, matching the synchronous form. */ virtual Model::SelectObjectContentOutcomeCallable SelectObjectContentCallable(Model::SelectObjectContentRequest& request) const; /** *

This action filters the contents of an Amazon S3 object based on a simple * structured query language (SQL) statement. In the request, along with the SQL * expression, you must also specify a data serialization format (JSON, CSV, or * Apache Parquet) of the object. Amazon S3 uses this format to parse object data * into records, and returns only records that match the specified SQL expression. * You must also specify the data serialization format for the response.

*

This action is not supported by Amazon S3 on Outposts.

For more * information about Amazon S3 Select, see Selecting * Content from Objects in the Amazon S3 User Guide.

For more * information about using SQL with Amazon S3 Select, see * SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 * User Guide.

Permissions

You must have * s3:GetObject permission for this operation. Amazon S3 Select does * not support anonymous access. For more information about permissions, see Specifying * Permissions in a Policy in the Amazon S3 User Guide.

* Object Data Formats

You can use Amazon S3 Select to query objects * that have the following format properties:

  • CSV, JSON, and * Parquet - Objects must be in CSV, JSON, or Parquet format.

  • *

    UTF-8 - UTF-8 is the only encoding type Amazon S3 Select * supports.

  • GZIP or BZIP2 - CSV and JSON files can be * compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats * that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports * columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not * support whole-object compression for Parquet objects.

  • * Server-side encryption - Amazon S3 Select supports querying objects that * are protected with server-side encryption.

    For objects that are encrypted * with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must * use the headers that are documented in the GetObject. * For more information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 * User Guide.

    For objects that are encrypted with Amazon S3 managed * encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side * encryption is handled transparently, so you don't need to specify anything. For * more information about server-side encryption, including SSE-S3 and SSE-KMS, see * Protecting * Data Using Server-Side Encryption in the Amazon S3 User Guide.

    *

Working with the Response Body

Given the response * size is unknown, Amazon S3 Select streams the response as a series of messages * and includes a Transfer-Encoding header with chunked * as its value in the response. For more information, see Appendix: * SelectObjectContent Response.

GetObject Support

*

The SelectObjectContent action does not support the following * GetObject functionality. For more information, see GetObject.

*
  • Range: Although you can specify a scan range for an * Amazon S3 Select request (see SelectObjectContentRequest * - ScanRange in the request parameters), you cannot specify the range of * bytes of an object to return.

  • GLACIER, DEEP_ARCHIVE and * REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, * DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more * information about storage classes, see Storage * Classes in the Amazon S3 User Guide.

* Special Errors

For a list of special errors for this operation, * see List * of SELECT Object Content Error Codes

Related * Resources

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ /* Async variant of SelectObjectContent: handler runs on an executor thread on completion; context is an optional caller token forwarded to the handler (defaults to none). The request is non-const by reference, matching the synchronous form. NOTE: the shared_ptr template argument <const Aws::Client::AsyncCallerContext> was restored here — it had been stripped, leaving an ill-formed `std::shared_ptr&`. */ virtual void SelectObjectContentAsync(Model::SelectObjectContentRequest& request, const SelectObjectContentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Uploads a part in a multipart upload.

In this operation, you * provide part data in your request. However, you have an option to specify your * existing Amazon S3 object as a data source for the part you are uploading. To * upload a part from an existing object, you use the UploadPartCopy * operation.

You must initiate a multipart upload (see CreateMultipartUpload) * before you can upload any part. In response to your initiate request, Amazon S3 * returns an upload ID, a unique identifier, that you must include in your upload * part request.

Part numbers can be any number from 1 to 10,000, inclusive. * A part number uniquely identifies a part and also defines its position within * the object being created. If you upload a new part using the same part number * that was used with a previous part, the previously uploaded part is overwritten. * Each part must be at least 5 MB in size, except the last part. There is no size * limit on the last part of your multipart upload.

To ensure that data is * not corrupted when traversing the network, specify the Content-MD5 * header in the upload part request. Amazon S3 checks the part data against the * provided MD5 value. If they do not match, Amazon S3 returns an error.

If * the upload request is signed with Signature Version 4, then Amazon Web Services * S3 uses the x-amz-content-sha256 header as a checksum instead of * Content-MD5. For more information see Authenticating * Requests: Using the Authorization Header (Amazon Web Services Signature Version * 4).

Note: After you initiate multipart upload and upload one * or more parts, you must either complete or abort multipart upload in order to * stop getting charged for storage of the uploaded parts. Only after you either * complete or abort multipart upload, Amazon S3 frees up the parts storage and * stops charging you for the parts storage.

For more information on * multipart uploads, go to Multipart * Upload Overview in the Amazon S3 User Guide .

For information * on the permissions required to use the multipart upload API, go to Multipart * Upload and Permissions in the Amazon S3 User Guide.

You can * optionally request server-side encryption where Amazon S3 encrypts your data as * it writes it to disks in its data centers and decrypts it for you when you * access it. You have the option of providing your own encryption key, or you can * use the Amazon Web Services managed encryption keys. If you choose to provide * your own encryption key, the request headers you provide in the request must * match the headers you used in the request to initiate the upload by using CreateMultipartUpload. * For more information, go to Using * Server-Side Encryption in the Amazon S3 User Guide.

*

Server-side encryption is supported by the S3 Multipart Upload actions. * Unless you are using a customer-provided encryption key, you don't need to * specify the encryption parameters in each UploadPart request. Instead, you only * need to specify the server-side encryption parameters in the initial Initiate * Multipart request. For more information, see CreateMultipartUpload.

*

If you requested server-side encryption using a customer-provided encryption * key in your initiate multipart upload request, you must provide identical * encryption information in each part upload using the following headers.

    *
  • x-amz-server-side-encryption-customer-algorithm

  • *

    x-amz-server-side-encryption-customer-key

  • *

    x-amz-server-side-encryption-customer-key-MD5

Special Errors

    • Code: * NoSuchUpload

    • Cause: The specified multipart upload * does not exist. The upload ID might be invalid, or the multipart upload might * have been aborted or completed.

    • HTTP Status Code: * 404 Not Found

    • SOAP Fault Code Prefix: Client *

Related Resources

*

See Also:

AWS API * Reference

*/ virtual Model::UploadPartOutcome UploadPart(const Model::UploadPartRequest& request) const; /** *

Uploads a part in a multipart upload.

In this operation, you * provide part data in your request. However, you have an option to specify your * existing Amazon S3 object as a data source for the part you are uploading. To * upload a part from an existing object, you use the UploadPartCopy * operation.

You must initiate a multipart upload (see CreateMultipartUpload) * before you can upload any part. In response to your initiate request, Amazon S3 * returns an upload ID, a unique identifier, that you must include in your upload * part request.

Part numbers can be any number from 1 to 10,000, inclusive. * A part number uniquely identifies a part and also defines its position within * the object being created. If you upload a new part using the same part number * that was used with a previous part, the previously uploaded part is overwritten. * Each part must be at least 5 MB in size, except the last part. There is no size * limit on the last part of your multipart upload.

To ensure that data is * not corrupted when traversing the network, specify the Content-MD5 * header in the upload part request. Amazon S3 checks the part data against the * provided MD5 value. If they do not match, Amazon S3 returns an error.

If * the upload request is signed with Signature Version 4, then Amazon Web Services * S3 uses the x-amz-content-sha256 header as a checksum instead of * Content-MD5. For more information see Authenticating * Requests: Using the Authorization Header (Amazon Web Services Signature Version * 4).

Note: After you initiate multipart upload and upload one * or more parts, you must either complete or abort multipart upload in order to * stop getting charged for storage of the uploaded parts. Only after you either * complete or abort multipart upload, Amazon S3 frees up the parts storage and * stops charging you for the parts storage.

For more information on * multipart uploads, go to Multipart * Upload Overview in the Amazon S3 User Guide .

For information * on the permissions required to use the multipart upload API, go to Multipart * Upload and Permissions in the Amazon S3 User Guide.

You can * optionally request server-side encryption where Amazon S3 encrypts your data as * it writes it to disks in its data centers and decrypts it for you when you * access it. You have the option of providing your own encryption key, or you can * use the Amazon Web Services managed encryption keys. If you choose to provide * your own encryption key, the request headers you provide in the request must * match the headers you used in the request to initiate the upload by using CreateMultipartUpload. * For more information, go to Using * Server-Side Encryption in the Amazon S3 User Guide.

*

Server-side encryption is supported by the S3 Multipart Upload actions. * Unless you are using a customer-provided encryption key, you don't need to * specify the encryption parameters in each UploadPart request. Instead, you only * need to specify the server-side encryption parameters in the initial Initiate * Multipart request. For more information, see CreateMultipartUpload.

*

If you requested server-side encryption using a customer-provided encryption * key in your initiate multipart upload request, you must provide identical * encryption information in each part upload using the following headers.

    *
  • x-amz-server-side-encryption-customer-algorithm

  • *

    x-amz-server-side-encryption-customer-key

  • *

    x-amz-server-side-encryption-customer-key-MD5

Special Errors

    • Code: * NoSuchUpload

    • Cause: The specified multipart upload * does not exist. The upload ID might be invalid, or the multipart upload might * have been aborted or completed.

    • HTTP Status Code: * 404 Not Found

    • SOAP Fault Code Prefix: Client *

Related Resources

*

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::UploadPartOutcomeCallable UploadPartCallable(const Model::UploadPartRequest& request) const; /** *

Uploads a part in a multipart upload.

In this operation, you * provide part data in your request. However, you have an option to specify your * existing Amazon S3 object as a data source for the part you are uploading. To * upload a part from an existing object, you use the UploadPartCopy * operation.

You must initiate a multipart upload (see CreateMultipartUpload) * before you can upload any part. In response to your initiate request, Amazon S3 * returns an upload ID, a unique identifier, that you must include in your upload * part request.

Part numbers can be any number from 1 to 10,000, inclusive. * A part number uniquely identifies a part and also defines its position within * the object being created. If you upload a new part using the same part number * that was used with a previous part, the previously uploaded part is overwritten. * Each part must be at least 5 MB in size, except the last part. There is no size * limit on the last part of your multipart upload.

To ensure that data is * not corrupted when traversing the network, specify the Content-MD5 * header in the upload part request. Amazon S3 checks the part data against the * provided MD5 value. If they do not match, Amazon S3 returns an error.

If * the upload request is signed with Signature Version 4, then Amazon Web Services * S3 uses the x-amz-content-sha256 header as a checksum instead of * Content-MD5. For more information see Authenticating * Requests: Using the Authorization Header (Amazon Web Services Signature Version * 4).

Note: After you initiate multipart upload and upload one * or more parts, you must either complete or abort multipart upload in order to * stop getting charged for storage of the uploaded parts. Only after you either * complete or abort multipart upload, Amazon S3 frees up the parts storage and * stops charging you for the parts storage.

For more information on * multipart uploads, go to Multipart * Upload Overview in the Amazon S3 User Guide .

For information * on the permissions required to use the multipart upload API, go to Multipart * Upload and Permissions in the Amazon S3 User Guide.

You can * optionally request server-side encryption where Amazon S3 encrypts your data as * it writes it to disks in its data centers and decrypts it for you when you * access it. You have the option of providing your own encryption key, or you can * use the Amazon Web Services managed encryption keys. If you choose to provide * your own encryption key, the request headers you provide in the request must * match the headers you used in the request to initiate the upload by using CreateMultipartUpload. * For more information, go to Using * Server-Side Encryption in the Amazon S3 User Guide.

*

Server-side encryption is supported by the S3 Multipart Upload actions. * Unless you are using a customer-provided encryption key, you don't need to * specify the encryption parameters in each UploadPart request. Instead, you only * need to specify the server-side encryption parameters in the initial Initiate * Multipart request. For more information, see CreateMultipartUpload.

*

If you requested server-side encryption using a customer-provided encryption * key in your initiate multipart upload request, you must provide identical * encryption information in each part upload using the following headers.

    *
  • x-amz-server-side-encryption-customer-algorithm

  • *

    x-amz-server-side-encryption-customer-key

  • *

    x-amz-server-side-encryption-customer-key-MD5

Special Errors

    • Code: * NoSuchUpload

    • Cause: The specified multipart upload * does not exist. The upload ID might be invalid, or the multipart upload might * have been aborted or completed.

    • HTTP Status Code: * 404 Not Found

    • SOAP Fault Code Prefix: Client *

Related Resources

*

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void UploadPartAsync(const Model::UploadPartRequest& request, const UploadPartResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Uploads a part by copying data from an existing object as data source. You * specify the data source by adding the request header * x-amz-copy-source in your request and a byte range by adding the * request header x-amz-copy-source-range in your request.

The * minimum allowable part size for a multipart upload is 5 MB. For more information * about multipart upload limits, go to Quick * Facts in the Amazon S3 User Guide.

Instead of using an * existing object as part data, you might use the UploadPart * action and provide data in your request.

You must initiate a * multipart upload before you can upload any part. In response to your initiate * request, Amazon S3 returns a unique identifier, the upload ID, that you must * include in your upload part request.

For more information about using the * UploadPartCopy operation, see the following:

  • For * conceptual information about multipart uploads, see Uploading * Objects Using Multipart Upload in the Amazon S3 User Guide.

  • *
  • For information about permissions required to use the multipart upload * API, see Multipart * Upload and Permissions in the Amazon S3 User Guide.

  • *

    For information about copying objects using a single atomic action vs. the * multipart upload, see Operations * on Objects in the Amazon S3 User Guide.

  • For * information about using server-side encryption with customer-provided encryption * keys with the UploadPartCopy operation, see CopyObject * and UploadPart.

    *

Note the following additional considerations about the request * headers x-amz-copy-source-if-match, * x-amz-copy-source-if-none-match, * x-amz-copy-source-if-unmodified-since, and * x-amz-copy-source-if-modified-since:

  • * Consideration 1 - If both of the x-amz-copy-source-if-match * and x-amz-copy-source-if-unmodified-since headers are present in * the request as follows:

    x-amz-copy-source-if-match * condition evaluates to true, and;

    * x-amz-copy-source-if-unmodified-since condition evaluates to * false;

    Amazon S3 returns 200 OK and copies the * data.

  • Consideration 2 - If both of the * x-amz-copy-source-if-none-match and * x-amz-copy-source-if-modified-since headers are present in the * request as follows:

    x-amz-copy-source-if-none-match * condition evaluates to false, and;

    * x-amz-copy-source-if-modified-since condition evaluates to * true;

    Amazon S3 returns 412 Precondition Failed * response code.

Versioning

If your bucket has * versioning enabled, you could have multiple versions of the same object. By * default, x-amz-copy-source identifies the current version of the * object to copy. If the current version is a delete marker and you don't specify * a versionId in the x-amz-copy-source, Amazon S3 returns a 404 * error, because the object does not exist. If you specify versionId in the * x-amz-copy-source and the versionId is a delete marker, Amazon S3 * returns an HTTP 400 error, because you are not allowed to specify a delete * marker as a version for the x-amz-copy-source.

You can * optionally specify a specific version of the source object to copy by adding the * versionId subresource as shown in the following example:

* x-amz-copy-source: /bucket/object?versionId=version id

Special Errors

    • Code: * NoSuchUpload

    • Cause: The specified multipart upload * does not exist. The upload ID might be invalid, or the multipart upload might * have been aborted or completed.

    • HTTP Status Code: 404 * Not Found

    • Code: * InvalidRequest

    • Cause: The specified copy source is * not supported as a byte-range copy source.

    • HTTP * Status Code: 400 Bad Request

* Related Resources

See Also:

AWS * API Reference

*/ virtual Model::UploadPartCopyOutcome UploadPartCopy(const Model::UploadPartCopyRequest& request) const; /** *

Uploads a part by copying data from an existing object as data source. You * specify the data source by adding the request header * x-amz-copy-source in your request and a byte range by adding the * request header x-amz-copy-source-range in your request.

The * minimum allowable part size for a multipart upload is 5 MB. For more information * about multipart upload limits, go to Quick * Facts in the Amazon S3 User Guide.

Instead of using an * existing object as part data, you might use the UploadPart * action and provide data in your request.

You must initiate a * multipart upload before you can upload any part. In response to your initiate * request, Amazon S3 returns a unique identifier, the upload ID, that you must * include in your upload part request.

For more information about using the * UploadPartCopy operation, see the following:

  • For * conceptual information about multipart uploads, see Uploading * Objects Using Multipart Upload in the Amazon S3 User Guide.

  • *
  • For information about permissions required to use the multipart upload * API, see Multipart * Upload and Permissions in the Amazon S3 User Guide.

  • *

    For information about copying objects using a single atomic action vs. the * multipart upload, see Operations * on Objects in the Amazon S3 User Guide.

  • For * information about using server-side encryption with customer-provided encryption * keys with the UploadPartCopy operation, see CopyObject * and UploadPart.

    *

Note the following additional considerations about the request * headers x-amz-copy-source-if-match, * x-amz-copy-source-if-none-match, * x-amz-copy-source-if-unmodified-since, and * x-amz-copy-source-if-modified-since:

  • * Consideration 1 - If both of the x-amz-copy-source-if-match * and x-amz-copy-source-if-unmodified-since headers are present in * the request as follows:

    x-amz-copy-source-if-match * condition evaluates to true, and;

    * x-amz-copy-source-if-unmodified-since condition evaluates to * false;

    Amazon S3 returns 200 OK and copies the * data.

  • Consideration 2 - If both of the * x-amz-copy-source-if-none-match and * x-amz-copy-source-if-modified-since headers are present in the * request as follows:

    x-amz-copy-source-if-none-match * condition evaluates to false, and;

    * x-amz-copy-source-if-modified-since condition evaluates to * true;

    Amazon S3 returns 412 Precondition Failed * response code.

Versioning

If your bucket has * versioning enabled, you could have multiple versions of the same object. By * default, x-amz-copy-source identifies the current version of the * object to copy. If the current version is a delete marker and you don't specify * a versionId in the x-amz-copy-source, Amazon S3 returns a 404 * error, because the object does not exist. If you specify versionId in the * x-amz-copy-source and the versionId is a delete marker, Amazon S3 * returns an HTTP 400 error, because you are not allowed to specify a delete * marker as a version for the x-amz-copy-source.

You can * optionally specify a specific version of the source object to copy by adding the * versionId subresource as shown in the following example:

* x-amz-copy-source: /bucket/object?versionId=version id

Special Errors

    • Code: * NoSuchUpload

    • Cause: The specified multipart upload * does not exist. The upload ID might be invalid, or the multipart upload might * have been aborted or completed.

    • HTTP Status Code: 404 * Not Found

    • Code: * InvalidRequest

    • Cause: The specified copy source is * not supported as a byte-range copy source.

    • HTTP * Status Code: 400 Bad Request

* Related Resources

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::UploadPartCopyOutcomeCallable UploadPartCopyCallable(const Model::UploadPartCopyRequest& request) const; /** *

Uploads a part by copying data from an existing object as data source. You * specify the data source by adding the request header * x-amz-copy-source in your request and a byte range by adding the * request header x-amz-copy-source-range in your request.

The * minimum allowable part size for a multipart upload is 5 MB. For more information * about multipart upload limits, go to Quick * Facts in the Amazon S3 User Guide.

Instead of using an * existing object as part data, you might use the UploadPart * action and provide data in your request.

You must initiate a * multipart upload before you can upload any part. In response to your initiate * request, Amazon S3 returns a unique identifier, the upload ID, that you must * include in your upload part request.

For more information about using the * UploadPartCopy operation, see the following:

  • For * conceptual information about multipart uploads, see Uploading * Objects Using Multipart Upload in the Amazon S3 User Guide.

  • *
  • For information about permissions required to use the multipart upload * API, see Multipart * Upload and Permissions in the Amazon S3 User Guide.

  • *

    For information about copying objects using a single atomic action vs. the * multipart upload, see Operations * on Objects in the Amazon S3 User Guide.

  • For * information about using server-side encryption with customer-provided encryption * keys with the UploadPartCopy operation, see CopyObject * and UploadPart.

    *

Note the following additional considerations about the request * headers x-amz-copy-source-if-match, * x-amz-copy-source-if-none-match, * x-amz-copy-source-if-unmodified-since, and * x-amz-copy-source-if-modified-since:

  • * Consideration 1 - If both of the x-amz-copy-source-if-match * and x-amz-copy-source-if-unmodified-since headers are present in * the request as follows:

    x-amz-copy-source-if-match * condition evaluates to true, and;

    * x-amz-copy-source-if-unmodified-since condition evaluates to * false;

    Amazon S3 returns 200 OK and copies the * data.

  • Consideration 2 - If both of the * x-amz-copy-source-if-none-match and * x-amz-copy-source-if-modified-since headers are present in the * request as follows:

    x-amz-copy-source-if-none-match * condition evaluates to false, and;

    * x-amz-copy-source-if-modified-since condition evaluates to * true;

    Amazon S3 returns 412 Precondition Failed * response code.

Versioning

If your bucket has * versioning enabled, you could have multiple versions of the same object. By * default, x-amz-copy-source identifies the current version of the * object to copy. If the current version is a delete marker and you don't specify * a versionId in the x-amz-copy-source, Amazon S3 returns a 404 * error, because the object does not exist. If you specify versionId in the * x-amz-copy-source and the versionId is a delete marker, Amazon S3 * returns an HTTP 400 error, because you are not allowed to specify a delete * marker as a version for the x-amz-copy-source.

You can * optionally specify a specific version of the source object to copy by adding the * versionId subresource as shown in the following example:

* x-amz-copy-source: /bucket/object?versionId=version id

Special Errors

    • Code: * NoSuchUpload

    • Cause: The specified multipart upload * does not exist. The upload ID might be invalid, or the multipart upload might * have been aborted or completed.

    • HTTP Status Code: 404 * Not Found

    • Code: * InvalidRequest

    • Cause: The specified copy source is * not supported as a byte-range copy source.

    • HTTP * Status Code: 400 Bad Request

* Related Resources

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void UploadPartCopyAsync(const Model::UploadPartCopyRequest& request, const UploadPartCopyResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Passes transformed objects to a GetObject operation when using * Object Lambda access points. For information about Object Lambda access points, * see Transforming * objects with Object Lambda access points in the Amazon S3 User * Guide.

This operation supports metadata that can be returned by GetObject, * in addition to RequestRoute, RequestToken, * StatusCode, ErrorCode, and ErrorMessage. * The GetObject response metadata is supported so that the * WriteGetObjectResponse caller, typically a Lambda function, can * provide the same metadata when it internally invokes GetObject. * When WriteGetObjectResponse is called by a customer-owned Lambda * function, the metadata returned to the end user GetObject call * might differ from what Amazon S3 would normally return.

You can include * any number of metadata headers. When including a metadata header, it should be * prefaced with x-amz-meta. For example, * x-amz-meta-my-custom-header: MyCustomValue. The primary use case * for this is to forward GetObject metadata.

Amazon Web * Services provides some prebuilt Lambda functions that you can use with S3 Object * Lambda to detect and redact personally identifiable information (PII) and * decompress S3 objects. These Lambda functions are available in the Amazon Web * Services Serverless Application Repository, and can be selected through the * Amazon Web Services Management Console when you create your Object Lambda access * point.

Example 1: PII Access Control - This Lambda function uses Amazon * Comprehend, a natural language processing (NLP) service using machine learning * to find insights and relationships in text. It automatically detects personally * identifiable information (PII) such as names, addresses, dates, credit card * numbers, and social security numbers from documents in your Amazon S3 bucket. *

Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, * a natural language processing (NLP) service using machine learning to find * insights and relationships in text. It automatically redacts personally * identifiable information (PII) such as names, addresses, dates, credit card * numbers, and social security numbers from documents in your Amazon S3 bucket. *

Example 3: Decompression - The Lambda function * S3ObjectLambdaDecompression, is equipped to decompress objects stored in S3 in * one of six compressed file formats including bzip2, gzip, snappy, zlib, * zstandard and ZIP.

For information on how to view and use these * functions, see Using * Amazon Web Services built Lambda functions in the Amazon S3 User * Guide.

See Also:

AWS * API Reference

*/ virtual Model::WriteGetObjectResponseOutcome WriteGetObjectResponse(const Model::WriteGetObjectResponseRequest& request) const; /** *

Passes transformed objects to a GetObject operation when using * Object Lambda access points. For information about Object Lambda access points, * see Transforming * objects with Object Lambda access points in the Amazon S3 User * Guide.

This operation supports metadata that can be returned by GetObject, * in addition to RequestRoute, RequestToken, * StatusCode, ErrorCode, and ErrorMessage. * The GetObject response metadata is supported so that the * WriteGetObjectResponse caller, typically a Lambda function, can * provide the same metadata when it internally invokes GetObject. * When WriteGetObjectResponse is called by a customer-owned Lambda * function, the metadata returned to the end user GetObject call * might differ from what Amazon S3 would normally return.

You can include * any number of metadata headers. When including a metadata header, it should be * prefaced with x-amz-meta. For example, * x-amz-meta-my-custom-header: MyCustomValue. The primary use case * for this is to forward GetObject metadata.

Amazon Web * Services provides some prebuilt Lambda functions that you can use with S3 Object * Lambda to detect and redact personally identifiable information (PII) and * decompress S3 objects. These Lambda functions are available in the Amazon Web * Services Serverless Application Repository, and can be selected through the * Amazon Web Services Management Console when you create your Object Lambda access * point.

Example 1: PII Access Control - This Lambda function uses Amazon * Comprehend, a natural language processing (NLP) service using machine learning * to find insights and relationships in text. It automatically detects personally * identifiable information (PII) such as names, addresses, dates, credit card * numbers, and social security numbers from documents in your Amazon S3 bucket. *

Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, * a natural language processing (NLP) service using machine learning to find * insights and relationships in text. It automatically redacts personally * identifiable information (PII) such as names, addresses, dates, credit card * numbers, and social security numbers from documents in your Amazon S3 bucket. *

Example 3: Decompression - The Lambda function * S3ObjectLambdaDecompression, is equipped to decompress objects stored in S3 in * one of six compressed file formats including bzip2, gzip, snappy, zlib, * zstandard and ZIP.

For information on how to view and use these * functions, see Using * Amazon Web Services built Lambda functions in the Amazon S3 User * Guide.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::WriteGetObjectResponseOutcomeCallable WriteGetObjectResponseCallable(const Model::WriteGetObjectResponseRequest& request) const; /** *

Passes transformed objects to a GetObject operation when using * Object Lambda access points. For information about Object Lambda access points, * see Transforming * objects with Object Lambda access points in the Amazon S3 User * Guide.

This operation supports metadata that can be returned by GetObject, * in addition to RequestRoute, RequestToken, * StatusCode, ErrorCode, and ErrorMessage. * The GetObject response metadata is supported so that the * WriteGetObjectResponse caller, typically a Lambda function, can * provide the same metadata when it internally invokes GetObject. * When WriteGetObjectResponse is called by a customer-owned Lambda * function, the metadata returned to the end user GetObject call * might differ from what Amazon S3 would normally return.

You can include * any number of metadata headers. When including a metadata header, it should be * prefaced with x-amz-meta. For example, * x-amz-meta-my-custom-header: MyCustomValue. The primary use case * for this is to forward GetObject metadata.

Amazon Web * Services provides some prebuilt Lambda functions that you can use with S3 Object * Lambda to detect and redact personally identifiable information (PII) and * decompress S3 objects. These Lambda functions are available in the Amazon Web * Services Serverless Application Repository, and can be selected through the * Amazon Web Services Management Console when you create your Object Lambda access * point.

Example 1: PII Access Control - This Lambda function uses Amazon * Comprehend, a natural language processing (NLP) service using machine learning * to find insights and relationships in text. It automatically detects personally * identifiable information (PII) such as names, addresses, dates, credit card * numbers, and social security numbers from documents in your Amazon S3 bucket. *

Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, * a natural language processing (NLP) service using machine learning to find * insights and relationships in text. It automatically redacts personally * identifiable information (PII) such as names, addresses, dates, credit card * numbers, and social security numbers from documents in your Amazon S3 bucket. *

Example 3: Decompression - The Lambda function * S3ObjectLambdaDecompression, is equipped to decompress objects stored in S3 in * one of six compressed file formats including bzip2, gzip, snappy, zlib, * zstandard and ZIP.

For information on how to view and use these * functions, see Using * Amazon Web Services built Lambda functions in the Amazon S3 User * Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void WriteGetObjectResponseAsync(const Model::WriteGetObjectResponseRequest& request, const WriteGetObjectResponseResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; Aws::String GeneratePresignedUrl(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, long long expirationInSeconds = MAX_EXPIRATION_SECONDS); Aws::String GeneratePresignedUrl(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, const Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds = MAX_EXPIRATION_SECONDS); /** * Server Side Encryption Headers and Algorithm * Method Algorithm Required Headers * SSE-S3 AES256 x-amz-server-side-encryption:AES256 * SSE-KMS aws:kms x-amz-server-side--encryption:aws:kms, x-amz-server-side-encryption-aws-kms-key-id: * SS3-C AES256 x-amz-server-side-encryption-customer-algorithm:AES256, x-amz-server-side-encryption-customer-key:, x-amz-server-side-encryption-customer-key-MD5: */ /** * Generate presigned URL with Sever Side Encryption(SSE) and with S3 managed keys. * https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: AES256) */ Aws::String GeneratePresignedUrlWithSSES3(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, long long expirationInSeconds = MAX_EXPIRATION_SECONDS); /** * Generate presigned URL with Sever Side Encryption(SSE) and with S3 managed keys. * https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: AES256) * Header: "x-amz-server-side-encryption" will be added internally, don't customize it. 
*/ Aws::String GeneratePresignedUrlWithSSES3(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, Http::HeaderValueCollection customizedHeaders, long long expirationInSeconds = MAX_EXPIRATION_SECONDS); /** * Generate presigned URL with Server Side Encryption(SSE) and with KMS master key id. * if kmsMasterKeyId is empty, we will end up use the default one generated by KMS for you. You can find it via AWS IAM console, it's the one aliased as "aws/s3". * https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: aws:kms) */ Aws::String GeneratePresignedUrlWithSSEKMS(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, const Aws::String& kmsMasterKeyId = "", long long expirationInSeconds = MAX_EXPIRATION_SECONDS); /** * Generate presigned URL with Server Side Encryption(SSE) and with KMS master key id. * if kmsMasterKeyId is empty, we will end up use the default one generated by KMS for you. You can find it via AWS IAM console, it's the one aliased as "aws/s3". * https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: aws:kms) * Headers: "x-amz-server-side-encryption" and "x-amz-server-side-encryption-aws-kms-key-id" will be added internally, don't customize them. */ Aws::String GeneratePresignedUrlWithSSEKMS(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, Http::HeaderValueCollection customizedHeaders, const Aws::String& kmsMasterKeyId = "", long long expirationInSeconds = MAX_EXPIRATION_SECONDS); /** * Generate presigned URL with Sever Side Encryption(SSE) and with customer supplied Key. 
* https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: AES256) */ Aws::String GeneratePresignedUrlWithSSEC(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, const Aws::String& base64EncodedAES256Key, long long expirationInSeconds = MAX_EXPIRATION_SECONDS); /** * Generate presigned URL with Sever Side Encryption(SSE) and with customer supplied Key. * https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: AES256) * Headers: "x-amz-server-side-encryption-customer-algorithm","x-amz-server-side-encryption-customer-key" and "x-amz-server-side-encryption-customer-key-MD5" will be added internally, don't customize them. */ Aws::String GeneratePresignedUrlWithSSEC(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, Http::HeaderValueCollection customizedHeaders, const Aws::String& base64EncodedAES256Key, long long expirationInSeconds = MAX_EXPIRATION_SECONDS); virtual bool MultipartUploadSupported() const; void OverrideEndpoint(const Aws::String& endpoint); private: void init(const Client::ClientConfiguration& clientConfiguration); void LoadS3SpecificConfig(const Aws::String& profile); ComputeEndpointOutcome ComputeEndpointString(const Aws::String& bucket) const; ComputeEndpointOutcome ComputeEndpointString() const; ComputeEndpointOutcome ComputeEndpointStringWithServiceName(const Aws::String& serviceNameOverride = "") const; void AbortMultipartUploadAsyncHelper(const Model::AbortMultipartUploadRequest& request, const AbortMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CompleteMultipartUploadAsyncHelper(const Model::CompleteMultipartUploadRequest& request, const CompleteMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CopyObjectAsyncHelper(const Model::CopyObjectRequest& request, const CopyObjectResponseReceivedHandler& handler, const std::shared_ptr& context) const; void 
CreateBucketAsyncHelper(const Model::CreateBucketRequest& request, const CreateBucketResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateMultipartUploadAsyncHelper(const Model::CreateMultipartUploadRequest& request, const CreateMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketAsyncHelper(const Model::DeleteBucketRequest& request, const DeleteBucketResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketAnalyticsConfigurationAsyncHelper(const Model::DeleteBucketAnalyticsConfigurationRequest& request, const DeleteBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketCorsAsyncHelper(const Model::DeleteBucketCorsRequest& request, const DeleteBucketCorsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketEncryptionAsyncHelper(const Model::DeleteBucketEncryptionRequest& request, const DeleteBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketIntelligentTieringConfigurationAsyncHelper(const Model::DeleteBucketIntelligentTieringConfigurationRequest& request, const DeleteBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketInventoryConfigurationAsyncHelper(const Model::DeleteBucketInventoryConfigurationRequest& request, const DeleteBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketLifecycleAsyncHelper(const Model::DeleteBucketLifecycleRequest& request, const DeleteBucketLifecycleResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketMetricsConfigurationAsyncHelper(const Model::DeleteBucketMetricsConfigurationRequest& request, const DeleteBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) 
const; void DeleteBucketOwnershipControlsAsyncHelper(const Model::DeleteBucketOwnershipControlsRequest& request, const DeleteBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketPolicyAsyncHelper(const Model::DeleteBucketPolicyRequest& request, const DeleteBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketReplicationAsyncHelper(const Model::DeleteBucketReplicationRequest& request, const DeleteBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketTaggingAsyncHelper(const Model::DeleteBucketTaggingRequest& request, const DeleteBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteBucketWebsiteAsyncHelper(const Model::DeleteBucketWebsiteRequest& request, const DeleteBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteObjectAsyncHelper(const Model::DeleteObjectRequest& request, const DeleteObjectResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteObjectTaggingAsyncHelper(const Model::DeleteObjectTaggingRequest& request, const DeleteObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteObjectsAsyncHelper(const Model::DeleteObjectsRequest& request, const DeleteObjectsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeletePublicAccessBlockAsyncHelper(const Model::DeletePublicAccessBlockRequest& request, const DeletePublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketAccelerateConfigurationAsyncHelper(const Model::GetBucketAccelerateConfigurationRequest& request, const GetBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketAclAsyncHelper(const Model::GetBucketAclRequest& request, const 
GetBucketAclResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketAnalyticsConfigurationAsyncHelper(const Model::GetBucketAnalyticsConfigurationRequest& request, const GetBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketCorsAsyncHelper(const Model::GetBucketCorsRequest& request, const GetBucketCorsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketEncryptionAsyncHelper(const Model::GetBucketEncryptionRequest& request, const GetBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketIntelligentTieringConfigurationAsyncHelper(const Model::GetBucketIntelligentTieringConfigurationRequest& request, const GetBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketInventoryConfigurationAsyncHelper(const Model::GetBucketInventoryConfigurationRequest& request, const GetBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketLifecycleConfigurationAsyncHelper(const Model::GetBucketLifecycleConfigurationRequest& request, const GetBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketLocationAsyncHelper(const Model::GetBucketLocationRequest& request, const GetBucketLocationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketLoggingAsyncHelper(const Model::GetBucketLoggingRequest& request, const GetBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketMetricsConfigurationAsyncHelper(const Model::GetBucketMetricsConfigurationRequest& request, const GetBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketNotificationConfigurationAsyncHelper(const 
Model::GetBucketNotificationConfigurationRequest& request, const GetBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketOwnershipControlsAsyncHelper(const Model::GetBucketOwnershipControlsRequest& request, const GetBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketPolicyAsyncHelper(const Model::GetBucketPolicyRequest& request, const GetBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketPolicyStatusAsyncHelper(const Model::GetBucketPolicyStatusRequest& request, const GetBucketPolicyStatusResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketReplicationAsyncHelper(const Model::GetBucketReplicationRequest& request, const GetBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketRequestPaymentAsyncHelper(const Model::GetBucketRequestPaymentRequest& request, const GetBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketTaggingAsyncHelper(const Model::GetBucketTaggingRequest& request, const GetBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketVersioningAsyncHelper(const Model::GetBucketVersioningRequest& request, const GetBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetBucketWebsiteAsyncHelper(const Model::GetBucketWebsiteRequest& request, const GetBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetObjectAsyncHelper(const Model::GetObjectRequest& request, const GetObjectResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetObjectAclAsyncHelper(const Model::GetObjectAclRequest& request, const GetObjectAclResponseReceivedHandler& handler, const std::shared_ptr& context) const; void 
GetObjectLegalHoldAsyncHelper(const Model::GetObjectLegalHoldRequest& request, const GetObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetObjectLockConfigurationAsyncHelper(const Model::GetObjectLockConfigurationRequest& request, const GetObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetObjectRetentionAsyncHelper(const Model::GetObjectRetentionRequest& request, const GetObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetObjectTaggingAsyncHelper(const Model::GetObjectTaggingRequest& request, const GetObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetObjectTorrentAsyncHelper(const Model::GetObjectTorrentRequest& request, const GetObjectTorrentResponseReceivedHandler& handler, const std::shared_ptr& context) const; void GetPublicAccessBlockAsyncHelper(const Model::GetPublicAccessBlockRequest& request, const GetPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr& context) const; void HeadBucketAsyncHelper(const Model::HeadBucketRequest& request, const HeadBucketResponseReceivedHandler& handler, const std::shared_ptr& context) const; void HeadObjectAsyncHelper(const Model::HeadObjectRequest& request, const HeadObjectResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListBucketAnalyticsConfigurationsAsyncHelper(const Model::ListBucketAnalyticsConfigurationsRequest& request, const ListBucketAnalyticsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListBucketIntelligentTieringConfigurationsAsyncHelper(const Model::ListBucketIntelligentTieringConfigurationsRequest& request, const ListBucketIntelligentTieringConfigurationsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListBucketInventoryConfigurationsAsyncHelper(const Model::ListBucketInventoryConfigurationsRequest& 
request, const ListBucketInventoryConfigurationsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListBucketMetricsConfigurationsAsyncHelper(const Model::ListBucketMetricsConfigurationsRequest& request, const ListBucketMetricsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListBucketsAsyncHelper(const ListBucketsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListMultipartUploadsAsyncHelper(const Model::ListMultipartUploadsRequest& request, const ListMultipartUploadsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListObjectVersionsAsyncHelper(const Model::ListObjectVersionsRequest& request, const ListObjectVersionsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListObjectsAsyncHelper(const Model::ListObjectsRequest& request, const ListObjectsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListObjectsV2AsyncHelper(const Model::ListObjectsV2Request& request, const ListObjectsV2ResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListPartsAsyncHelper(const Model::ListPartsRequest& request, const ListPartsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketAccelerateConfigurationAsyncHelper(const Model::PutBucketAccelerateConfigurationRequest& request, const PutBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketAclAsyncHelper(const Model::PutBucketAclRequest& request, const PutBucketAclResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketAnalyticsConfigurationAsyncHelper(const Model::PutBucketAnalyticsConfigurationRequest& request, const PutBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketCorsAsyncHelper(const Model::PutBucketCorsRequest& request, const 
PutBucketCorsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketEncryptionAsyncHelper(const Model::PutBucketEncryptionRequest& request, const PutBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketIntelligentTieringConfigurationAsyncHelper(const Model::PutBucketIntelligentTieringConfigurationRequest& request, const PutBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketInventoryConfigurationAsyncHelper(const Model::PutBucketInventoryConfigurationRequest& request, const PutBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketLifecycleConfigurationAsyncHelper(const Model::PutBucketLifecycleConfigurationRequest& request, const PutBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketLoggingAsyncHelper(const Model::PutBucketLoggingRequest& request, const PutBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketMetricsConfigurationAsyncHelper(const Model::PutBucketMetricsConfigurationRequest& request, const PutBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketNotificationConfigurationAsyncHelper(const Model::PutBucketNotificationConfigurationRequest& request, const PutBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketOwnershipControlsAsyncHelper(const Model::PutBucketOwnershipControlsRequest& request, const PutBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketPolicyAsyncHelper(const Model::PutBucketPolicyRequest& request, const PutBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketReplicationAsyncHelper(const 
Model::PutBucketReplicationRequest& request, const PutBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketRequestPaymentAsyncHelper(const Model::PutBucketRequestPaymentRequest& request, const PutBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketTaggingAsyncHelper(const Model::PutBucketTaggingRequest& request, const PutBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketVersioningAsyncHelper(const Model::PutBucketVersioningRequest& request, const PutBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutBucketWebsiteAsyncHelper(const Model::PutBucketWebsiteRequest& request, const PutBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutObjectAsyncHelper(const Model::PutObjectRequest& request, const PutObjectResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutObjectAclAsyncHelper(const Model::PutObjectAclRequest& request, const PutObjectAclResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutObjectLegalHoldAsyncHelper(const Model::PutObjectLegalHoldRequest& request, const PutObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutObjectLockConfigurationAsyncHelper(const Model::PutObjectLockConfigurationRequest& request, const PutObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutObjectRetentionAsyncHelper(const Model::PutObjectRetentionRequest& request, const PutObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutObjectTaggingAsyncHelper(const Model::PutObjectTaggingRequest& request, const PutObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutPublicAccessBlockAsyncHelper(const 
Model::PutPublicAccessBlockRequest& request, const PutPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr& context) const; void RestoreObjectAsyncHelper(const Model::RestoreObjectRequest& request, const RestoreObjectResponseReceivedHandler& handler, const std::shared_ptr& context) const; void SelectObjectContentAsyncHelper(Model::SelectObjectContentRequest& request, const SelectObjectContentResponseReceivedHandler& handler, const std::shared_ptr& context) const; void UploadPartAsyncHelper(const Model::UploadPartRequest& request, const UploadPartResponseReceivedHandler& handler, const std::shared_ptr& context) const; void UploadPartCopyAsyncHelper(const Model::UploadPartCopyRequest& request, const UploadPartCopyResponseReceivedHandler& handler, const std::shared_ptr& context) const; void WriteGetObjectResponseAsyncHelper(const Model::WriteGetObjectResponseRequest& request, const WriteGetObjectResponseResponseReceivedHandler& handler, const std::shared_ptr& context) const; Aws::String m_baseUri; Aws::String m_scheme; bool m_enableHostPrefixInjection; Aws::String m_configScheme; std::shared_ptr m_executor; bool m_useVirtualAddressing; bool m_useDualStack; bool m_useArnRegion; bool m_disableMultiRegionAccessPoints; bool m_useCustomEndpoint; Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION m_USEast1RegionalEndpointOption; }; } // namespace S3 } // namespace Aws