/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/dms/DatabaseMigrationService_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/dms/model/CompressionTypeValue.h>
#include <aws/dms/model/EncryptionModeValue.h>
#include <aws/dms/model/DataFormatValue.h>
#include <aws/dms/model/EncodingTypeValue.h>
#include <aws/dms/model/ParquetVersionValue.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace DatabaseMigrationService
{
namespace Model
{

  /**
   * <p>Settings for exporting data to Amazon S3.</p><p><h3>See Also:</h3>   <a
   * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings">AWS
   * API Reference</a></p>
   */
  class S3Settings
  {
  public:
    AWS_DATABASEMIGRATIONSERVICE_API S3Settings();
    AWS_DATABASEMIGRATIONSERVICE_API S3Settings(Aws::Utils::Json::JsonView jsonValue);
    AWS_DATABASEMIGRATIONSERVICE_API S3Settings& operator=(Aws::Utils::Json::JsonView jsonValue);
    AWS_DATABASEMIGRATIONSERVICE_API Aws::Utils::Json::JsonValue Jsonize() const;


    ///@{
    /**
     * <p>The Amazon Resource Name (ARN) used by the service to access the IAM role.
     * The role must allow the <code>iam:PassRole</code> action. It is a required
     * parameter that enables DMS to write and read objects from an S3 bucket.</p>
     */
    inline const Aws::String& GetServiceAccessRoleArn() const{ return m_serviceAccessRoleArn; }
    inline bool ServiceAccessRoleArnHasBeenSet() const { return m_serviceAccessRoleArnHasBeenSet; }
    inline void SetServiceAccessRoleArn(const Aws::String& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = value; }
    inline void SetServiceAccessRoleArn(Aws::String&& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = std::move(value); }
    inline void SetServiceAccessRoleArn(const char* value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn.assign(value); }
    inline S3Settings& WithServiceAccessRoleArn(const Aws::String& value) { SetServiceAccessRoleArn(value); return *this;}
    inline S3Settings& WithServiceAccessRoleArn(Aws::String&& value) { SetServiceAccessRoleArn(std::move(value)); return *this;}
    inline S3Settings& WithServiceAccessRoleArn(const char* value) { SetServiceAccessRoleArn(value); return *this;}
    ///@}
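    // Usage sketch (not part of the generated API; the ARN and bucket name
    // below are placeholders): the With* setters return *this, so an S3
    // endpoint's role and bucket can be configured fluently.
    //
    //   S3Settings settings;
    //   settings.WithServiceAccessRoleArn("arn:aws:iam::123456789012:role/dms-s3-access")
    //           .WithBucketName("example-dms-bucket");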

    ///@{
    /**
     * <p>Specifies how tables are defined in the S3 source files only.</p>
     */
    inline const Aws::String& GetExternalTableDefinition() const{ return m_externalTableDefinition; }
    inline bool ExternalTableDefinitionHasBeenSet() const { return m_externalTableDefinitionHasBeenSet; }
    inline void SetExternalTableDefinition(const Aws::String& value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition = value; }
    inline void SetExternalTableDefinition(Aws::String&& value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition = std::move(value); }
    inline void SetExternalTableDefinition(const char* value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition.assign(value); }
    inline S3Settings& WithExternalTableDefinition(const Aws::String& value) { SetExternalTableDefinition(value); return *this;}
    inline S3Settings& WithExternalTableDefinition(Aws::String&& value) { SetExternalTableDefinition(std::move(value)); return *this;}
    inline S3Settings& WithExternalTableDefinition(const char* value) { SetExternalTableDefinition(value); return *this;}
    ///@}

    ///@{
    /**
     * <p>The delimiter used to separate rows in the .csv file for both source and
     * target. The default is a newline (<code>\n</code>).</p>
     */
    inline const Aws::String& GetCsvRowDelimiter() const{ return m_csvRowDelimiter; }
    inline bool CsvRowDelimiterHasBeenSet() const { return m_csvRowDelimiterHasBeenSet; }
    inline void SetCsvRowDelimiter(const Aws::String& value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter = value; }
    inline void SetCsvRowDelimiter(Aws::String&& value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter = std::move(value); }
    inline void SetCsvRowDelimiter(const char* value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter.assign(value); }
    inline S3Settings& WithCsvRowDelimiter(const Aws::String& value) { SetCsvRowDelimiter(value); return *this;}
    inline S3Settings& WithCsvRowDelimiter(Aws::String&& value) { SetCsvRowDelimiter(std::move(value)); return *this;}
    inline S3Settings& WithCsvRowDelimiter(const char* value) { SetCsvRowDelimiter(value); return *this;}
    ///@}

    ///@{
    /**
     * <p>The delimiter used to separate columns in the .csv file for both source and
     * target. The default is a comma.</p>
     */
    inline const Aws::String& GetCsvDelimiter() const{ return m_csvDelimiter; }
    inline bool CsvDelimiterHasBeenSet() const { return m_csvDelimiterHasBeenSet; }
    inline void SetCsvDelimiter(const Aws::String& value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter = value; }
    inline void SetCsvDelimiter(Aws::String&& value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter = std::move(value); }
    inline void SetCsvDelimiter(const char* value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter.assign(value); }
    inline S3Settings& WithCsvDelimiter(const Aws::String& value) { SetCsvDelimiter(value); return *this;}
    inline S3Settings& WithCsvDelimiter(Aws::String&& value) { SetCsvDelimiter(std::move(value)); return *this;}
    inline S3Settings& WithCsvDelimiter(const char* value) { SetCsvDelimiter(value); return *this;}
    ///@}
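    // Usage sketch, assuming pipe-delimited output is wanted instead of the
    // comma/newline defaults (values here are illustrative):
    //
    //   S3Settings csvSettings;
    //   csvSettings.WithCsvDelimiter("|")
    //              .WithCsvRowDelimiter("\n");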

    ///@{
    /**
     * <p>An optional parameter to set a folder name in the S3 bucket. If provided,
     * tables are created in the path
     * <code><i>bucketFolder</i>/<i>schema_name</i>/<i>table_name</i>/</code>. If this
     * parameter isn't specified, then the path used is
     * <code><i>schema_name</i>/<i>table_name</i>/</code>.</p>
     */
    inline const Aws::String& GetBucketFolder() const{ return m_bucketFolder; }
    inline bool BucketFolderHasBeenSet() const { return m_bucketFolderHasBeenSet; }
    inline void SetBucketFolder(const Aws::String& value) { m_bucketFolderHasBeenSet = true; m_bucketFolder = value; }
    inline void SetBucketFolder(Aws::String&& value) { m_bucketFolderHasBeenSet = true; m_bucketFolder = std::move(value); }
    inline void SetBucketFolder(const char* value) { m_bucketFolderHasBeenSet = true; m_bucketFolder.assign(value); }
    inline S3Settings& WithBucketFolder(const Aws::String& value) { SetBucketFolder(value); return *this;}
    inline S3Settings& WithBucketFolder(Aws::String&& value) { SetBucketFolder(std::move(value)); return *this;}
    inline S3Settings& WithBucketFolder(const char* value) { SetBucketFolder(value); return *this;}
    ///@}
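    // For example, with BucketFolder set to "dms-output" (a placeholder), a
    // table "orders" in schema "sales" is written under
    // bucketName/dms-output/sales/orders/; with no BucketFolder it lands
    // under bucketName/sales/orders/.
    //
    //   S3Settings foldered;
    //   foldered.WithBucketFolder("dms-output");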

    ///@{
    /**
     * <p>The name of the S3 bucket.</p>
     */
    inline const Aws::String& GetBucketName() const{ return m_bucketName; }
    inline bool BucketNameHasBeenSet() const { return m_bucketNameHasBeenSet; }
    inline void SetBucketName(const Aws::String& value) { m_bucketNameHasBeenSet = true; m_bucketName = value; }
    inline void SetBucketName(Aws::String&& value) { m_bucketNameHasBeenSet = true; m_bucketName = std::move(value); }
    inline void SetBucketName(const char* value) { m_bucketNameHasBeenSet = true; m_bucketName.assign(value); }
    inline S3Settings& WithBucketName(const Aws::String& value) { SetBucketName(value); return *this;}
    inline S3Settings& WithBucketName(Aws::String&& value) { SetBucketName(std::move(value)); return *this;}
    inline S3Settings& WithBucketName(const char* value) { SetBucketName(value); return *this;}
    ///@}

    ///@{
    /**
     * <p>An optional parameter. Set to <code>GZIP</code> to compress the target
     * files. Either set this parameter to <code>NONE</code> (the default) or leave it
     * unset to keep the files uncompressed. This parameter applies to both .csv and
     * .parquet file formats.</p>
     */
    inline const CompressionTypeValue& GetCompressionType() const{ return m_compressionType; }
    inline bool CompressionTypeHasBeenSet() const { return m_compressionTypeHasBeenSet; }
    inline void SetCompressionType(const CompressionTypeValue& value) { m_compressionTypeHasBeenSet = true; m_compressionType = value; }
    inline void SetCompressionType(CompressionTypeValue&& value) { m_compressionTypeHasBeenSet = true; m_compressionType = std::move(value); }
    inline S3Settings& WithCompressionType(const CompressionTypeValue& value) { SetCompressionType(value); return *this;}
    inline S3Settings& WithCompressionType(CompressionTypeValue&& value) { SetCompressionType(std::move(value)); return *this;}
    ///@}

    ///@{
    /**
     * <p>The type of server-side encryption that you want to use for your data. This
     * encryption type is part of the endpoint settings or the extra connection
     * attributes for Amazon S3. You can choose either <code>SSE_S3</code> (the
     * default) or <code>SSE_KMS</code>.</p> <p>For the <code>ModifyEndpoint</code>
     * operation, you can change the existing value of the
     * <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to
     * <code>SSE_S3</code>. But you can't change the existing value from
     * <code>SSE_S3</code> to <code>SSE_KMS</code>.</p> <p>To use
     * <code>SSE_S3</code>, you need an Identity and Access Management (IAM) role
     * with permission to allow <code>"arn:aws:s3:::dms-*"</code> to use the
     * following actions:</p> <ul> <li> <p> <code>s3:CreateBucket</code> </p> </li>
     * <li> <p> <code>s3:ListBucket</code> </p> </li> <li> <p>
     * <code>s3:DeleteBucket</code> </p> </li> <li> <p>
     * <code>s3:GetBucketLocation</code> </p> </li> <li> <p>
     * <code>s3:GetObject</code> </p> </li> <li> <p> <code>s3:PutObject</code> </p>
     * </li> <li> <p> <code>s3:DeleteObject</code> </p> </li> <li> <p>
     * <code>s3:GetObjectVersion</code> </p> </li> <li> <p>
     * <code>s3:GetBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:PutBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:DeleteBucketPolicy</code> </p> </li> </ul>
     */
    inline const EncryptionModeValue& GetEncryptionMode() const{ return m_encryptionMode; }
    inline bool EncryptionModeHasBeenSet() const { return m_encryptionModeHasBeenSet; }
    inline void SetEncryptionMode(const EncryptionModeValue& value) { m_encryptionModeHasBeenSet = true; m_encryptionMode = value; }
    inline void SetEncryptionMode(EncryptionModeValue&& value) { m_encryptionModeHasBeenSet = true; m_encryptionMode = std::move(value); }
    inline S3Settings& WithEncryptionMode(const EncryptionModeValue& value) { SetEncryptionMode(value); return *this;}
    inline S3Settings& WithEncryptionMode(EncryptionModeValue&& value) { SetEncryptionMode(std::move(value)); return *this;}
    ///@}
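    // Usage sketch for KMS-managed encryption. The enumerator spelling is an
    // assumption; check the generated EncryptionModeValue.h for the exact
    // names. Note the one-way restriction documented above: ModifyEndpoint
    // can move SSE_KMS back to SSE_S3, but not the reverse.
    //
    //   S3Settings encrypted;
    //   encrypted.WithEncryptionMode(EncryptionModeValue::SSE_KMS);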

    ///@{
    /**
     * <p>If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>,
     * provide the KMS key ID. The key that you use needs an attached policy that
     * enables Identity and Access Management (IAM) user permissions and allows use
     * of the key.</p> <p>Here is a CLI example: <code>aws dms create-endpoint
     * --endpoint-identifier <i>value</i> --endpoint-type target --engine-name s3
     * --s3-settings
     * ServiceAccessRoleArn=<i>value</i>,BucketFolder=<i>value</i>,BucketName=<i>value</i>,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=<i>value</i>
     * </code> </p>
     */
    inline const Aws::String& GetServerSideEncryptionKmsKeyId() const{ return m_serverSideEncryptionKmsKeyId; }
    inline bool ServerSideEncryptionKmsKeyIdHasBeenSet() const { return m_serverSideEncryptionKmsKeyIdHasBeenSet; }
    inline void SetServerSideEncryptionKmsKeyId(const Aws::String& value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId = value; }
    inline void SetServerSideEncryptionKmsKeyId(Aws::String&& value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId = std::move(value); }
    inline void SetServerSideEncryptionKmsKeyId(const char* value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId.assign(value); }
    inline S3Settings& WithServerSideEncryptionKmsKeyId(const Aws::String& value) { SetServerSideEncryptionKmsKeyId(value); return *this;}
    inline S3Settings& WithServerSideEncryptionKmsKeyId(Aws::String&& value) { SetServerSideEncryptionKmsKeyId(std::move(value)); return *this;}
    inline S3Settings& WithServerSideEncryptionKmsKeyId(const char* value) { SetServerSideEncryptionKmsKeyId(value); return *this;}
    ///@}
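    // A C++ counterpart to the CLI example above (sketch only; the ARN,
    // bucket, and key ID are placeholders, and the enumerator spelling is an
    // assumption):
    //
    //   S3Settings kmsSettings;
    //   kmsSettings.WithServiceAccessRoleArn("arn:aws:iam::123456789012:role/dms-s3-access")
    //              .WithBucketName("example-dms-bucket")
    //              .WithEncryptionMode(EncryptionModeValue::SSE_KMS)
    //              .WithServerSideEncryptionKmsKeyId("1234abcd-12ab-34cd-56ef-1234567890ab");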

    ///@{
    /**
     * <p>The format of the data that you want to use for output. You can choose one
     * of the following: </p> <ul> <li> <p> <code>csv</code> : This is a row-based
     * file format with comma-separated values (.csv).</p> </li> <li> <p>
     * <code>parquet</code> : Apache Parquet (.parquet) is a columnar storage file
     * format that features efficient compression and provides faster query
     * response.</p> </li> </ul>
     */
    inline const DataFormatValue& GetDataFormat() const{ return m_dataFormat; }
    inline bool DataFormatHasBeenSet() const { return m_dataFormatHasBeenSet; }
    inline void SetDataFormat(const DataFormatValue& value) { m_dataFormatHasBeenSet = true; m_dataFormat = value; }
    inline void SetDataFormat(DataFormatValue&& value) { m_dataFormatHasBeenSet = true; m_dataFormat = std::move(value); }
    inline S3Settings& WithDataFormat(const DataFormatValue& value) { SetDataFormat(value); return *this;}
    inline S3Settings& WithDataFormat(DataFormatValue&& value) { SetDataFormat(std::move(value)); return *this;}
    ///@}
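    // Usage sketch: requesting columnar output. Parquet commonly pairs with
    // compression, so both settings are shown together (enumerator spellings
    // assumed; see the generated DataFormatValue.h and CompressionTypeValue.h):
    //
    //   S3Settings parquetOut;
    //   parquetOut.WithDataFormat(DataFormatValue::parquet)
    //             .WithCompressionType(CompressionTypeValue::GZIP);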

    ///@{
    /**
     * <p>The type of encoding you are using: </p> <ul> <li> <p>
     * <code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length
     * encoding to store repeated values more efficiently. This is the default.</p>
     * </li> <li> <p> <code>PLAIN</code> doesn't use encoding at all. Values are
     * stored as they are.</p> </li> <li> <p> <code>PLAIN_DICTIONARY</code> builds a
     * dictionary of the values encountered in a given column. The dictionary is
     * stored in a dictionary page for each column chunk.</p> </li> </ul>
     */
    inline const EncodingTypeValue& GetEncodingType() const{ return m_encodingType; }
    inline bool EncodingTypeHasBeenSet() const { return m_encodingTypeHasBeenSet; }
    inline void SetEncodingType(const EncodingTypeValue& value) { m_encodingTypeHasBeenSet = true; m_encodingType = value; }
    inline void SetEncodingType(EncodingTypeValue&& value) { m_encodingTypeHasBeenSet = true; m_encodingType = std::move(value); }
    inline S3Settings& WithEncodingType(const EncodingTypeValue& value) { SetEncodingType(value); return *this;}
    inline S3Settings& WithEncodingType(EncodingTypeValue&& value) { SetEncodingType(std::move(value)); return *this;}
    ///@}

    ///@{
    /**
     * <p>The maximum size of an encoded dictionary page of a column. If the
     * dictionary page exceeds this maximum, the column is stored using an encoding
     * type of <code>PLAIN</code>. This parameter defaults to 1024 * 1024 bytes (1
     * MiB). This size is used for the .parquet file format only.</p>
     */
    inline int GetDictPageSizeLimit() const{ return m_dictPageSizeLimit; }
    inline bool DictPageSizeLimitHasBeenSet() const { return m_dictPageSizeLimitHasBeenSet; }
    inline void SetDictPageSizeLimit(int value) { m_dictPageSizeLimitHasBeenSet = true; m_dictPageSizeLimit = value; }
    inline S3Settings& WithDictPageSizeLimit(int value) { SetDictPageSizeLimit(value); return *this;}
    ///@}

    ///@{
    /**
     * <p>The number of rows in a row group. A smaller row group size provides faster
     * reads, but writes become slower as the number of row groups grows. This
     * parameter defaults to 10,000 rows. This number is used for the .parquet file
     * format only.</p> <p>If you choose a value larger than the maximum,
     * <code>RowGroupLength</code> is set to the max row group length in bytes (64 *
     * 1024 * 1024).</p>
     */
    inline int GetRowGroupLength() const{ return m_rowGroupLength; }
    inline bool RowGroupLengthHasBeenSet() const { return m_rowGroupLengthHasBeenSet; }
    inline void SetRowGroupLength(int value) { m_rowGroupLengthHasBeenSet = true; m_rowGroupLength = value; }
    inline S3Settings& WithRowGroupLength(int value) { SetRowGroupLength(value); return *this;}
    ///@}

    ///@{
    /**
     * <p>The size of one data page in bytes. This parameter defaults to 1024 * 1024
     * bytes (1 MiB). This number is used for the .parquet file format only.</p>
     */
    inline int GetDataPageSize() const{ return m_dataPageSize; }
    inline bool DataPageSizeHasBeenSet() const { return m_dataPageSizeHasBeenSet; }
    inline void SetDataPageSize(int value) { m_dataPageSizeHasBeenSet = true; m_dataPageSize = value; }
    inline S3Settings& WithDataPageSize(int value) { SetDataPageSize(value); return *this;}
    ///@}
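    // The three .parquet sizing knobs together; the values simply restate the
    // documented defaults (sketch only):
    //
    //   S3Settings tuned;
    //   tuned.WithRowGroupLength(10000)            // rows per row group
    //        .WithDataPageSize(1024 * 1024)        // 1 MiB data pages
    //        .WithDictPageSizeLimit(1024 * 1024);  // above 1 MiB, fall back to PLAIN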

    ///@{
    /**
     * <p>The version of the Apache Parquet format that you want to use:
     * <code>parquet_1_0</code> (the default) or <code>parquet_2_0</code>.</p>
     */
    inline const ParquetVersionValue& GetParquetVersion() const{ return m_parquetVersion; }
    inline bool ParquetVersionHasBeenSet() const { return m_parquetVersionHasBeenSet; }
    inline void SetParquetVersion(const ParquetVersionValue& value) { m_parquetVersionHasBeenSet = true; m_parquetVersion = value; }
    inline void SetParquetVersion(ParquetVersionValue&& value) { m_parquetVersionHasBeenSet = true; m_parquetVersion = std::move(value); }
    inline S3Settings& WithParquetVersion(const ParquetVersionValue& value) { SetParquetVersion(value); return *this;}
    inline S3Settings& WithParquetVersion(ParquetVersionValue&& value) { SetParquetVersion(std::move(value)); return *this;}
    ///@}

    ///@{
    /**
     * <p>A value that enables statistics for Parquet pages and row groups. Choose
     * <code>true</code> to enable statistics, <code>false</code> to disable.
     * Statistics include <code>NULL</code>, <code>DISTINCT</code>,
     * <code>MAX</code>, and <code>MIN</code> values. This parameter defaults to
     * <code>true</code>. This value is used for the .parquet file format only.</p>
     */
    inline bool GetEnableStatistics() const{ return m_enableStatistics; }
    inline bool EnableStatisticsHasBeenSet() const { return m_enableStatisticsHasBeenSet; }
    inline void SetEnableStatistics(bool value) { m_enableStatisticsHasBeenSet = true; m_enableStatistics = value; }
    inline S3Settings& WithEnableStatistics(bool value) { SetEnableStatistics(value); return *this;}
    ///@}

    ///@{
    /**
     * <p>A value that enables a full load to write INSERT operations to the
     * comma-separated value (.csv) or .parquet output files only to indicate how the
     * rows were added to the source database.</p> <note> <p>DMS supports the
     * <code>IncludeOpForFullLoad</code> parameter in versions 3.1.4 and later.</p>
     * </note> <note> <p>DMS supports the use of the .parquet files with the
     * <code>IncludeOpForFullLoad</code> parameter in versions 3.4.7 and later.</p>
     * </note> <p>For full load, records can only be inserted. By default (the
     * <code>false</code> setting), no information is recorded in these output files
     * for a full load to indicate that the rows were inserted at the source
     * database. If <code>IncludeOpForFullLoad</code> is set to <code>true</code> or
     * <code>y</code>, the INSERT is recorded as an I annotation in the first field
     * of the .csv file. This allows the format of your target records from a full
     * load to be consistent with the target records from a CDC load.</p> <p>This
     * setting works together with the <code>CdcInsertsOnly</code> and the
     * <code>CdcInsertsAndUpdates</code> parameters for output to .csv files only.
     * For more information about how these settings work together, see <i>Indicating
     * Source DB Operations in Migrated S3 Data</i> in the <i>Database Migration
     * Service User Guide</i>.</p>
     */
    inline bool GetIncludeOpForFullLoad() const{ return m_includeOpForFullLoad; }
    inline bool IncludeOpForFullLoadHasBeenSet() const { return m_includeOpForFullLoadHasBeenSet; }
    inline void SetIncludeOpForFullLoad(bool value) { m_includeOpForFullLoadHasBeenSet = true; m_includeOpForFullLoad = value; }
    inline S3Settings& WithIncludeOpForFullLoad(bool value) { SetIncludeOpForFullLoad(value); return *this;}
    ///@}
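    // Usage sketch: annotate full-load rows with an "I" so they share the
    // CDC record layout described above.
    //
    //   S3Settings annotated;
    //   annotated.WithIncludeOpForFullLoad(true);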

    ///@{
    /**
     * <p>A value that enables a change data capture (CDC) load to write only INSERT
     * operations to .csv or columnar storage (.parquet) output files. By default
     * (the <code>false</code> setting), the first field in a .csv or .parquet record
     * contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values
     * indicate whether the row was inserted, updated, or deleted at the source
     * database for a CDC load to the target.</p> <p>If <code>CdcInsertsOnly</code>
     * is set to <code>true</code> or <code>y</code>, only INSERTs from the source
     * database are migrated to the .csv or .parquet file. For .csv format only, how
     * these INSERTs are recorded depends on the value of
     * <code>IncludeOpForFullLoad</code>. If <code>IncludeOpForFullLoad</code> is set
     * to <code>true</code>, the first field of every CDC record is set to I to
     * indicate the INSERT operation at the source. If
     * <code>IncludeOpForFullLoad</code> is set to <code>false</code>, every CDC
     * record is written without a first field to indicate the INSERT operation at
     * the source. For more information about how these settings work together, see
     * <i>Indicating Source DB Operations in Migrated S3 Data</i> in the <i>Database
     * Migration Service User Guide</i>.</p> <note> <p>DMS supports the interaction
     * described earlier between the <code>CdcInsertsOnly</code> and
     * <code>IncludeOpForFullLoad</code> parameters in versions 3.1.4 and later.</p>
     * </note> <note> <p> <code>CdcInsertsOnly</code> and
     * <code>CdcInsertsAndUpdates</code> can't both be set to <code>true</code> for
     * the same endpoint. Set either <code>CdcInsertsOnly</code> or
     * <code>CdcInsertsAndUpdates</code> to <code>true</code> for the same endpoint,
     * but not both.</p> </note>
     */
    inline bool GetCdcInsertsOnly() const{ return m_cdcInsertsOnly; }
    inline bool CdcInsertsOnlyHasBeenSet() const { return m_cdcInsertsOnlyHasBeenSet; }
    inline void SetCdcInsertsOnly(bool value) { m_cdcInsertsOnlyHasBeenSet = true; m_cdcInsertsOnly = value; }
    inline S3Settings& WithCdcInsertsOnly(bool value) { SetCdcInsertsOnly(value); return *this;}
    ///@}

A value that when nonblank causes DMS to add a column with timestamp * information to the endpoint data for an Amazon S3 target.

DMS * supports the TimestampColumnName parameter in versions 3.1.4 and * later.

DMS includes an additional STRING column in * the .csv or .parquet object files of your migrated data when you set * TimestampColumnName to a nonblank value.

For a full load, * each row of this timestamp column contains a timestamp for when the data was * transferred from the source to the target by DMS.

For a change data * capture (CDC) load, each row of the timestamp column contains the timestamp for * the commit of that row in the source database.

The string format for this * timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, * the precision of this value is in microseconds. For a CDC load, the rounding of * the precision depends on the commit timestamp supported by DMS for the source * database.

When the AddColumnName parameter is set to * true, DMS also includes a name for the timestamp column that you * set with TimestampColumnName.

*/ inline const Aws::String& GetTimestampColumnName() const{ return m_timestampColumnName; } /** *

A value that when nonblank causes DMS to add a column with timestamp * information to the endpoint data for an Amazon S3 target.

DMS * supports the TimestampColumnName parameter in versions 3.1.4 and * later.

DMS includes an additional STRING column in * the .csv or .parquet object files of your migrated data when you set * TimestampColumnName to a nonblank value.

For a full load, * each row of this timestamp column contains a timestamp for when the data was * transferred from the source to the target by DMS.

For a change data * capture (CDC) load, each row of the timestamp column contains the timestamp for * the commit of that row in the source database.

The string format for this * timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, * the precision of this value is in microseconds. For a CDC load, the rounding of * the precision depends on the commit timestamp supported by DMS for the source * database.

When the AddColumnName parameter is set to * true, DMS also includes a name for the timestamp column that you * set with TimestampColumnName.

*/ inline bool TimestampColumnNameHasBeenSet() const { return m_timestampColumnNameHasBeenSet; } /** *

A value that when nonblank causes DMS to add a column with timestamp * information to the endpoint data for an Amazon S3 target.

DMS * supports the TimestampColumnName parameter in versions 3.1.4 and * later.

DMS includes an additional STRING column in * the .csv or .parquet object files of your migrated data when you set * TimestampColumnName to a nonblank value.

For a full load, * each row of this timestamp column contains a timestamp for when the data was * transferred from the source to the target by DMS.

For a change data * capture (CDC) load, each row of the timestamp column contains the timestamp for * the commit of that row in the source database.

The string format for this * timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, * the precision of this value is in microseconds. For a CDC load, the rounding of * the precision depends on the commit timestamp supported by DMS for the source * database.

When the AddColumnName parameter is set to * true, DMS also includes a name for the timestamp column that you * set with TimestampColumnName.

*/ inline void SetTimestampColumnName(const Aws::String& value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName = value; } /** *

A value that when nonblank causes DMS to add a column with timestamp * information to the endpoint data for an Amazon S3 target.

DMS * supports the TimestampColumnName parameter in versions 3.1.4 and * later.

DMS includes an additional STRING column in * the .csv or .parquet object files of your migrated data when you set * TimestampColumnName to a nonblank value.

For a full load, * each row of this timestamp column contains a timestamp for when the data was * transferred from the source to the target by DMS.

For a change data * capture (CDC) load, each row of the timestamp column contains the timestamp for * the commit of that row in the source database.

The string format for this * timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, * the precision of this value is in microseconds. For a CDC load, the rounding of * the precision depends on the commit timestamp supported by DMS for the source * database.

When the AddColumnName parameter is set to * true, DMS also includes a name for the timestamp column that you * set with TimestampColumnName.

*/ inline void SetTimestampColumnName(Aws::String&& value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName = std::move(value); } /** *

A value that when nonblank causes DMS to add a column with timestamp * information to the endpoint data for an Amazon S3 target.

DMS * supports the TimestampColumnName parameter in versions 3.1.4 and * later.

DMS includes an additional STRING column in * the .csv or .parquet object files of your migrated data when you set * TimestampColumnName to a nonblank value.

For a full load, * each row of this timestamp column contains a timestamp for when the data was * transferred from the source to the target by DMS.

For a change data * capture (CDC) load, each row of the timestamp column contains the timestamp for * the commit of that row in the source database.

The string format for this * timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, * the precision of this value is in microseconds. For a CDC load, the rounding of * the precision depends on the commit timestamp supported by DMS for the source * database.

When the AddColumnName parameter is set to * true, DMS also includes a name for the timestamp column that you * set with TimestampColumnName.

*/ inline void SetTimestampColumnName(const char* value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName.assign(value); } /** *

A value that when nonblank causes DMS to add a column with timestamp * information to the endpoint data for an Amazon S3 target.

DMS * supports the TimestampColumnName parameter in versions 3.1.4 and * later.

DMS includes an additional STRING column in * the .csv or .parquet object files of your migrated data when you set * TimestampColumnName to a nonblank value.

For a full load, * each row of this timestamp column contains a timestamp for when the data was * transferred from the source to the target by DMS.

For a change data * capture (CDC) load, each row of the timestamp column contains the timestamp for * the commit of that row in the source database.

The string format for this * timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, * the precision of this value is in microseconds. For a CDC load, the rounding of * the precision depends on the commit timestamp supported by DMS for the source * database.

When the AddColumnName parameter is set to * true, DMS also includes a name for the timestamp column that you * set with TimestampColumnName.

*/ inline S3Settings& WithTimestampColumnName(const Aws::String& value) { SetTimestampColumnName(value); return *this;} /** *

A value that, when nonblank, causes DMS to add a column with timestamp * information to the endpoint data for an Amazon S3 target.

DMS * supports the TimestampColumnName parameter in versions 3.1.4 and * later.

DMS includes an additional STRING column in * the .csv or .parquet object files of your migrated data when you set * TimestampColumnName to a nonblank value.

For a full load, * each row of this timestamp column contains a timestamp for when the data was * transferred from the source to the target by DMS.

For a change data * capture (CDC) load, each row of the timestamp column contains the timestamp for * the commit of that row in the source database.

The string format for this * timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, * the precision of this value is in microseconds. For a CDC load, the rounding of * the precision depends on the commit timestamp supported by DMS for the source * database.

When the AddColumnName parameter is set to * true, DMS also includes a name for the timestamp column that you * set with TimestampColumnName.

*/ inline S3Settings& WithTimestampColumnName(Aws::String&& value) { SetTimestampColumnName(std::move(value)); return *this;} /** *

A value that, when nonblank, causes DMS to add a column with timestamp * information to the endpoint data for an Amazon S3 target.

DMS * supports the TimestampColumnName parameter in versions 3.1.4 and * later.

DMS includes an additional STRING column in * the .csv or .parquet object files of your migrated data when you set * TimestampColumnName to a nonblank value.

For a full load, * each row of this timestamp column contains a timestamp for when the data was * transferred from the source to the target by DMS.

For a change data * capture (CDC) load, each row of the timestamp column contains the timestamp for * the commit of that row in the source database.

The string format for this * timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, * the precision of this value is in microseconds. For a CDC load, the rounding of * the precision depends on the commit timestamp supported by DMS for the source * database.

When the AddColumnName parameter is set to * true, DMS also includes a name for the timestamp column that you * set with TimestampColumnName.
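
Example (an editor-added illustration, not generated documentation; the column name is a hypothetical placeholder):

  Aws::DatabaseMigrationService::Model::S3Settings settings;
  settings.WithTimestampColumnName("dms_commit_ts"); // any nonblank value adds the timestamp column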

*/ inline S3Settings& WithTimestampColumnName(const char* value) { SetTimestampColumnName(value); return *this;} /** *

A value that specifies the precision of any TIMESTAMP column * values that are written to an Amazon S3 object file in .parquet format.


DMS supports the ParquetTimestampInMillisecond parameter * in versions 3.1.4 and later.

When * ParquetTimestampInMillisecond is set to true or * y, DMS writes all TIMESTAMP columns in a .parquet * formatted file with millisecond precision. Otherwise, DMS writes them with * microsecond precision.

Currently, Amazon Athena and Glue can handle only * millisecond precision for TIMESTAMP values. Set this parameter to * true for S3 endpoint object files that are .parquet formatted only * if you plan to query or process the data with Athena or Glue.

DMS * writes any TIMESTAMP column values to an S3 file in .csv * format with microsecond precision.

Setting * ParquetTimestampInMillisecond has no effect on the string format of * the timestamp column value that is inserted by setting the * TimestampColumnName parameter.

*/ inline bool GetParquetTimestampInMillisecond() const{ return m_parquetTimestampInMillisecond; } /** *

A value that specifies the precision of any TIMESTAMP column * values that are written to an Amazon S3 object file in .parquet format.


DMS supports the ParquetTimestampInMillisecond parameter * in versions 3.1.4 and later.

When * ParquetTimestampInMillisecond is set to true or * y, DMS writes all TIMESTAMP columns in a .parquet * formatted file with millisecond precision. Otherwise, DMS writes them with * microsecond precision.

Currently, Amazon Athena and Glue can handle only * millisecond precision for TIMESTAMP values. Set this parameter to * true for S3 endpoint object files that are .parquet formatted only * if you plan to query or process the data with Athena or Glue.

DMS * writes any TIMESTAMP column values to an S3 file in .csv * format with microsecond precision.

Setting * ParquetTimestampInMillisecond has no effect on the string format of * the timestamp column value that is inserted by setting the * TimestampColumnName parameter.

*/ inline bool ParquetTimestampInMillisecondHasBeenSet() const { return m_parquetTimestampInMillisecondHasBeenSet; } /** *

A value that specifies the precision of any TIMESTAMP column * values that are written to an Amazon S3 object file in .parquet format.


DMS supports the ParquetTimestampInMillisecond parameter * in versions 3.1.4 and later.

When * ParquetTimestampInMillisecond is set to true or * y, DMS writes all TIMESTAMP columns in a .parquet * formatted file with millisecond precision. Otherwise, DMS writes them with * microsecond precision.

Currently, Amazon Athena and Glue can handle only * millisecond precision for TIMESTAMP values. Set this parameter to * true for S3 endpoint object files that are .parquet formatted only * if you plan to query or process the data with Athena or Glue.

DMS * writes any TIMESTAMP column values to an S3 file in .csv * format with microsecond precision.

Setting * ParquetTimestampInMillisecond has no effect on the string format of * the timestamp column value that is inserted by setting the * TimestampColumnName parameter.

*/ inline void SetParquetTimestampInMillisecond(bool value) { m_parquetTimestampInMillisecondHasBeenSet = true; m_parquetTimestampInMillisecond = value; } /** *

A value that specifies the precision of any TIMESTAMP column * values that are written to an Amazon S3 object file in .parquet format.


DMS supports the ParquetTimestampInMillisecond parameter * in versions 3.1.4 and later.

When * ParquetTimestampInMillisecond is set to true or * y, DMS writes all TIMESTAMP columns in a .parquet * formatted file with millisecond precision. Otherwise, DMS writes them with * microsecond precision.

Currently, Amazon Athena and Glue can handle only * millisecond precision for TIMESTAMP values. Set this parameter to * true for S3 endpoint object files that are .parquet formatted only * if you plan to query or process the data with Athena or Glue.

DMS * writes any TIMESTAMP column values to an S3 file in .csv * format with microsecond precision.

Setting * ParquetTimestampInMillisecond has no effect on the string format of * the timestamp column value that is inserted by setting the * TimestampColumnName parameter.
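
A short sketch (editor-added; assumes an S3Settings instance named settings) for Athena- or Glue-friendly .parquet output:

  settings.WithParquetTimestampInMillisecond(true); // write TIMESTAMP columns with millisecond precision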

*/ inline S3Settings& WithParquetTimestampInMillisecond(bool value) { SetParquetTimestampInMillisecond(value); return *this;} /** *

A value that enables a change data capture (CDC) load to write INSERT and * UPDATE operations to .csv or .parquet (columnar storage) output files. The * default setting is false, but when * CdcInsertsAndUpdates is set to true or y, * only INSERTs and UPDATEs from the source database are migrated to the .csv or * .parquet file.

DMS supports the use of .parquet files in * versions 3.4.7 and later.

How these INSERTs and UPDATEs are * recorded depends on the value of the IncludeOpForFullLoad * parameter. If IncludeOpForFullLoad is set to true, the * first field of every CDC record is set to either I or * U to indicate INSERT and UPDATE operations at the source. But if * IncludeOpForFullLoad is set to false, CDC records are * written without an indication of INSERT or UPDATE operations at the source. For * more information about how these settings work together, see Indicating * Source DB Operations in Migrated S3 Data in the Database Migration * Service User Guide.

DMS supports the use of the * CdcInsertsAndUpdates parameter in versions 3.3.1 and later.

* CdcInsertsOnly and CdcInsertsAndUpdates can't both be * set to true for the same endpoint. Set either * CdcInsertsOnly or CdcInsertsAndUpdates to * true for the same endpoint, but not both.

*/ inline bool GetCdcInsertsAndUpdates() const{ return m_cdcInsertsAndUpdates; } /** *

A value that enables a change data capture (CDC) load to write INSERT and * UPDATE operations to .csv or .parquet (columnar storage) output files. The * default setting is false, but when * CdcInsertsAndUpdates is set to true or y, * only INSERTs and UPDATEs from the source database are migrated to the .csv or * .parquet file.

DMS supports the use of .parquet files in * versions 3.4.7 and later.

How these INSERTs and UPDATEs are * recorded depends on the value of the IncludeOpForFullLoad * parameter. If IncludeOpForFullLoad is set to true, the * first field of every CDC record is set to either I or * U to indicate INSERT and UPDATE operations at the source. But if * IncludeOpForFullLoad is set to false, CDC records are * written without an indication of INSERT or UPDATE operations at the source. For * more information about how these settings work together, see Indicating * Source DB Operations in Migrated S3 Data in the Database Migration * Service User Guide.

DMS supports the use of the * CdcInsertsAndUpdates parameter in versions 3.3.1 and later.

* CdcInsertsOnly and CdcInsertsAndUpdates can't both be * set to true for the same endpoint. Set either * CdcInsertsOnly or CdcInsertsAndUpdates to * true for the same endpoint, but not both.

*/ inline bool CdcInsertsAndUpdatesHasBeenSet() const { return m_cdcInsertsAndUpdatesHasBeenSet; } /** *

A value that enables a change data capture (CDC) load to write INSERT and * UPDATE operations to .csv or .parquet (columnar storage) output files. The * default setting is false, but when * CdcInsertsAndUpdates is set to true or y, * only INSERTs and UPDATEs from the source database are migrated to the .csv or * .parquet file.

DMS supports the use of .parquet files in * versions 3.4.7 and later.

How these INSERTs and UPDATEs are * recorded depends on the value of the IncludeOpForFullLoad * parameter. If IncludeOpForFullLoad is set to true, the * first field of every CDC record is set to either I or * U to indicate INSERT and UPDATE operations at the source. But if * IncludeOpForFullLoad is set to false, CDC records are * written without an indication of INSERT or UPDATE operations at the source. For * more information about how these settings work together, see Indicating * Source DB Operations in Migrated S3 Data in the Database Migration * Service User Guide.

DMS supports the use of the * CdcInsertsAndUpdates parameter in versions 3.3.1 and later.

* CdcInsertsOnly and CdcInsertsAndUpdates can't both be * set to true for the same endpoint. Set either * CdcInsertsOnly or CdcInsertsAndUpdates to * true for the same endpoint, but not both.

*/ inline void SetCdcInsertsAndUpdates(bool value) { m_cdcInsertsAndUpdatesHasBeenSet = true; m_cdcInsertsAndUpdates = value; } /** *

A value that enables a change data capture (CDC) load to write INSERT and * UPDATE operations to .csv or .parquet (columnar storage) output files. The * default setting is false, but when * CdcInsertsAndUpdates is set to true or y, * only INSERTs and UPDATEs from the source database are migrated to the .csv or * .parquet file.

DMS supports the use of .parquet files in * versions 3.4.7 and later.

How these INSERTs and UPDATEs are * recorded depends on the value of the IncludeOpForFullLoad * parameter. If IncludeOpForFullLoad is set to true, the * first field of every CDC record is set to either I or * U to indicate INSERT and UPDATE operations at the source. But if * IncludeOpForFullLoad is set to false, CDC records are * written without an indication of INSERT or UPDATE operations at the source. For * more information about how these settings work together, see Indicating * Source DB Operations in Migrated S3 Data in the Database Migration * Service User Guide.

DMS supports the use of the * CdcInsertsAndUpdates parameter in versions 3.3.1 and later.

* CdcInsertsOnly and CdcInsertsAndUpdates can't both be * set to true for the same endpoint. Set either * CdcInsertsOnly or CdcInsertsAndUpdates to * true for the same endpoint, but not both.
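
Illustrative sketch (editor-added; assumes an S3Settings instance named settings):

  settings.WithCdcInsertsAndUpdates(true); // leave CdcInsertsOnly unset (or false) on this endpoint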

*/ inline S3Settings& WithCdcInsertsAndUpdates(bool value) { SetCdcInsertsAndUpdates(value); return *this;} /** *

When set to true, this parameter partitions S3 bucket folders * based on transaction commit dates. The default value is false. For * more information about date-based folder partitioning, see Using * date-based folder partitioning.

*/ inline bool GetDatePartitionEnabled() const{ return m_datePartitionEnabled; } /** *

When set to true, this parameter partitions S3 bucket folders * based on transaction commit dates. The default value is false. For * more information about date-based folder partitioning, see Using * date-based folder partitioning.

*/ inline bool DatePartitionEnabledHasBeenSet() const { return m_datePartitionEnabledHasBeenSet; } /** *

When set to true, this parameter partitions S3 bucket folders * based on transaction commit dates. The default value is false. For * more information about date-based folder partitioning, see Using * date-based folder partitioning.

*/ inline void SetDatePartitionEnabled(bool value) { m_datePartitionEnabledHasBeenSet = true; m_datePartitionEnabled = value; } /** *

When set to true, this parameter partitions S3 bucket folders * based on transaction commit dates. The default value is false. For * more information about date-based folder partitioning, see Using * date-based folder partitioning.
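
Illustrative sketch (editor-added; assumes an S3Settings instance named settings):

  settings.WithDatePartitionEnabled(true); // partition folders by transaction commit date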

*/ inline S3Settings& WithDatePartitionEnabled(bool value) { SetDatePartitionEnabled(value); return *this;} /** *

Identifies the sequence of the date format to use during folder partitioning. * The default value is YYYYMMDD. Use this parameter when * DatePartitionEnabled is set to true.

*/ inline const DatePartitionSequenceValue& GetDatePartitionSequence() const{ return m_datePartitionSequence; } /** *

Identifies the sequence of the date format to use during folder partitioning. * The default value is YYYYMMDD. Use this parameter when * DatePartitionEnabled is set to true.

*/ inline bool DatePartitionSequenceHasBeenSet() const { return m_datePartitionSequenceHasBeenSet; } /** *

Identifies the sequence of the date format to use during folder partitioning. * The default value is YYYYMMDD. Use this parameter when * DatePartitionEnabled is set to true.

*/ inline void SetDatePartitionSequence(const DatePartitionSequenceValue& value) { m_datePartitionSequenceHasBeenSet = true; m_datePartitionSequence = value; } /** *

Identifies the sequence of the date format to use during folder partitioning. * The default value is YYYYMMDD. Use this parameter when * DatePartitionEnabled is set to true.

*/ inline void SetDatePartitionSequence(DatePartitionSequenceValue&& value) { m_datePartitionSequenceHasBeenSet = true; m_datePartitionSequence = std::move(value); } /** *

Identifies the sequence of the date format to use during folder partitioning. * The default value is YYYYMMDD. Use this parameter when * DatePartitionEnabled is set to true.

*/ inline S3Settings& WithDatePartitionSequence(const DatePartitionSequenceValue& value) { SetDatePartitionSequence(value); return *this;} /** *

Identifies the sequence of the date format to use during folder partitioning. * The default value is YYYYMMDD. Use this parameter when * DatePartitionEnabled is set to true.
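
Illustrative sketch (editor-added; the enum member spelling is an assumption, check DatePartitionSequenceValue.h):

  settings.WithDatePartitionSequence(DatePartitionSequenceValue::YYYYMMDDHH);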

*/ inline S3Settings& WithDatePartitionSequence(DatePartitionSequenceValue&& value) { SetDatePartitionSequence(std::move(value)); return *this;} /** *

Specifies a date-separating delimiter to use during folder partitioning. The * default value is SLASH. Use this parameter when * DatePartitionEnabled is set to true.

*/ inline const DatePartitionDelimiterValue& GetDatePartitionDelimiter() const{ return m_datePartitionDelimiter; } /** *

Specifies a date-separating delimiter to use during folder partitioning. The * default value is SLASH. Use this parameter when * DatePartitionEnabled is set to true.

*/ inline bool DatePartitionDelimiterHasBeenSet() const { return m_datePartitionDelimiterHasBeenSet; } /** *

Specifies a date-separating delimiter to use during folder partitioning. The * default value is SLASH. Use this parameter when * DatePartitionEnabled is set to true.

*/ inline void SetDatePartitionDelimiter(const DatePartitionDelimiterValue& value) { m_datePartitionDelimiterHasBeenSet = true; m_datePartitionDelimiter = value; } /** *

Specifies a date-separating delimiter to use during folder partitioning. The * default value is SLASH. Use this parameter when * DatePartitionEnabled is set to true.

*/ inline void SetDatePartitionDelimiter(DatePartitionDelimiterValue&& value) { m_datePartitionDelimiterHasBeenSet = true; m_datePartitionDelimiter = std::move(value); } /** *

Specifies a date-separating delimiter to use during folder partitioning. The * default value is SLASH. Use this parameter when * DatePartitionEnabled is set to true.

*/ inline S3Settings& WithDatePartitionDelimiter(const DatePartitionDelimiterValue& value) { SetDatePartitionDelimiter(value); return *this;} /** *

Specifies a date-separating delimiter to use during folder partitioning. The * default value is SLASH. Use this parameter when * DatePartitionEnabled is set to true.
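
Illustrative sketch (editor-added; the enum member spelling is an assumption, check DatePartitionDelimiterValue.h):

  settings.WithDatePartitionDelimiter(DatePartitionDelimiterValue::SLASH);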

*/ inline S3Settings& WithDatePartitionDelimiter(DatePartitionDelimiterValue&& value) { SetDatePartitionDelimiter(std::move(value)); return *this;} /** *

This setting applies if the S3 output files during a change data capture * (CDC) load are written in .csv format. If set to true for columns * not included in the supplemental log, DMS uses the value specified by * CsvNoSupValue. If not set or set to false, DMS * uses the null value for these columns.

This setting is supported * in DMS versions 3.4.1 and later.

*/ inline bool GetUseCsvNoSupValue() const{ return m_useCsvNoSupValue; } /** *

This setting applies if the S3 output files during a change data capture * (CDC) load are written in .csv format. If set to true for columns * not included in the supplemental log, DMS uses the value specified by * CsvNoSupValue. If not set or set to false, DMS * uses the null value for these columns.

This setting is supported * in DMS versions 3.4.1 and later.

*/ inline bool UseCsvNoSupValueHasBeenSet() const { return m_useCsvNoSupValueHasBeenSet; } /** *

This setting applies if the S3 output files during a change data capture * (CDC) load are written in .csv format. If set to true for columns * not included in the supplemental log, DMS uses the value specified by * CsvNoSupValue. If not set or set to false, DMS * uses the null value for these columns.

This setting is supported * in DMS versions 3.4.1 and later.

*/ inline void SetUseCsvNoSupValue(bool value) { m_useCsvNoSupValueHasBeenSet = true; m_useCsvNoSupValue = value; } /** *

This setting applies if the S3 output files during a change data capture * (CDC) load are written in .csv format. If set to true for columns * not included in the supplemental log, DMS uses the value specified by * CsvNoSupValue. If not set or set to false, DMS * uses the null value for these columns.

This setting is supported * in DMS versions 3.4.1 and later.
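
Illustrative sketch (editor-added; assumes an S3Settings instance named settings):

  settings.WithUseCsvNoSupValue(true); // pair with CsvNoSupValue to choose the substitute string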

*/ inline S3Settings& WithUseCsvNoSupValue(bool value) { SetUseCsvNoSupValue(value); return *this;} /** *

This setting only applies if your Amazon S3 output files during a change data * capture (CDC) load are written in .csv format. If * UseCsvNoSupValue is set to true, specify a string value that * you want DMS to use for all columns not included in the supplemental log. If you * do not specify a string value, DMS uses the null value for these columns * regardless of the UseCsvNoSupValue setting.

This * setting is supported in DMS versions 3.4.1 and later.

*/ inline const Aws::String& GetCsvNoSupValue() const{ return m_csvNoSupValue; } /** *

This setting only applies if your Amazon S3 output files during a change data * capture (CDC) load are written in .csv format. If * UseCsvNoSupValue is set to true, specify a string value that * you want DMS to use for all columns not included in the supplemental log. If you * do not specify a string value, DMS uses the null value for these columns * regardless of the UseCsvNoSupValue setting.

This * setting is supported in DMS versions 3.4.1 and later.

*/ inline bool CsvNoSupValueHasBeenSet() const { return m_csvNoSupValueHasBeenSet; } /** *

This setting only applies if your Amazon S3 output files during a change data * capture (CDC) load are written in .csv format. If * UseCsvNoSupValue is set to true, specify a string value that * you want DMS to use for all columns not included in the supplemental log. If you * do not specify a string value, DMS uses the null value for these columns * regardless of the UseCsvNoSupValue setting.

This * setting is supported in DMS versions 3.4.1 and later.

*/ inline void SetCsvNoSupValue(const Aws::String& value) { m_csvNoSupValueHasBeenSet = true; m_csvNoSupValue = value; } /** *

This setting only applies if your Amazon S3 output files during a change data * capture (CDC) load are written in .csv format. If * UseCsvNoSupValue is set to true, specify a string value that * you want DMS to use for all columns not included in the supplemental log. If you * do not specify a string value, DMS uses the null value for these columns * regardless of the UseCsvNoSupValue setting.

This * setting is supported in DMS versions 3.4.1 and later.

*/ inline void SetCsvNoSupValue(Aws::String&& value) { m_csvNoSupValueHasBeenSet = true; m_csvNoSupValue = std::move(value); } /** *

This setting only applies if your Amazon S3 output files during a change data * capture (CDC) load are written in .csv format. If * UseCsvNoSupValue is set to true, specify a string value that * you want DMS to use for all columns not included in the supplemental log. If you * do not specify a string value, DMS uses the null value for these columns * regardless of the UseCsvNoSupValue setting.

This * setting is supported in DMS versions 3.4.1 and later.

*/ inline void SetCsvNoSupValue(const char* value) { m_csvNoSupValueHasBeenSet = true; m_csvNoSupValue.assign(value); } /** *

This setting only applies if your Amazon S3 output files during a change data * capture (CDC) load are written in .csv format. If * UseCsvNoSupValue is set to true, specify a string value that * you want DMS to use for all columns not included in the supplemental log. If you * do not specify a string value, DMS uses the null value for these columns * regardless of the UseCsvNoSupValue setting.

This * setting is supported in DMS versions 3.4.1 and later.

*/ inline S3Settings& WithCsvNoSupValue(const Aws::String& value) { SetCsvNoSupValue(value); return *this;} /** *

This setting only applies if your Amazon S3 output files during a change data * capture (CDC) load are written in .csv format. If * UseCsvNoSupValue is set to true, specify a string value that * you want DMS to use for all columns not included in the supplemental log. If you * do not specify a string value, DMS uses the null value for these columns * regardless of the UseCsvNoSupValue setting.

This * setting is supported in DMS versions 3.4.1 and later.

*/ inline S3Settings& WithCsvNoSupValue(Aws::String&& value) { SetCsvNoSupValue(std::move(value)); return *this;} /** *

This setting only applies if your Amazon S3 output files during a change data * capture (CDC) load are written in .csv format. If * UseCsvNoSupValue is set to true, specify a string value that * you want DMS to use for all columns not included in the supplemental log. If you * do not specify a string value, DMS uses the null value for these columns * regardless of the UseCsvNoSupValue setting.

This * setting is supported in DMS versions 3.4.1 and later.
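
Illustrative sketch (editor-added; the substitute string is a hypothetical example):

  settings.WithUseCsvNoSupValue(true).WithCsvNoSupValue("NO_SUP_LOG");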

*/ inline S3Settings& WithCsvNoSupValue(const char* value) { SetCsvNoSupValue(value); return *this;} /** *

If set to true, DMS saves the transaction order for a change * data capture (CDC) load on the Amazon S3 target specified by * CdcPath. For more information, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

*/ inline bool GetPreserveTransactions() const{ return m_preserveTransactions; } /** *

If set to true, DMS saves the transaction order for a change * data capture (CDC) load on the Amazon S3 target specified by * CdcPath. For more information, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

*/ inline bool PreserveTransactionsHasBeenSet() const { return m_preserveTransactionsHasBeenSet; } /** *

If set to true, DMS saves the transaction order for a change * data capture (CDC) load on the Amazon S3 target specified by * CdcPath. For more information, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

*/ inline void SetPreserveTransactions(bool value) { m_preserveTransactionsHasBeenSet = true; m_preserveTransactions = value; } /** *

If set to true, DMS saves the transaction order for a change * data capture (CDC) load on the Amazon S3 target specified by * CdcPath. For more information, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.
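
Illustrative sketch (editor-added; assumes an S3Settings instance named settings):

  settings.WithPreserveTransactions(true); // also requires CdcPath, documented below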

*/ inline S3Settings& WithPreserveTransactions(bool value) { SetPreserveTransactions(value); return *this;} /** *

Specifies the folder path of CDC files. For an S3 source, this setting is * required if a task captures change data; otherwise, it's optional. If * CdcPath is set, DMS reads CDC files from this path and replicates * the data changes to the target endpoint. For an S3 target, if you set * PreserveTransactions to true, DMS verifies that * you have set this parameter to a folder path on your S3 target where DMS can * save the transaction order for the CDC load. DMS creates this CDC folder path in * either your S3 target working directory or the S3 target location specified by * BucketFolder and BucketName.

For example, if you specify * CdcPath as MyChangedData, and you specify * BucketName as MyTargetBucket but do not specify * BucketFolder, DMS creates the following CDC folder path: * MyTargetBucket/MyChangedData.

If you specify the same * CdcPath, and you specify BucketName as * MyTargetBucket and BucketFolder as * MyTargetData, DMS creates the following CDC folder path: * MyTargetBucket/MyTargetData/MyChangedData.

For more * information on CDC including transaction order on an S3 target, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

*/ inline const Aws::String& GetCdcPath() const{ return m_cdcPath; } /** *

Specifies the folder path of CDC files. For an S3 source, this setting is * required if a task captures change data; otherwise, it's optional. If * CdcPath is set, DMS reads CDC files from this path and replicates * the data changes to the target endpoint. For an S3 target, if you set * PreserveTransactions to true, DMS verifies that * you have set this parameter to a folder path on your S3 target where DMS can * save the transaction order for the CDC load. DMS creates this CDC folder path in * either your S3 target working directory or the S3 target location specified by * BucketFolder and BucketName.

For example, if you specify * CdcPath as MyChangedData, and you specify * BucketName as MyTargetBucket but do not specify * BucketFolder, DMS creates the following CDC folder path: * MyTargetBucket/MyChangedData.

If you specify the same * CdcPath, and you specify BucketName as * MyTargetBucket and BucketFolder as * MyTargetData, DMS creates the following CDC folder path: * MyTargetBucket/MyTargetData/MyChangedData.

For more * information on CDC including transaction order on an S3 target, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

*/ inline bool CdcPathHasBeenSet() const { return m_cdcPathHasBeenSet; } /** *

Specifies the folder path of CDC files. For an S3 source, this setting is * required if a task captures change data; otherwise, it's optional. If * CdcPath is set, DMS reads CDC files from this path and replicates * the data changes to the target endpoint. For an S3 target, if you set * PreserveTransactions to true, DMS verifies that * you have set this parameter to a folder path on your S3 target where DMS can * save the transaction order for the CDC load. DMS creates this CDC folder path in * either your S3 target working directory or the S3 target location specified by * BucketFolder and BucketName.

For example, if you specify * CdcPath as MyChangedData, and you specify * BucketName as MyTargetBucket but do not specify * BucketFolder, DMS creates the following CDC folder path: * MyTargetBucket/MyChangedData.

If you specify the same * CdcPath, and you specify BucketName as * MyTargetBucket and BucketFolder as * MyTargetData, DMS creates the following CDC folder path: * MyTargetBucket/MyTargetData/MyChangedData.

For more * information on CDC including transaction order on an S3 target, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

*/ inline void SetCdcPath(const Aws::String& value) { m_cdcPathHasBeenSet = true; m_cdcPath = value; } /** *

Specifies the folder path of CDC files. For an S3 source, this setting is * required if a task captures change data; otherwise, it's optional. If * CdcPath is set, DMS reads CDC files from this path and replicates * the data changes to the target endpoint. For an S3 target, if you set * PreserveTransactions to true, DMS verifies that * you have set this parameter to a folder path on your S3 target where DMS can * save the transaction order for the CDC load. DMS creates this CDC folder path in * either your S3 target working directory or the S3 target location specified by * BucketFolder and BucketName.

For example, if you specify * CdcPath as MyChangedData, and you specify * BucketName as MyTargetBucket but do not specify * BucketFolder, DMS creates the following CDC folder path: * MyTargetBucket/MyChangedData.

If you specify the same * CdcPath, and you specify BucketName as * MyTargetBucket and BucketFolder as * MyTargetData, DMS creates the following CDC folder path: * MyTargetBucket/MyTargetData/MyChangedData.

For more * information on CDC including transaction order on an S3 target, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

*/ inline void SetCdcPath(Aws::String&& value) { m_cdcPathHasBeenSet = true; m_cdcPath = std::move(value); } /** *

Specifies the folder path of CDC files. For an S3 source, this setting is * required if a task captures change data; otherwise, it's optional. If * CdcPath is set, DMS reads CDC files from this path and replicates * the data changes to the target endpoint. For an S3 target, if you set * PreserveTransactions to true, DMS verifies that * you have set this parameter to a folder path on your S3 target where DMS can * save the transaction order for the CDC load. DMS creates this CDC folder path in * either your S3 target working directory or the S3 target location specified by * BucketFolder and BucketName.

For example, if you specify * CdcPath as MyChangedData, and you specify * BucketName as MyTargetBucket but do not specify * BucketFolder, DMS creates the following CDC folder path: * MyTargetBucket/MyChangedData.

If you specify the same * CdcPath, and you specify BucketName as * MyTargetBucket and BucketFolder as * MyTargetData, DMS creates the following CDC folder path: * MyTargetBucket/MyTargetData/MyChangedData.

For more * information on CDC including transaction order on an S3 target, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

*/ inline void SetCdcPath(const char* value) { m_cdcPathHasBeenSet = true; m_cdcPath.assign(value); } /** *

Specifies the folder path of CDC files. For an S3 source, this setting is * required if a task captures change data; otherwise, it's optional. If * CdcPath is set, DMS reads CDC files from this path and replicates * the data changes to the target endpoint. For an S3 target, if you set * PreserveTransactions to true, DMS verifies that * you have set this parameter to a folder path on your S3 target where DMS can * save the transaction order for the CDC load. DMS creates this CDC folder path in * either your S3 target working directory or the S3 target location specified by * BucketFolder and BucketName.

For example, if you specify * CdcPath as MyChangedData, and you specify * BucketName as MyTargetBucket but do not specify * BucketFolder, DMS creates the following CDC folder path: * MyTargetBucket/MyChangedData.

If you specify the same * CdcPath, and you specify BucketName as * MyTargetBucket and BucketFolder as * MyTargetData, DMS creates the following CDC folder path: * MyTargetBucket/MyTargetData/MyChangedData.

For more * information on CDC including transaction order on an S3 target, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

*/ inline S3Settings& WithCdcPath(const Aws::String& value) { SetCdcPath(value); return *this;} /** *

Specifies the folder path of CDC files. For an S3 source, this setting is * required if a task captures change data; otherwise, it's optional. If * CdcPath is set, DMS reads CDC files from this path and replicates * the data changes to the target endpoint. For an S3 target, if you set * PreserveTransactions to true, DMS verifies that * you have set this parameter to a folder path on your S3 target where DMS can * save the transaction order for the CDC load. DMS creates this CDC folder path in * either your S3 target working directory or the S3 target location specified by * BucketFolder and BucketName.

For example, if you specify * CdcPath as MyChangedData, and you specify * BucketName as MyTargetBucket but do not specify * BucketFolder, DMS creates the following CDC folder path: * MyTargetBucket/MyChangedData.

If you specify the same * CdcPath, and you specify BucketName as * MyTargetBucket and BucketFolder as * MyTargetData, DMS creates the following CDC folder path: * MyTargetBucket/MyTargetData/MyChangedData.

For more * information on CDC including transaction order on an S3 target, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

*/ inline S3Settings& WithCdcPath(Aws::String&& value) { SetCdcPath(std::move(value)); return *this;} /** *

Specifies the folder path of CDC files. For an S3 source, this setting is * required if a task captures change data; otherwise, it's optional. If * CdcPath is set, DMS reads CDC files from this path and replicates * the data changes to the target endpoint. For an S3 target, if you set * PreserveTransactions to true, DMS verifies that * you have set this parameter to a folder path on your S3 target where DMS can * save the transaction order for the CDC load. DMS creates this CDC folder path in * either your S3 target working directory or the S3 target location specified by * BucketFolder and BucketName.

For example, if you specify * CdcPath as MyChangedData, and you specify * BucketName as MyTargetBucket but do not specify * BucketFolder, DMS creates the following CDC folder path: * MyTargetBucket/MyChangedData.

If you specify the same * CdcPath, and you specify BucketName as * MyTargetBucket and BucketFolder as * MyTargetData, DMS creates the following CDC folder path: * MyTargetBucket/MyTargetData/MyChangedData.

For more * information on CDC including transaction order on an S3 target, see Capturing * data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.
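
Illustrative sketch (editor-added; the folder name is a hypothetical example):

  settings.WithPreserveTransactions(true).WithCdcPath("MyChangedData");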

*/ inline S3Settings& WithCdcPath(const char* value) { SetCdcPath(value); return *this;} /** *

When set to true, this parameter uses the task start time as the timestamp * column value instead of the time the data is written to the target. For a full load, when * useTaskStartTimeForFullLoadTimestamp is set to true, * each row of the timestamp column contains the task start time. For CDC loads, * each row of the timestamp column contains the transaction commit time.

When useTaskStartTimeForFullLoadTimestamp is set to * false, the full load timestamp in the timestamp column increments * as the data arrives at the target.

*/ inline bool GetUseTaskStartTimeForFullLoadTimestamp() const{ return m_useTaskStartTimeForFullLoadTimestamp; } /** *

When set to true, this parameter uses the task start time as the timestamp * column value instead of the time the data is written to the target. For a full load, when * useTaskStartTimeForFullLoadTimestamp is set to true, * each row of the timestamp column contains the task start time. For CDC loads, * each row of the timestamp column contains the transaction commit time.

When useTaskStartTimeForFullLoadTimestamp is set to * false, the full load timestamp in the timestamp column increments * as the data arrives at the target.

*/ inline bool UseTaskStartTimeForFullLoadTimestampHasBeenSet() const { return m_useTaskStartTimeForFullLoadTimestampHasBeenSet; } /** *

When set to true, this parameter uses the task start time as the timestamp * column value instead of the time the data is written to the target. For a full load, when * useTaskStartTimeForFullLoadTimestamp is set to true, * each row of the timestamp column contains the task start time. For CDC loads, * each row of the timestamp column contains the transaction commit time.

When useTaskStartTimeForFullLoadTimestamp is set to * false, the full load timestamp in the timestamp column increments * as the data arrives at the target.

*/ inline void SetUseTaskStartTimeForFullLoadTimestamp(bool value) { m_useTaskStartTimeForFullLoadTimestampHasBeenSet = true; m_useTaskStartTimeForFullLoadTimestamp = value; } /** *

When set to true, this parameter uses the task start time as the timestamp * column value instead of the time the data is written to the target. For a full load, when * useTaskStartTimeForFullLoadTimestamp is set to true, * each row of the timestamp column contains the task start time. For CDC loads, * each row of the timestamp column contains the transaction commit time.

When useTaskStartTimeForFullLoadTimestamp is set to * false, the full load timestamp in the timestamp column increments * as the data arrives at the target.
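
Illustrative sketch (editor-added; assumes an S3Settings instance named settings):

  settings.WithUseTaskStartTimeForFullLoadTimestamp(true); // full-load rows stamped with the task start time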

*/ inline S3Settings& WithUseTaskStartTimeForFullLoadTimestamp(bool value) { SetUseTaskStartTimeForFullLoadTimestamp(value); return *this;} /** *

A value that enables DMS to specify a predefined (canned) access control list * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more * information about Amazon S3 canned ACLs, see Canned * ACL in the Amazon S3 Developer Guide.

The default value is * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and * BUCKET_OWNER_FULL_CONTROL.

*/ inline const CannedAclForObjectsValue& GetCannedAclForObjects() const{ return m_cannedAclForObjects; } /** *

A value that enables DMS to specify a predefined (canned) access control list * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more * information about Amazon S3 canned ACLs, see Canned * ACL in the Amazon S3 Developer Guide.

The default value is * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and * BUCKET_OWNER_FULL_CONTROL.

*/ inline bool CannedAclForObjectsHasBeenSet() const { return m_cannedAclForObjectsHasBeenSet; } /** *

A value that enables DMS to specify a predefined (canned) access control list * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more * information about Amazon S3 canned ACLs, see Canned * ACL in the Amazon S3 Developer Guide.

The default value is * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and * BUCKET_OWNER_FULL_CONTROL.

*/ inline void SetCannedAclForObjects(const CannedAclForObjectsValue& value) { m_cannedAclForObjectsHasBeenSet = true; m_cannedAclForObjects = value; } /** *

A value that enables DMS to specify a predefined (canned) access control list * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more * information about Amazon S3 canned ACLs, see Canned * ACL in the Amazon S3 Developer Guide.

The default value is * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and * BUCKET_OWNER_FULL_CONTROL.

*/ inline void SetCannedAclForObjects(CannedAclForObjectsValue&& value) { m_cannedAclForObjectsHasBeenSet = true; m_cannedAclForObjects = std::move(value); } /** *

A value that enables DMS to specify a predefined (canned) access control list * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more * information about Amazon S3 canned ACLs, see Canned * ACL in the Amazon S3 Developer Guide.

The default value is * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and * BUCKET_OWNER_FULL_CONTROL.

*/ inline S3Settings& WithCannedAclForObjects(const CannedAclForObjectsValue& value) { SetCannedAclForObjects(value); return *this;} /** *

A value that enables DMS to specify a predefined (canned) access control list * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more * information about Amazon S3 canned ACLs, see Canned * ACL in the Amazon S3 Developer Guide.

The default value is * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and * BUCKET_OWNER_FULL_CONTROL.
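
Illustrative sketch (editor-added; the enum member spelling is an assumption, check CannedAclForObjectsValue.h):

  settings.WithCannedAclForObjects(CannedAclForObjectsValue::BUCKET_OWNER_FULL_CONTROL);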

*/ inline S3Settings& WithCannedAclForObjects(CannedAclForObjectsValue&& value) { SetCannedAclForObjects(std::move(value)); return *this;} /** *

An optional parameter that you can set to true or y * to add column name information to the .csv output file.

The * default value is false. Valid values are true, * false, y, and n.

*/ inline bool GetAddColumnName() const{ return m_addColumnName; } /** *

An optional parameter that you can set to true or y * to add column name information to the .csv output file.

The * default value is false. Valid values are true, * false, y, and n.

*/ inline bool AddColumnNameHasBeenSet() const { return m_addColumnNameHasBeenSet; } /** *

An optional parameter that you can set to true or y * to add column name information to the .csv output file.

The * default value is false. Valid values are true, * false, y, and n.

*/ inline void SetAddColumnName(bool value) { m_addColumnNameHasBeenSet = true; m_addColumnName = value; } /** *

An optional parameter that you can set to true or y * to add column name information to the .csv output file.

The * default value is false. Valid values are true, * false, y, and n.
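
Illustrative sketch (editor-added; assumes an S3Settings instance named settings):

  settings.WithAddColumnName(true); // emit a header row of column names in .csv output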

*/ inline S3Settings& WithAddColumnName(bool value) { SetAddColumnName(value); return *this;} /** *

Maximum length of the interval, defined in seconds, after which to output a * file to Amazon S3.

When CdcMaxBatchInterval and * CdcMinFileSize are both specified, the file write is triggered by * whichever parameter condition is met first within a DMS CloudFormation * template.

The default value is 60 seconds.

*/ inline int GetCdcMaxBatchInterval() const{ return m_cdcMaxBatchInterval; } /** *

Maximum length of the interval, defined in seconds, after which to output a * file to Amazon S3.

When CdcMaxBatchInterval and * CdcMinFileSize are both specified, the file write is triggered by * whichever parameter condition is met first within a DMS CloudFormation * template.

The default value is 60 seconds.

*/ inline bool CdcMaxBatchIntervalHasBeenSet() const { return m_cdcMaxBatchIntervalHasBeenSet; } /** *

Maximum length of the interval, defined in seconds, after which to output a * file to Amazon S3.

When CdcMaxBatchInterval and * CdcMinFileSize are both specified, the file write is triggered by * whichever parameter condition is met first within a DMS CloudFormation * template.

The default value is 60 seconds.

*/ inline void SetCdcMaxBatchInterval(int value) { m_cdcMaxBatchIntervalHasBeenSet = true; m_cdcMaxBatchInterval = value; } /** *

Maximum length of the interval, defined in seconds, after which to output a * file to Amazon S3.

When CdcMaxBatchInterval and * CdcMinFileSize are both specified, the file write is triggered by * whichever parameter condition is met first within a DMS CloudFormation * template.

The default value is 60 seconds.
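
Illustrative sketch (editor-added; assumes an S3Settings instance named settings):

  settings.WithCdcMaxBatchInterval(120); // flush a file at least every 120 seconds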

*/ inline S3Settings& WithCdcMaxBatchInterval(int value) { SetCdcMaxBatchInterval(value); return *this;} /** *

Minimum file size, defined in kilobytes, to reach for a file output to Amazon * S3.

When CdcMinFileSize and CdcMaxBatchInterval * are both specified, the file write is triggered by whichever parameter condition * is met first within a DMS CloudFormation template.

The default value is * 32 MB.

*/ inline int GetCdcMinFileSize() const{ return m_cdcMinFileSize; } /** *

Minimum file size, defined in kilobytes, to reach for a file output to Amazon * S3.

When CdcMinFileSize and CdcMaxBatchInterval * are both specified, the file write is triggered by whichever parameter condition * is met first within a DMS CloudFormation template.

The default value is * 32 MB.

*/ inline bool CdcMinFileSizeHasBeenSet() const { return m_cdcMinFileSizeHasBeenSet; } /** *

Minimum file size, defined in kilobytes, to reach for a file output to Amazon * S3.

When CdcMinFileSize and CdcMaxBatchInterval * are both specified, the file write is triggered by whichever parameter condition * is met first within a DMS CloudFormation template.

The default value is * 32 MB.

*/ inline void SetCdcMinFileSize(int value) { m_cdcMinFileSizeHasBeenSet = true; m_cdcMinFileSize = value; } /** *

Minimum file size, defined in kilobytes, to reach for a file output to Amazon * S3.

When CdcMinFileSize and CdcMaxBatchInterval * are both specified, the file write is triggered by whichever parameter condition * is met first within a DMS CloudFormation template.

The default value is * 32 MB.
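
Illustrative sketch (editor-added; assumes an S3Settings instance named settings):

  settings.WithCdcMinFileSize(64000); // in kilobytes; whichever of this and CdcMaxBatchInterval is met first wins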

*/ inline S3Settings& WithCdcMinFileSize(int value) { SetCdcMinFileSize(value); return *this;} /** *

An optional parameter that specifies how DMS treats null values. While * handling the null value, you can use this parameter to pass a user-defined * string as null when writing to the target. For example, when target columns are * not nullable, you can use this option to differentiate between the empty string * value and the null value. So, if you set this parameter value to the empty * string ("" or ''), DMS treats the empty string as the null value instead of * NULL.

The default value is NULL. Valid values * include any valid string.

*/ inline const Aws::String& GetCsvNullValue() const{ return m_csvNullValue; } /** *

An optional parameter that specifies how DMS treats null values. While * handling the null value, you can use this parameter to pass a user-defined * string as null when writing to the target. For example, when target columns are * not nullable, you can use this option to differentiate between the empty string * value and the null value. So, if you set this parameter value to the empty * string ("" or ''), DMS treats the empty string as the null value instead of * NULL.

The default value is NULL. Valid values * include any valid string.

*/ inline bool CsvNullValueHasBeenSet() const { return m_csvNullValueHasBeenSet; } /** *

An optional parameter that specifies how DMS treats null values. While * handling the null value, you can use this parameter to pass a user-defined * string as null when writing to the target. For example, when target columns are * not nullable, you can use this option to differentiate between the empty string * value and the null value. So, if you set this parameter value to the empty * string ("" or ''), DMS treats the empty string as the null value instead of * NULL.

The default value is NULL. Valid values * include any valid string.

*/ inline void SetCsvNullValue(const Aws::String& value) { m_csvNullValueHasBeenSet = true; m_csvNullValue = value; } /** *

An optional parameter that specifies how DMS treats null values. While * handling the null value, you can use this parameter to pass a user-defined * string as null when writing to the target. For example, when target columns are * not nullable, you can use this option to differentiate between the empty string * value and the null value. So, if you set this parameter value to the empty * string ("" or ''), DMS treats the empty string as the null value instead of * NULL.

The default value is NULL. Valid values * include any valid string.

*/ inline void SetCsvNullValue(Aws::String&& value) { m_csvNullValueHasBeenSet = true; m_csvNullValue = std::move(value); } /** *

An optional parameter that specifies how DMS treats null values. While * handling the null value, you can use this parameter to pass a user-defined * string as null when writing to the target. For example, when target columns are * not nullable, you can use this option to differentiate between the empty string * value and the null value. So, if you set this parameter value to the empty * string ("" or ''), DMS treats the empty string as the null value instead of * NULL.

The default value is NULL. Valid values * include any valid string.

*/ inline void SetCsvNullValue(const char* value) { m_csvNullValueHasBeenSet = true; m_csvNullValue.assign(value); } /** *

An optional parameter that specifies how DMS treats null values. While * handling the null value, you can use this parameter to pass a user-defined * string as null when writing to the target. For example, when target columns are * not nullable, you can use this option to differentiate between the empty string * value and the null value. So, if you set this parameter value to the empty * string ("" or ''), DMS treats the empty string as the null value instead of * NULL.

The default value is NULL. Valid values * include any valid string.

*/ inline S3Settings& WithCsvNullValue(const Aws::String& value) { SetCsvNullValue(value); return *this;} /** *

An optional parameter that specifies how DMS treats null values. While * handling the null value, you can use this parameter to pass a user-defined * string as null when writing to the target. For example, when target columns are * not nullable, you can use this option to differentiate between the empty string * value and the null value. So, if you set this parameter value to the empty * string ("" or ''), DMS treats the empty string as the null value instead of * NULL.

The default value is NULL. Valid values * include any valid string.

*/ inline S3Settings& WithCsvNullValue(Aws::String&& value) { SetCsvNullValue(std::move(value)); return *this;} /** *

An optional parameter that specifies how DMS treats null values. While * handling the null value, you can use this parameter to pass a user-defined * string as null when writing to the target. For example, when target columns are * not nullable, you can use this option to differentiate between the empty string * value and the null value. So, if you set this parameter value to the empty * string ("" or ''), DMS treats the empty string as the null value instead of * NULL.

The default value is NULL. Valid values * include any valid string.
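
Illustrative sketch (editor-added; assumes an S3Settings instance named settings):

  settings.WithCsvNullValue(""); // write null values as an empty string instead of the default NULL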

*/ inline S3Settings& WithCsvNullValue(const char* value) { SetCsvNullValue(value); return *this;} /** *

When this value is set to 1, DMS ignores the first row header in a .csv file. * A value of 1 turns on the feature; a value of 0 turns off the feature.


The default is 0.

*/ inline int GetIgnoreHeaderRows() const{ return m_ignoreHeaderRows; } /** *

When this value is set to 1, DMS ignores the first row header in a .csv file. * A value of 1 turns on the feature; a value of 0 turns off the feature.


The default is 0.

*/ inline bool IgnoreHeaderRowsHasBeenSet() const { return m_ignoreHeaderRowsHasBeenSet; } /** *

When this value is set to 1, DMS ignores the first row header in a .csv file. * A value of 1 turns on the feature; a value of 0 turns off the feature.


The default is 0.

*/ inline void SetIgnoreHeaderRows(int value) { m_ignoreHeaderRowsHasBeenSet = true; m_ignoreHeaderRows = value; } /** *

When this value is set to 1, DMS ignores the first row header in a .csv file. * A value of 1 turns on the feature; a value of 0 turns off the feature.


The default is 0.
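
Illustrative sketch (editor-added; assumes an S3Settings instance named settings):

  settings.WithIgnoreHeaderRows(1); // skip the header row of each source .csv file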

*/ inline S3Settings& WithIgnoreHeaderRows(int value) { SetIgnoreHeaderRows(value); return *this;} /** *

A value that specifies the maximum size (in KB) of any .csv file to be * created while migrating to an S3 target during full load.

The default * value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.

*/ inline int GetMaxFileSize() const{ return m_maxFileSize; } /** *

A value that specifies the maximum size (in KB) of any .csv file to be * created while migrating to an S3 target during full load.

The default * value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.

*/ inline bool MaxFileSizeHasBeenSet() const { return m_maxFileSizeHasBeenSet; } /** *

A value that specifies the maximum size (in KB) of any .csv file to be * created while migrating to an S3 target during full load.

The default * value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.

*/ inline void SetMaxFileSize(int value) { m_maxFileSizeHasBeenSet = true; m_maxFileSize = value; } /** *

A value that specifies the maximum size (in KB) of any .csv file to be * created while migrating to an S3 target during full load.

The default * value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.
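
Illustrative sketch (editor-added; assumes an S3Settings instance named settings):

  settings.WithMaxFileSize(512000); // cap full-load .csv files at 512,000 KB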

*/ inline S3Settings& WithMaxFileSize(int value) { SetMaxFileSize(value); return *this;} /** *

    /**
     * For an S3 source, when this value is set to true or y, each leading
     * double quotation mark has to be followed by an ending double quotation
     * mark. This formatting complies with RFC 4180. When this value is set to
     * false or n, string literals are copied to the target as is. In this
     * case, a delimiter (row or column) signals the end of the field. Thus,
     * you can't use a delimiter as part of the string, because it signals the
     * end of the value.
     *
     * For an S3 target, an optional parameter used to set behavior to comply
     * with RFC 4180 for data migrated to Amazon S3 using .csv file format
     * only. When this value is set to true or y using Amazon S3 as a target,
     * if the data has quotation marks or newline characters in it, DMS
     * encloses the entire column with an additional pair of double quotation
     * marks ("). Every quotation mark within the data is repeated twice.
     *
     * The default value is true. Valid values include true, false, y, and n.
     */
    inline bool GetRfc4180() const{ return m_rfc4180; }

    /**
     * For an S3 source, when this value is set to true or y, each leading
     * double quotation mark has to be followed by an ending double quotation
     * mark. This formatting complies with RFC 4180. When this value is set to
     * false or n, string literals are copied to the target as is. In this
     * case, a delimiter (row or column) signals the end of the field. Thus,
     * you can't use a delimiter as part of the string, because it signals the
     * end of the value.
     *
     * For an S3 target, an optional parameter used to set behavior to comply
     * with RFC 4180 for data migrated to Amazon S3 using .csv file format
     * only. When this value is set to true or y using Amazon S3 as a target,
     * if the data has quotation marks or newline characters in it, DMS
     * encloses the entire column with an additional pair of double quotation
     * marks ("). Every quotation mark within the data is repeated twice.
     *
     * The default value is true. Valid values include true, false, y, and n.
     */
    inline bool Rfc4180HasBeenSet() const { return m_rfc4180HasBeenSet; }

    /**
     * For an S3 source, when this value is set to true or y, each leading
     * double quotation mark has to be followed by an ending double quotation
     * mark. This formatting complies with RFC 4180. When this value is set to
     * false or n, string literals are copied to the target as is. In this
     * case, a delimiter (row or column) signals the end of the field. Thus,
     * you can't use a delimiter as part of the string, because it signals the
     * end of the value.
     *
     * For an S3 target, an optional parameter used to set behavior to comply
     * with RFC 4180 for data migrated to Amazon S3 using .csv file format
     * only. When this value is set to true or y using Amazon S3 as a target,
     * if the data has quotation marks or newline characters in it, DMS
     * encloses the entire column with an additional pair of double quotation
     * marks ("). Every quotation mark within the data is repeated twice.
     *
     * The default value is true. Valid values include true, false, y, and n.
     */
    inline void SetRfc4180(bool value) { m_rfc4180HasBeenSet = true; m_rfc4180 = value; }

    /**
     * For an S3 source, when this value is set to true or y, each leading
     * double quotation mark has to be followed by an ending double quotation
     * mark. This formatting complies with RFC 4180. When this value is set to
     * false or n, string literals are copied to the target as is. In this
     * case, a delimiter (row or column) signals the end of the field. Thus,
     * you can't use a delimiter as part of the string, because it signals the
     * end of the value.
     *
     * For an S3 target, an optional parameter used to set behavior to comply
     * with RFC 4180 for data migrated to Amazon S3 using .csv file format
     * only. When this value is set to true or y using Amazon S3 as a target,
     * if the data has quotation marks or newline characters in it, DMS
     * encloses the entire column with an additional pair of double quotation
     * marks ("). Every quotation mark within the data is repeated twice.
     *
     * The default value is true. Valid values include true, false, y, and n.
     */
    inline S3Settings& WithRfc4180(bool value) { SetRfc4180(value); return *this;}
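    // Illustrative sketch, not part of the generated header: turn off RFC 4180
    // quoting for an S3 target so string literals are copied as is. Note that
    // a row or column delimiter inside a value then signals the end of the
    // field, so delimiters can't appear within the data itself.
    //
    //   settings.WithRfc4180(false);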

    /**
     * When creating an S3 target endpoint, set DatePartitionTimezone to
     * convert the current UTC time into a specified time zone. The conversion
     * occurs when a date partition folder is created and a CDC filename is
     * generated. The time zone format is Area/Location. Use this parameter
     * when DatePartitionEnabled is set to true, as shown in the following
     * example.
     *
     * s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence":
     * "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH",
     * "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
     */
    inline const Aws::String& GetDatePartitionTimezone() const{ return m_datePartitionTimezone; }

    /**
     * When creating an S3 target endpoint, set DatePartitionTimezone to
     * convert the current UTC time into a specified time zone. The conversion
     * occurs when a date partition folder is created and a CDC filename is
     * generated. The time zone format is Area/Location. Use this parameter
     * when DatePartitionEnabled is set to true, as shown in the following
     * example.
     *
     * s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence":
     * "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH",
     * "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
     */
    inline bool DatePartitionTimezoneHasBeenSet() const { return m_datePartitionTimezoneHasBeenSet; }

    /**
     * When creating an S3 target endpoint, set DatePartitionTimezone to
     * convert the current UTC time into a specified time zone. The conversion
     * occurs when a date partition folder is created and a CDC filename is
     * generated. The time zone format is Area/Location. Use this parameter
     * when DatePartitionEnabled is set to true, as shown in the following
     * example.
     *
     * s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence":
     * "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH",
     * "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
     */
    inline void SetDatePartitionTimezone(const Aws::String& value) { m_datePartitionTimezoneHasBeenSet = true; m_datePartitionTimezone = value; }

    /**
     * When creating an S3 target endpoint, set DatePartitionTimezone to
     * convert the current UTC time into a specified time zone. The conversion
     * occurs when a date partition folder is created and a CDC filename is
     * generated. The time zone format is Area/Location. Use this parameter
     * when DatePartitionEnabled is set to true, as shown in the following
     * example.
     *
     * s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence":
     * "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH",
     * "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
     */
    inline void SetDatePartitionTimezone(Aws::String&& value) { m_datePartitionTimezoneHasBeenSet = true; m_datePartitionTimezone = std::move(value); }

    /**
     * When creating an S3 target endpoint, set DatePartitionTimezone to
     * convert the current UTC time into a specified time zone. The conversion
     * occurs when a date partition folder is created and a CDC filename is
     * generated. The time zone format is Area/Location. Use this parameter
     * when DatePartitionEnabled is set to true, as shown in the following
     * example.
     *
     * s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence":
     * "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH",
     * "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
     */
    inline void SetDatePartitionTimezone(const char* value) { m_datePartitionTimezoneHasBeenSet = true; m_datePartitionTimezone.assign(value); }

    /**
     * When creating an S3 target endpoint, set DatePartitionTimezone to
     * convert the current UTC time into a specified time zone. The conversion
     * occurs when a date partition folder is created and a CDC filename is
     * generated. The time zone format is Area/Location. Use this parameter
     * when DatePartitionEnabled is set to true, as shown in the following
     * example.
     *
     * s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence":
     * "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH",
     * "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
     */
    inline S3Settings& WithDatePartitionTimezone(const Aws::String& value) { SetDatePartitionTimezone(value); return *this;}

    /**
     * When creating an S3 target endpoint, set DatePartitionTimezone to
     * convert the current UTC time into a specified time zone. The conversion
     * occurs when a date partition folder is created and a CDC filename is
     * generated. The time zone format is Area/Location. Use this parameter
     * when DatePartitionEnabled is set to true, as shown in the following
     * example.
     *
     * s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence":
     * "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH",
     * "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
     */
    inline S3Settings& WithDatePartitionTimezone(Aws::String&& value) { SetDatePartitionTimezone(std::move(value)); return *this;}

    /**
     * When creating an S3 target endpoint, set DatePartitionTimezone to
     * convert the current UTC time into a specified time zone. The conversion
     * occurs when a date partition folder is created and a CDC filename is
     * generated. The time zone format is Area/Location. Use this parameter
     * when DatePartitionEnabled is set to true, as shown in the following
     * example.
     *
     * s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence":
     * "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH",
     * "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
     */
    inline S3Settings& WithDatePartitionTimezone(const char* value) { SetDatePartitionTimezone(value); return *this;}
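    // Illustrative sketch, not part of the generated header: the fluent C++
    // equivalent of the s3-settings JSON example above. The enum values come
    // from DatePartitionSequenceValue and DatePartitionDelimiterValue as used
    // elsewhere in this model; the bucket name is the documentation example's.
    //
    //   using namespace Aws::DatabaseMigrationService::Model;
    //   settings.WithDatePartitionEnabled(true)
    //           .WithDatePartitionSequence(DatePartitionSequenceValue::YYYYMMDDHH)
    //           .WithDatePartitionDelimiter(DatePartitionDelimiterValue::SLASH)
    //           .WithDatePartitionTimezone("Asia/Seoul")
    //           .WithBucketName("dms-nattarat-test");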

    /**
     * Use the S3 target endpoint setting AddTrailingPaddingCharacter to add
     * padding on string data. The default value is false.
     */
    inline bool GetAddTrailingPaddingCharacter() const{ return m_addTrailingPaddingCharacter; }

    /**
     * Use the S3 target endpoint setting AddTrailingPaddingCharacter to add
     * padding on string data. The default value is false.
     */
    inline bool AddTrailingPaddingCharacterHasBeenSet() const { return m_addTrailingPaddingCharacterHasBeenSet; }

    /**
     * Use the S3 target endpoint setting AddTrailingPaddingCharacter to add
     * padding on string data. The default value is false.
     */
    inline void SetAddTrailingPaddingCharacter(bool value) { m_addTrailingPaddingCharacterHasBeenSet = true; m_addTrailingPaddingCharacter = value; }

    /**
     * Use the S3 target endpoint setting AddTrailingPaddingCharacter to add
     * padding on string data. The default value is false.
     */
    inline S3Settings& WithAddTrailingPaddingCharacter(bool value) { SetAddTrailingPaddingCharacter(value); return *this;}
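    // Illustrative sketch, not part of the generated header: opt in to
    // trailing padding on string data written to the S3 target (off by
    // default).
    //
    //   settings.WithAddTrailingPaddingCharacter(true);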

    /**
     * To specify a bucket owner and prevent sniping, you can use the
     * ExpectedBucketOwner endpoint setting.
     *
     * Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
     *
     * When you make a request to test a connection or perform a migration, S3
     * checks the account ID of the bucket owner against the specified
     * parameter.
     */
    inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }

    /**
     * To specify a bucket owner and prevent sniping, you can use the
     * ExpectedBucketOwner endpoint setting.
     *
     * Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
     *
     * When you make a request to test a connection or perform a migration, S3
     * checks the account ID of the bucket owner against the specified
     * parameter.
     */
    inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }

    /**
     * To specify a bucket owner and prevent sniping, you can use the
     * ExpectedBucketOwner endpoint setting.
     *
     * Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
     *
     * When you make a request to test a connection or perform a migration, S3
     * checks the account ID of the bucket owner against the specified
     * parameter.
     */
    inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }

    /**
     * To specify a bucket owner and prevent sniping, you can use the
     * ExpectedBucketOwner endpoint setting.
     *
     * Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
     *
     * When you make a request to test a connection or perform a migration, S3
     * checks the account ID of the bucket owner against the specified
     * parameter.
     */
    inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }

    /**
     * To specify a bucket owner and prevent sniping, you can use the
     * ExpectedBucketOwner endpoint setting.
     *
     * Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
     *
     * When you make a request to test a connection or perform a migration, S3
     * checks the account ID of the bucket owner against the specified
     * parameter.
     */
    inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }

    /**
     * To specify a bucket owner and prevent sniping, you can use the
     * ExpectedBucketOwner endpoint setting.
     *
     * Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
     *
     * When you make a request to test a connection or perform a migration, S3
     * checks the account ID of the bucket owner against the specified
     * parameter.
     */
    inline S3Settings& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}

    /**
     * To specify a bucket owner and prevent sniping, you can use the
     * ExpectedBucketOwner endpoint setting.
     *
     * Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
     *
     * When you make a request to test a connection or perform a migration, S3
     * checks the account ID of the bucket owner against the specified
     * parameter.
     */
    inline S3Settings& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}

    /**
     * To specify a bucket owner and prevent sniping, you can use the
     * ExpectedBucketOwner endpoint setting.
     *
     * Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
     *
     * When you make a request to test a connection or perform a migration, S3
     * checks the account ID of the bucket owner against the specified
     * parameter.
     */
    inline S3Settings& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
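    // Illustrative sketch, not part of the generated header: pin the bucket to
    // an expected owning account so connection tests and migrations fail if
    // the bucket is owned by anyone else. The account ID is a placeholder.
    //
    //   settings.WithExpectedBucketOwner("123456789012");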

    /**
     * When true, allows Glue to catalog your S3 bucket. Creating a Glue
     * catalog lets you use Athena to query your data.
     */
    inline bool GetGlueCatalogGeneration() const{ return m_glueCatalogGeneration; }

    /**
     * When true, allows Glue to catalog your S3 bucket. Creating a Glue
     * catalog lets you use Athena to query your data.
     */
    inline bool GlueCatalogGenerationHasBeenSet() const { return m_glueCatalogGenerationHasBeenSet; }

    /**
     * When true, allows Glue to catalog your S3 bucket. Creating a Glue
     * catalog lets you use Athena to query your data.
     */
    inline void SetGlueCatalogGeneration(bool value) { m_glueCatalogGenerationHasBeenSet = true; m_glueCatalogGeneration = value; }

    /**
     * When true, allows Glue to catalog your S3 bucket. Creating a Glue
     * catalog lets you use Athena to query your data.
     */
    inline S3Settings& WithGlueCatalogGeneration(bool value) { SetGlueCatalogGeneration(value); return *this;}

  private:

    Aws::String m_serviceAccessRoleArn;
    bool m_serviceAccessRoleArnHasBeenSet = false;

    Aws::String m_externalTableDefinition;
    bool m_externalTableDefinitionHasBeenSet = false;

    Aws::String m_csvRowDelimiter;
    bool m_csvRowDelimiterHasBeenSet = false;

    Aws::String m_csvDelimiter;
    bool m_csvDelimiterHasBeenSet = false;

    Aws::String m_bucketFolder;
    bool m_bucketFolderHasBeenSet = false;

    Aws::String m_bucketName;
    bool m_bucketNameHasBeenSet = false;

    CompressionTypeValue m_compressionType;
    bool m_compressionTypeHasBeenSet = false;

    EncryptionModeValue m_encryptionMode;
    bool m_encryptionModeHasBeenSet = false;

    Aws::String m_serverSideEncryptionKmsKeyId;
    bool m_serverSideEncryptionKmsKeyIdHasBeenSet = false;

    DataFormatValue m_dataFormat;
    bool m_dataFormatHasBeenSet = false;

    EncodingTypeValue m_encodingType;
    bool m_encodingTypeHasBeenSet = false;

    int m_dictPageSizeLimit;
    bool m_dictPageSizeLimitHasBeenSet = false;

    int m_rowGroupLength;
    bool m_rowGroupLengthHasBeenSet = false;

    int m_dataPageSize;
    bool m_dataPageSizeHasBeenSet = false;

    ParquetVersionValue m_parquetVersion;
    bool m_parquetVersionHasBeenSet = false;

    bool m_enableStatistics;
    bool m_enableStatisticsHasBeenSet = false;

    bool m_includeOpForFullLoad;
    bool m_includeOpForFullLoadHasBeenSet = false;

    bool m_cdcInsertsOnly;
    bool m_cdcInsertsOnlyHasBeenSet = false;

    Aws::String m_timestampColumnName;
    bool m_timestampColumnNameHasBeenSet = false;

    bool m_parquetTimestampInMillisecond;
    bool m_parquetTimestampInMillisecondHasBeenSet = false;

    bool m_cdcInsertsAndUpdates;
    bool m_cdcInsertsAndUpdatesHasBeenSet = false;

    bool m_datePartitionEnabled;
    bool m_datePartitionEnabledHasBeenSet = false;

    DatePartitionSequenceValue m_datePartitionSequence;
    bool m_datePartitionSequenceHasBeenSet = false;

    DatePartitionDelimiterValue m_datePartitionDelimiter;
    bool m_datePartitionDelimiterHasBeenSet = false;

    bool m_useCsvNoSupValue;
    bool m_useCsvNoSupValueHasBeenSet = false;

    Aws::String m_csvNoSupValue;
    bool m_csvNoSupValueHasBeenSet = false;

    bool m_preserveTransactions;
    bool m_preserveTransactionsHasBeenSet = false;

    Aws::String m_cdcPath;
    bool m_cdcPathHasBeenSet = false;

    bool m_useTaskStartTimeForFullLoadTimestamp;
    bool m_useTaskStartTimeForFullLoadTimestampHasBeenSet = false;

    CannedAclForObjectsValue m_cannedAclForObjects;
    bool m_cannedAclForObjectsHasBeenSet = false;

    bool m_addColumnName;
    bool m_addColumnNameHasBeenSet = false;

    int m_cdcMaxBatchInterval;
    bool m_cdcMaxBatchIntervalHasBeenSet = false;

    int m_cdcMinFileSize;
    bool m_cdcMinFileSizeHasBeenSet = false;

    Aws::String m_csvNullValue;
    bool m_csvNullValueHasBeenSet = false;

    int m_ignoreHeaderRows;
    bool m_ignoreHeaderRowsHasBeenSet = false;

    int m_maxFileSize;
    bool m_maxFileSizeHasBeenSet = false;

    bool m_rfc4180;
    bool m_rfc4180HasBeenSet = false;

    Aws::String m_datePartitionTimezone;
    bool m_datePartitionTimezoneHasBeenSet = false;

    bool m_addTrailingPaddingCharacter;
    bool m_addTrailingPaddingCharacterHasBeenSet = false;

    Aws::String m_expectedBucketOwner;
    bool m_expectedBucketOwnerHasBeenSet = false;

    bool m_glueCatalogGeneration;
    bool m_glueCatalogGenerationHasBeenSet = false;
  };

} // namespace Model
} // namespace DatabaseMigrationService
} // namespace Aws
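// Illustrative end-to-end sketch, not part of the generated header: build an
// S3Settings object with several of the options documented above and
// serialize it to JSON. The include path and the ARN/bucket values are
// assumptions for the example; the accessors and Jsonize() are declared in
// this class.
//
//   #include <aws/dms/model/S3Settings.h>   // assumed SDK include path
//
//   using Aws::DatabaseMigrationService::Model::S3Settings;
//
//   S3Settings settings;
//   settings.WithServiceAccessRoleArn("arn:aws:iam::123456789012:role/dms-s3-access")
//           .WithIgnoreHeaderRows(1)           // skip .csv header rows on the source
//           .WithMaxFileSize(102400)           // ~100 MB per full-load .csv file
//           .WithGlueCatalogGeneration(true);  // catalog the bucket for Athena queries
//
//   Aws::Utils::Json::JsonValue json = settings.Jsonize();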