/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/dms/DatabaseMigrationService_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/dms/model/CompressionTypeValue.h>
#include <aws/dms/model/EncryptionModeValue.h>
#include <aws/dms/model/DataFormatValue.h>
#include <aws/dms/model/EncodingTypeValue.h>
#include <aws/dms/model/ParquetVersionValue.h>
#include <aws/dms/model/DatePartitionSequenceValue.h>
#include <aws/dms/model/DatePartitionDelimiterValue.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace DatabaseMigrationService
{
namespace Model
{

  /**
   * <p>Settings for exporting data to Amazon S3.</p><p><h3>See Also:</h3> AWS API
   * Reference</p>
   */
  class AWS_DATABASEMIGRATIONSERVICE_API S3Settings
  {
  public:
    S3Settings();
    S3Settings(Aws::Utils::Json::JsonView jsonValue);
    S3Settings& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;
    /**
     * <p>The Amazon Resource Name (ARN) used by the service to access the IAM role.
     * The role must allow the <code>iam:PassRole</code> action. It is a required
     * parameter that enables DMS to write and read objects from an S3 bucket.</p>
     */
    inline const Aws::String& GetServiceAccessRoleArn() const{ return m_serviceAccessRoleArn; }
    inline bool ServiceAccessRoleArnHasBeenSet() const { return m_serviceAccessRoleArnHasBeenSet; }
    inline void SetServiceAccessRoleArn(const Aws::String& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = value; }
    inline void SetServiceAccessRoleArn(Aws::String&& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = std::move(value); }
    inline void SetServiceAccessRoleArn(const char* value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn.assign(value); }
    inline S3Settings& WithServiceAccessRoleArn(const Aws::String& value) { SetServiceAccessRoleArn(value); return *this;}
    inline S3Settings& WithServiceAccessRoleArn(Aws::String&& value) { SetServiceAccessRoleArn(std::move(value)); return *this;}
    inline S3Settings& WithServiceAccessRoleArn(const char* value) { SetServiceAccessRoleArn(value); return *this;}
    /**
     * <p>Specifies how tables are defined in the S3 source files only.</p>
     */
    inline const Aws::String& GetExternalTableDefinition() const{ return m_externalTableDefinition; }
    inline bool ExternalTableDefinitionHasBeenSet() const { return m_externalTableDefinitionHasBeenSet; }
    inline void SetExternalTableDefinition(const Aws::String& value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition = value; }
    inline void SetExternalTableDefinition(Aws::String&& value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition = std::move(value); }
    inline void SetExternalTableDefinition(const char* value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition.assign(value); }
    inline S3Settings& WithExternalTableDefinition(const Aws::String& value) { SetExternalTableDefinition(value); return *this;}
    inline S3Settings& WithExternalTableDefinition(Aws::String&& value) { SetExternalTableDefinition(std::move(value)); return *this;}
    inline S3Settings& WithExternalTableDefinition(const char* value) { SetExternalTableDefinition(value); return *this;}
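    /*
     * Illustrative sketch (assumes the fluent setters above; not generated SDK
     * code): the external table definition is passed as a JSON document. The
     * JSON shape below is a placeholder for this example, not a verified schema.
     *
     *   Aws::DatabaseMigrationService::Model::S3Settings s3Settings;
     *   s3Settings.WithExternalTableDefinition(R"({"TableCount": "1", "Tables": [ ... ]})");
     */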
    /**
     * <p>The delimiter used to separate rows in the .csv file for both source and
     * target. The default is a carriage return (<code>\n</code>).</p>
     */
    inline const Aws::String& GetCsvRowDelimiter() const{ return m_csvRowDelimiter; }
    inline bool CsvRowDelimiterHasBeenSet() const { return m_csvRowDelimiterHasBeenSet; }
    inline void SetCsvRowDelimiter(const Aws::String& value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter = value; }
    inline void SetCsvRowDelimiter(Aws::String&& value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter = std::move(value); }
    inline void SetCsvRowDelimiter(const char* value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter.assign(value); }
    inline S3Settings& WithCsvRowDelimiter(const Aws::String& value) { SetCsvRowDelimiter(value); return *this;}
    inline S3Settings& WithCsvRowDelimiter(Aws::String&& value) { SetCsvRowDelimiter(std::move(value)); return *this;}
    inline S3Settings& WithCsvRowDelimiter(const char* value) { SetCsvRowDelimiter(value); return *this;}
    /**
     * <p>The delimiter used to separate columns in the .csv file for both source and
     * target. The default is a comma.</p>
     */
    inline const Aws::String& GetCsvDelimiter() const{ return m_csvDelimiter; }
    inline bool CsvDelimiterHasBeenSet() const { return m_csvDelimiterHasBeenSet; }
    inline void SetCsvDelimiter(const Aws::String& value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter = value; }
    inline void SetCsvDelimiter(Aws::String&& value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter = std::move(value); }
    inline void SetCsvDelimiter(const char* value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter.assign(value); }
    inline S3Settings& WithCsvDelimiter(const Aws::String& value) { SetCsvDelimiter(value); return *this;}
    inline S3Settings& WithCsvDelimiter(Aws::String&& value) { SetCsvDelimiter(std::move(value)); return *this;}
    inline S3Settings& WithCsvDelimiter(const char* value) { SetCsvDelimiter(value); return *this;}
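    /*
     * Illustrative sketch: configuring both .csv delimiters through the fluent
     * setters, using the documented defaults (comma and carriage return).
     *
     *   s3Settings.WithCsvDelimiter(",")
     *             .WithCsvRowDelimiter("\n");
     */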
    /**
     * <p>An optional parameter to set a folder name in the S3 bucket. If provided,
     * tables are created in the path
     * <code>bucketFolder/schema_name/table_name/</code>. If this parameter isn't
     * specified, then the path used is <code>schema_name/table_name/</code>.</p>
     */
    inline const Aws::String& GetBucketFolder() const{ return m_bucketFolder; }
    inline bool BucketFolderHasBeenSet() const { return m_bucketFolderHasBeenSet; }
    inline void SetBucketFolder(const Aws::String& value) { m_bucketFolderHasBeenSet = true; m_bucketFolder = value; }
    inline void SetBucketFolder(Aws::String&& value) { m_bucketFolderHasBeenSet = true; m_bucketFolder = std::move(value); }
    inline void SetBucketFolder(const char* value) { m_bucketFolderHasBeenSet = true; m_bucketFolder.assign(value); }
    inline S3Settings& WithBucketFolder(const Aws::String& value) { SetBucketFolder(value); return *this;}
    inline S3Settings& WithBucketFolder(Aws::String&& value) { SetBucketFolder(std::move(value)); return *this;}
    inline S3Settings& WithBucketFolder(const char* value) { SetBucketFolder(value); return *this;}
    /**
     * <p>The name of the S3 bucket.</p>
     */
    inline const Aws::String& GetBucketName() const{ return m_bucketName; }
    inline bool BucketNameHasBeenSet() const { return m_bucketNameHasBeenSet; }
    inline void SetBucketName(const Aws::String& value) { m_bucketNameHasBeenSet = true; m_bucketName = value; }
    inline void SetBucketName(Aws::String&& value) { m_bucketNameHasBeenSet = true; m_bucketName = std::move(value); }
    inline void SetBucketName(const char* value) { m_bucketNameHasBeenSet = true; m_bucketName.assign(value); }
    inline S3Settings& WithBucketName(const Aws::String& value) { SetBucketName(value); return *this;}
    inline S3Settings& WithBucketName(Aws::String&& value) { SetBucketName(std::move(value)); return *this;}
    inline S3Settings& WithBucketName(const char* value) { SetBucketName(value); return *this;}
    /**
     * <p>An optional parameter to use GZIP to compress the target files. Set to
     * <code>GZIP</code> to compress the target files. Either set this parameter to
     * <code>NONE</code> (the default) or don't use it to leave the files
     * uncompressed. This parameter applies to both .csv and .parquet file
     * formats.</p>
     */
    inline const CompressionTypeValue& GetCompressionType() const{ return m_compressionType; }
    inline bool CompressionTypeHasBeenSet() const { return m_compressionTypeHasBeenSet; }
    inline void SetCompressionType(const CompressionTypeValue& value) { m_compressionTypeHasBeenSet = true; m_compressionType = value; }
    inline void SetCompressionType(CompressionTypeValue&& value) { m_compressionTypeHasBeenSet = true; m_compressionType = std::move(value); }
    inline S3Settings& WithCompressionType(const CompressionTypeValue& value) { SetCompressionType(value); return *this;}
    inline S3Settings& WithCompressionType(CompressionTypeValue&& value) { SetCompressionType(std::move(value)); return *this;}
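    /*
     * Illustrative sketch: a bucket target with GZIP compression. The
     * CompressionTypeValue member spelling is an assumption based on the
     * service model values, not confirmed by this header.
     *
     *   s3Settings.WithBucketName("my-target-bucket")
     *             .WithBucketFolder("dms-output")
     *             .WithCompressionType(CompressionTypeValue::GZIP);
     */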
    /**
     * <p>The type of server-side encryption that you want to use for your data. This
     * encryption type is part of the endpoint settings or the extra connections
     * attributes for Amazon S3. You can choose either <code>SSE_S3</code> (the
     * default) or <code>SSE_KMS</code>.</p> <note> <p>For the
     * <code>ModifyEndpoint</code> operation, you can change the existing value of
     * the <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to
     * <code>SSE_S3</code>. But you can't change the existing value from
     * <code>SSE_S3</code> to <code>SSE_KMS</code>.</p> </note> <p>To use
     * <code>SSE_S3</code>, you need an Identity and Access Management (IAM) role
     * with permission to allow <code>"arn:aws:s3:::dms-*"</code> to use the
     * following actions:</p> <ul> <li> <p> <code>s3:CreateBucket</code> </p> </li>
     * <li> <p> <code>s3:ListBucket</code> </p> </li> <li> <p>
     * <code>s3:DeleteBucket</code> </p> </li> <li> <p>
     * <code>s3:GetBucketLocation</code> </p> </li> <li> <p>
     * <code>s3:GetObject</code> </p> </li> <li> <p> <code>s3:PutObject</code> </p>
     * </li> <li> <p> <code>s3:DeleteObject</code> </p> </li> <li> <p>
     * <code>s3:GetObjectVersion</code> </p> </li> <li> <p>
     * <code>s3:GetBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:PutBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:DeleteBucketPolicy</code> </p> </li> </ul>
     */
    inline const EncryptionModeValue& GetEncryptionMode() const{ return m_encryptionMode; }
    inline bool EncryptionModeHasBeenSet() const { return m_encryptionModeHasBeenSet; }
    inline void SetEncryptionMode(const EncryptionModeValue& value) { m_encryptionModeHasBeenSet = true; m_encryptionMode = value; }
    inline void SetEncryptionMode(EncryptionModeValue&& value) { m_encryptionModeHasBeenSet = true; m_encryptionMode = std::move(value); }
    inline S3Settings& WithEncryptionMode(const EncryptionModeValue& value) { SetEncryptionMode(value); return *this;}
    inline S3Settings& WithEncryptionMode(EncryptionModeValue&& value) { SetEncryptionMode(std::move(value)); return *this;}
    /**
     * <p>If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>,
     * provide the KMS key ID. The key that you use needs an attached policy that
     * enables Identity and Access Management (IAM) user permissions and allows use
     * of the key.</p> <p>Here is a CLI example: <code>aws dms create-endpoint
     * --endpoint-identifier value --endpoint-type target --engine-name s3
     * --s3-settings
     * ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value</code>
     * </p>
     */
    inline const Aws::String& GetServerSideEncryptionKmsKeyId() const{ return m_serverSideEncryptionKmsKeyId; }
    inline bool ServerSideEncryptionKmsKeyIdHasBeenSet() const { return m_serverSideEncryptionKmsKeyIdHasBeenSet; }
    inline void SetServerSideEncryptionKmsKeyId(const Aws::String& value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId = value; }
    inline void SetServerSideEncryptionKmsKeyId(Aws::String&& value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId = std::move(value); }
    inline void SetServerSideEncryptionKmsKeyId(const char* value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId.assign(value); }
    inline S3Settings& WithServerSideEncryptionKmsKeyId(const Aws::String& value) { SetServerSideEncryptionKmsKeyId(value); return *this;}
    inline S3Settings& WithServerSideEncryptionKmsKeyId(Aws::String&& value) { SetServerSideEncryptionKmsKeyId(std::move(value)); return *this;}
    inline S3Settings& WithServerSideEncryptionKmsKeyId(const char* value) { SetServerSideEncryptionKmsKeyId(value); return *this;}
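    /*
     * Illustrative sketch: a C++ counterpart of the CLI example above. The ARN
     * and key ID strings are placeholders, and the EncryptionModeValue member
     * spelling is an assumption based on the service model values.
     *
     *   s3Settings.WithServiceAccessRoleArn("arn:aws:iam::111122223333:role/dms-s3-role")
     *             .WithBucketFolder("folder")
     *             .WithBucketName("bucket")
     *             .WithEncryptionMode(EncryptionModeValue::SSE_KMS)
     *             .WithServerSideEncryptionKmsKeyId("arn:aws:kms:us-east-1:111122223333:key/key-id");
     */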
    /**
     * <p>The format of the data that you want to use for output. You can choose one
     * of the following:</p> <ul> <li> <p> <code>csv</code> : This is a row-based
     * file format with comma-separated values (.csv).</p> </li> <li> <p>
     * <code>parquet</code> : Apache Parquet (.parquet) is a columnar storage file
     * format that features efficient compression and provides faster query
     * response.</p> </li> </ul>
     */
    inline const DataFormatValue& GetDataFormat() const{ return m_dataFormat; }
    inline bool DataFormatHasBeenSet() const { return m_dataFormatHasBeenSet; }
    inline void SetDataFormat(const DataFormatValue& value) { m_dataFormatHasBeenSet = true; m_dataFormat = value; }
    inline void SetDataFormat(DataFormatValue&& value) { m_dataFormatHasBeenSet = true; m_dataFormat = std::move(value); }
    inline S3Settings& WithDataFormat(const DataFormatValue& value) { SetDataFormat(value); return *this;}
    inline S3Settings& WithDataFormat(DataFormatValue&& value) { SetDataFormat(std::move(value)); return *this;}
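    /*
     * Illustrative sketch: selecting columnar output. Choosing parquet makes
     * the .parquet-only parameters below (encoding, page and row group sizes,
     * statistics) take effect. The enum member spelling is an assumption.
     *
     *   s3Settings.WithDataFormat(DataFormatValue::parquet);
     */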
    /**
     * <p>The type of encoding you are using:</p> <ul> <li> <p>
     * <code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length
     * encoding to store repeated values more efficiently. This is the default.</p>
     * </li> <li> <p> <code>PLAIN</code> doesn't use encoding at all. Values are
     * stored as they are.</p> </li> <li> <p> <code>PLAIN_DICTIONARY</code> builds a
     * dictionary of the values encountered in a given column. The dictionary is
     * stored in a dictionary page for each column chunk.</p> </li> </ul>
     */
    inline const EncodingTypeValue& GetEncodingType() const{ return m_encodingType; }
    inline bool EncodingTypeHasBeenSet() const { return m_encodingTypeHasBeenSet; }
    inline void SetEncodingType(const EncodingTypeValue& value) { m_encodingTypeHasBeenSet = true; m_encodingType = value; }
    inline void SetEncodingType(EncodingTypeValue&& value) { m_encodingTypeHasBeenSet = true; m_encodingType = std::move(value); }
    inline S3Settings& WithEncodingType(const EncodingTypeValue& value) { SetEncodingType(value); return *this;}
    inline S3Settings& WithEncodingType(EncodingTypeValue&& value) { SetEncodingType(std::move(value)); return *this;}
    /**
     * <p>The maximum size of an encoded dictionary page of a column. If the
     * dictionary page exceeds this, this column is stored using an encoding type of
     * <code>PLAIN</code>. This parameter defaults to 1024 * 1024 bytes (1 MiB), the
     * maximum size of a dictionary page before it reverts to <code>PLAIN</code>
     * encoding. This size is used for .parquet file format only.</p>
     */
    inline int GetDictPageSizeLimit() const{ return m_dictPageSizeLimit; }
    inline bool DictPageSizeLimitHasBeenSet() const { return m_dictPageSizeLimitHasBeenSet; }
    inline void SetDictPageSizeLimit(int value) { m_dictPageSizeLimitHasBeenSet = true; m_dictPageSizeLimit = value; }
    inline S3Settings& WithDictPageSizeLimit(int value) { SetDictPageSizeLimit(value); return *this;}
    /**
     * <p>The number of rows in a row group. A smaller row group size provides faster
     * reads. But as the number of row groups grows, writes become slower. This
     * parameter defaults to 10,000 rows. This number is used for .parquet file
     * format only.</p> <p>If you choose a value larger than the maximum,
     * <code>RowGroupLength</code> is set to the max row group length in bytes (64 *
     * 1024 * 1024).</p>
     */
    inline int GetRowGroupLength() const{ return m_rowGroupLength; }
    inline bool RowGroupLengthHasBeenSet() const { return m_rowGroupLengthHasBeenSet; }
    inline void SetRowGroupLength(int value) { m_rowGroupLengthHasBeenSet = true; m_rowGroupLength = value; }
    inline S3Settings& WithRowGroupLength(int value) { SetRowGroupLength(value); return *this;}
    /**
     * <p>The size of one data page in bytes. This parameter defaults to 1024 * 1024
     * bytes (1 MiB). This number is used for .parquet file format only.</p>
     */
    inline int GetDataPageSize() const{ return m_dataPageSize; }
    inline bool DataPageSizeHasBeenSet() const { return m_dataPageSizeHasBeenSet; }
    inline void SetDataPageSize(int value) { m_dataPageSizeHasBeenSet = true; m_dataPageSize = value; }
    inline S3Settings& WithDataPageSize(int value) { SetDataPageSize(value); return *this;}
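    /*
     * Illustrative sketch: tuning the .parquet layout with the three size
     * parameters above, shown here with their documented defaults (1 MiB pages
     * and 10,000-row row groups).
     *
     *   s3Settings.WithDictPageSizeLimit(1024 * 1024)
     *             .WithRowGroupLength(10000)
     *             .WithDataPageSize(1024 * 1024);
     */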
    /**
     * <p>The version of the Apache Parquet format that you want to use:
     * <code>parquet_1_0</code> (the default) or <code>parquet_2_0</code>.</p>
     */
    inline const ParquetVersionValue& GetParquetVersion() const{ return m_parquetVersion; }
    inline bool ParquetVersionHasBeenSet() const { return m_parquetVersionHasBeenSet; }
    inline void SetParquetVersion(const ParquetVersionValue& value) { m_parquetVersionHasBeenSet = true; m_parquetVersion = value; }
    inline void SetParquetVersion(ParquetVersionValue&& value) { m_parquetVersionHasBeenSet = true; m_parquetVersion = std::move(value); }
    inline S3Settings& WithParquetVersion(const ParquetVersionValue& value) { SetParquetVersion(value); return *this;}
    inline S3Settings& WithParquetVersion(ParquetVersionValue&& value) { SetParquetVersion(std::move(value)); return *this;}
    /**
     * <p>A value that enables statistics for Parquet pages and row groups. Choose
     * <code>true</code> to enable statistics, <code>false</code> to disable.
     * Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
     * and <code>MIN</code> values. This parameter defaults to <code>true</code>.
     * This value is used for .parquet file format only.</p>
     */
    inline bool GetEnableStatistics() const{ return m_enableStatistics; }
    inline bool EnableStatisticsHasBeenSet() const { return m_enableStatisticsHasBeenSet; }
    inline void SetEnableStatistics(bool value) { m_enableStatisticsHasBeenSet = true; m_enableStatistics = value; }
    inline S3Settings& WithEnableStatistics(bool value) { SetEnableStatistics(value); return *this;}
    /**
     * <p>A value that enables a full load to write INSERT operations to the
     * comma-separated value (.csv) or .parquet output files only to indicate how the
     * rows were added to the source database.</p> <note> <p>DMS supports the
     * <code>IncludeOpForFullLoad</code> parameter in versions 3.1.4 and later.</p>
     * <p>DMS supports the use of the .parquet files with the
     * <code>IncludeOpForFullLoad</code> parameter in versions 3.4.7 and later.</p>
     * </note> <p>For full load, records can only be inserted. By default (the
     * <code>false</code> setting), no information is recorded in these output files
     * for a full load to indicate that the rows were inserted at the source
     * database. If <code>IncludeOpForFullLoad</code> is set to <code>true</code> or
     * <code>y</code>, the INSERT is recorded as an I annotation in the first field
     * of the .csv file. This allows the format of your target records from a full
     * load to be consistent with the target records from a CDC load.</p> <note>
     * <p>This setting works together with the <code>CdcInsertsOnly</code> and the
     * <code>CdcInsertsAndUpdates</code> parameters for output to .csv files only.
     * For more information about how these settings work together, see Indicating
     * Source DB Operations in Migrated S3 Data in the <i>Database Migration Service
     * User Guide</i>.</p> </note>
     */
    inline bool GetIncludeOpForFullLoad() const{ return m_includeOpForFullLoad; }
    inline bool IncludeOpForFullLoadHasBeenSet() const { return m_includeOpForFullLoadHasBeenSet; }
    inline void SetIncludeOpForFullLoad(bool value) { m_includeOpForFullLoadHasBeenSet = true; m_includeOpForFullLoad = value; }
    inline S3Settings& WithIncludeOpForFullLoad(bool value) { SetIncludeOpForFullLoad(value); return *this;}
    /**
     * <p>A value that enables a change data capture (CDC) load to write only INSERT
     * operations to .csv or columnar storage (.parquet) output files. By default
     * (the <code>false</code> setting), the first field in a .csv or .parquet record
     * contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values
     * indicate whether the row was inserted, updated, or deleted at the source
     * database for a CDC load to the target.</p> <p>If <code>CdcInsertsOnly</code>
     * is set to <code>true</code> or <code>y</code>, only INSERTs from the source
     * database are migrated to the .csv or .parquet file. For .csv format only, how
     * these INSERTs are recorded depends on the value of
     * <code>IncludeOpForFullLoad</code>. If <code>IncludeOpForFullLoad</code> is set
     * to <code>true</code>, the first field of every CDC record is set to I to
     * indicate the INSERT operation at the source. If
     * <code>IncludeOpForFullLoad</code> is set to <code>false</code>, every CDC
     * record is written without a first field to indicate the INSERT operation at
     * the source. For more information about how these settings work together, see
     * Indicating Source DB Operations in Migrated S3 Data in the <i>Database
     * Migration Service User Guide</i>.</p> <note> <p>DMS supports the interaction
     * described preceding between the <code>CdcInsertsOnly</code> and
     * <code>IncludeOpForFullLoad</code> parameters in versions 3.1.4 and later.</p>
     * <p> <code>CdcInsertsOnly</code> and <code>CdcInsertsAndUpdates</code> can't
     * both be set to <code>true</code> for the same endpoint. Set either
     * <code>CdcInsertsOnly</code> or <code>CdcInsertsAndUpdates</code> to
     * <code>true</code> for the same endpoint, but not both.</p> </note>
     */
    inline bool GetCdcInsertsOnly() const{ return m_cdcInsertsOnly; }
    inline bool CdcInsertsOnlyHasBeenSet() const { return m_cdcInsertsOnlyHasBeenSet; }
    inline void SetCdcInsertsOnly(bool value) { m_cdcInsertsOnlyHasBeenSet = true; m_cdcInsertsOnly = value; }
    inline S3Settings& WithCdcInsertsOnly(bool value) { SetCdcInsertsOnly(value); return *this;}
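    /*
     * Illustrative sketch: a CDC load that migrates only INSERTs and keeps the
     * leading I annotation so full-load and CDC records share one format. As
     * documented above, CdcInsertsOnly and CdcInsertsAndUpdates must not both
     * be true on the same endpoint.
     *
     *   s3Settings.WithIncludeOpForFullLoad(true)
     *             .WithCdcInsertsOnly(true);
     */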
    /**
     * <p>A value that when nonblank causes DMS to add a column with timestamp
     * information to the endpoint data for an Amazon S3 target.</p> <note> <p>DMS
     * supports the <code>TimestampColumnName</code> parameter in versions 3.1.4 and
     * later.</p> </note> <p>DMS includes an additional <code>STRING</code> column in
     * the .csv or .parquet object files of your migrated data when you set
     * <code>TimestampColumnName</code> to a nonblank value.</p> <p>For a full load,
     * each row of this timestamp column contains a timestamp for when the data was
     * transferred from the source to the target by DMS.</p> <p>For a change data
     * capture (CDC) load, each row of the timestamp column contains the timestamp
     * for the commit of that row in the source database.</p> <p>The string format
     * for this timestamp column value is <code>yyyy-MM-dd HH:mm:ss.SSSSSS</code>. By
     * default, the precision of this value is in microseconds. For a CDC load, the
     * rounding of the precision depends on the commit timestamp supported by DMS for
     * the source database.</p> <p>When the <code>AddColumnName</code> parameter is
     * set to <code>true</code>, DMS also includes a name for the timestamp column
     * that you set with <code>TimestampColumnName</code>.</p>
     */
    inline const Aws::String& GetTimestampColumnName() const{ return m_timestampColumnName; }
    inline bool TimestampColumnNameHasBeenSet() const { return m_timestampColumnNameHasBeenSet; }
    inline void SetTimestampColumnName(const Aws::String& value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName = value; }
    inline void SetTimestampColumnName(Aws::String&& value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName = std::move(value); }
    inline void SetTimestampColumnName(const char* value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName.assign(value); }
    inline S3Settings& WithTimestampColumnName(const Aws::String& value) { SetTimestampColumnName(value); return *this;}
    inline S3Settings& WithTimestampColumnName(Aws::String&& value) { SetTimestampColumnName(std::move(value)); return *this;}
    inline S3Settings& WithTimestampColumnName(const char* value) { SetTimestampColumnName(value); return *this;}
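    /*
     * Illustrative sketch: adding a commit-timestamp column to the migrated
     * data. The column name here is a placeholder chosen for the example.
     *
     *   s3Settings.WithTimestampColumnName("dms_commit_ts");
     */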
    /**
     * <p>A value that specifies the precision of any <code>TIMESTAMP</code> column
     * values that are written to an Amazon S3 object file in .parquet format.</p>
     * <note> <p>DMS supports the <code>ParquetTimestampInMillisecond</code>
     * parameter in versions 3.1.4 and later.</p> </note> <p>When
     * <code>ParquetTimestampInMillisecond</code> is set to <code>true</code> or
     * <code>y</code>, DMS writes all <code>TIMESTAMP</code> columns in a .parquet
     * formatted file with millisecond precision. Otherwise, DMS writes them with
     * microsecond precision.</p> <p>Currently, Amazon Athena and Glue can handle
     * only millisecond precision for <code>TIMESTAMP</code> values. Set this
     * parameter to <code>true</code> for S3 endpoint object files that are .parquet
     * formatted only if you plan to query or process the data with Athena or
     * Glue.</p> <note> <p>DMS writes any <code>TIMESTAMP</code> column values
     * written to an S3 file in .csv format with microsecond precision.</p>
     * <p>Setting <code>ParquetTimestampInMillisecond</code> has no effect on the
     * string format of the timestamp column value that is inserted by setting the
     * <code>TimestampColumnName</code> parameter.</p> </note>
     */
    inline bool GetParquetTimestampInMillisecond() const{ return m_parquetTimestampInMillisecond; }
    inline bool ParquetTimestampInMillisecondHasBeenSet() const { return m_parquetTimestampInMillisecondHasBeenSet; }
    inline void SetParquetTimestampInMillisecond(bool value) { m_parquetTimestampInMillisecondHasBeenSet = true; m_parquetTimestampInMillisecond = value; }
    inline S3Settings& WithParquetTimestampInMillisecond(bool value) { SetParquetTimestampInMillisecond(value); return *this;}
A value that enables a change data capture (CDC) load to write INSERT and
* UPDATE operations to .csv or .parquet (columnar storage) output files. The
* default setting is false
, but when
* CdcInsertsAndUpdates
is set to true
or y
,
* only INSERTs and UPDATEs from the source database are migrated to the .csv or
* .parquet file.
DMS supports the use of the .parquet files in * versions 3.4.7 and later.
How these INSERTs and UPDATEs are
* recorded depends on the value of the IncludeOpForFullLoad
* parameter. If IncludeOpForFullLoad
is set to true
, the
* first field of every CDC record is set to either I
or
* U
to indicate INSERT and UPDATE operations at the source. But if
* IncludeOpForFullLoad
is set to false
, CDC records are
* written without an indication of INSERT or UPDATE operations at the source. For
* more information about how these settings work together, see Indicating
* Source DB Operations in Migrated S3 Data in the Database Migration
* Service User Guide..
DMS supports the use of the
* CdcInsertsAndUpdates
parameter in versions 3.3.1 and later.
* CdcInsertsOnly
and CdcInsertsAndUpdates
can't both be
* set to true
for the same endpoint. Set either
* CdcInsertsOnly
or CdcInsertsAndUpdates
to
* true
for the same endpoint, but not both.
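/**
 * Usage sketch (illustrative, not part of the generated API): enabling
 * CdcInsertsAndUpdates while honoring its mutual exclusion with
 * CdcInsertsOnly. The fluent setters used are the accessors defined in this
 * class.
 * @code
 * Aws::DatabaseMigrationService::Model::S3Settings settings;
 * settings.WithCdcInsertsAndUpdates(true);
 * // Do not also call WithCdcInsertsOnly(true); the two flags can't both be
 * // true for the same endpoint.
 * @endcode
 */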
/**
 * When set to true, this parameter partitions S3 bucket folders based on
 * transaction commit dates. The default value is false. For more information
 * about date-based folder partitioning, see "Using date-based folder
 * partitioning".
 */
inline bool GetDatePartitionEnabled() const{ return m_datePartitionEnabled; }
inline bool DatePartitionEnabledHasBeenSet() const { return m_datePartitionEnabledHasBeenSet; }
inline void SetDatePartitionEnabled(bool value) { m_datePartitionEnabledHasBeenSet = true; m_datePartitionEnabled = value; }
inline S3Settings& WithDatePartitionEnabled(bool value) { SetDatePartitionEnabled(value); return *this;}
/**
 * Identifies the sequence of the date format to use during folder
 * partitioning. The default value is YYYYMMDD. Use this parameter when
 * DatePartitionEnabled is set to true.
 */
inline const DatePartitionSequenceValue& GetDatePartitionSequence() const{ return m_datePartitionSequence; }
inline bool DatePartitionSequenceHasBeenSet() const { return m_datePartitionSequenceHasBeenSet; }
inline void SetDatePartitionSequence(const DatePartitionSequenceValue& value) { m_datePartitionSequenceHasBeenSet = true; m_datePartitionSequence = value; }
inline void SetDatePartitionSequence(DatePartitionSequenceValue&& value) { m_datePartitionSequenceHasBeenSet = true; m_datePartitionSequence = std::move(value); }
inline S3Settings& WithDatePartitionSequence(const DatePartitionSequenceValue& value) { SetDatePartitionSequence(value); return *this;}
inline S3Settings& WithDatePartitionSequence(DatePartitionSequenceValue&& value) { SetDatePartitionSequence(std::move(value)); return *this;}
/**
 * Specifies a date-separating delimiter to use during folder partitioning. The
 * default value is SLASH. Use this parameter when DatePartitionEnabled is set
 * to true.
 */
inline const DatePartitionDelimiterValue& GetDatePartitionDelimiter() const{ return m_datePartitionDelimiter; }
inline bool DatePartitionDelimiterHasBeenSet() const { return m_datePartitionDelimiterHasBeenSet; }
inline void SetDatePartitionDelimiter(const DatePartitionDelimiterValue& value) { m_datePartitionDelimiterHasBeenSet = true; m_datePartitionDelimiter = value; }
inline void SetDatePartitionDelimiter(DatePartitionDelimiterValue&& value) { m_datePartitionDelimiterHasBeenSet = true; m_datePartitionDelimiter = std::move(value); }
inline S3Settings& WithDatePartitionDelimiter(const DatePartitionDelimiterValue& value) { SetDatePartitionDelimiter(value); return *this;}
inline S3Settings& WithDatePartitionDelimiter(DatePartitionDelimiterValue&& value) { SetDatePartitionDelimiter(std::move(value)); return *this;}
/**
 * This setting applies if the S3 output files during a change data capture
 * (CDC) load are written in .csv format. If set to true, then for columns not
 * included in the supplemental log, DMS uses the value specified by
 * CsvNoSupValue. If not set or set to false, DMS uses the null value for these
 * columns.
 *
 * This setting is supported in DMS versions 3.4.1 and later.
 */
inline bool GetUseCsvNoSupValue() const{ return m_useCsvNoSupValue; }
inline bool UseCsvNoSupValueHasBeenSet() const { return m_useCsvNoSupValueHasBeenSet; }
inline void SetUseCsvNoSupValue(bool value) { m_useCsvNoSupValueHasBeenSet = true; m_useCsvNoSupValue = value; }
inline S3Settings& WithUseCsvNoSupValue(bool value) { SetUseCsvNoSupValue(value); return *this;}
/**
 * This setting only applies if your Amazon S3 output files during a change
 * data capture (CDC) load are written in .csv format. If UseCsvNoSupValue is
 * set to true, specify a string value that you want DMS to use for all columns
 * not included in the supplemental log. If you do not specify a string value,
 * DMS uses the null value for these columns regardless of the UseCsvNoSupValue
 * setting.
 *
 * This setting is supported in DMS versions 3.4.1 and later.
 */
inline const Aws::String& GetCsvNoSupValue() const{ return m_csvNoSupValue; }
inline bool CsvNoSupValueHasBeenSet() const { return m_csvNoSupValueHasBeenSet; }
inline void SetCsvNoSupValue(const Aws::String& value) { m_csvNoSupValueHasBeenSet = true; m_csvNoSupValue = value; }
inline void SetCsvNoSupValue(Aws::String&& value) { m_csvNoSupValueHasBeenSet = true; m_csvNoSupValue = std::move(value); }
inline void SetCsvNoSupValue(const char* value) { m_csvNoSupValueHasBeenSet = true; m_csvNoSupValue.assign(value); }
inline S3Settings& WithCsvNoSupValue(const Aws::String& value) { SetCsvNoSupValue(value); return *this;}
inline S3Settings& WithCsvNoSupValue(Aws::String&& value) { SetCsvNoSupValue(std::move(value)); return *this;}
inline S3Settings& WithCsvNoSupValue(const char* value) { SetCsvNoSupValue(value); return *this;}
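/**
 * Usage sketch (illustrative only): writing a marker string, here the
 * hypothetical value "NOT_SUPPLEMENTALLY_LOGGED", for columns missing from the
 * supplemental log instead of writing null.
 * @code
 * Aws::DatabaseMigrationService::Model::S3Settings settings;
 * settings.WithUseCsvNoSupValue(true)
 *         .WithCsvNoSupValue("NOT_SUPPLEMENTALLY_LOGGED");
 * @endcode
 */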
/**
 * If set to true, DMS saves the transaction order for a change data capture
 * (CDC) load on the Amazon S3 target specified by CdcPath. For more
 * information, see "Capturing data changes (CDC) including transaction order
 * on the S3 target".
 *
 * This setting is supported in DMS versions 3.4.2 and later.
 */
inline bool GetPreserveTransactions() const{ return m_preserveTransactions; }
inline bool PreserveTransactionsHasBeenSet() const { return m_preserveTransactionsHasBeenSet; }
inline void SetPreserveTransactions(bool value) { m_preserveTransactionsHasBeenSet = true; m_preserveTransactions = value; }
inline S3Settings& WithPreserveTransactions(bool value) { SetPreserveTransactions(value); return *this;}
/**
 * Specifies the folder path of CDC files. For an S3 source, this setting is
 * required if a task captures change data; otherwise, it's optional. If
 * CdcPath is set, DMS reads CDC files from this path and replicates the data
 * changes to the target endpoint. For an S3 target, if you set
 * PreserveTransactions to true, DMS verifies that you have set this parameter
 * to a folder path on your S3 target where DMS can save the transaction order
 * for the CDC load. DMS creates this CDC folder path in either your S3 target
 * working directory or the S3 target location specified by BucketFolder and
 * BucketName.
 *
 * For example, if you specify CdcPath as MyChangedData, and you specify
 * BucketName as MyTargetBucket but do not specify BucketFolder, DMS creates
 * the following CDC folder path: MyTargetBucket/MyChangedData.
 *
 * If you specify the same CdcPath, and you specify BucketName as
 * MyTargetBucket and BucketFolder as MyTargetData, DMS creates the following
 * CDC folder path: MyTargetBucket/MyTargetData/MyChangedData.
 *
 * For more information on CDC including transaction order on an S3 target, see
 * "Capturing data changes (CDC) including transaction order on the S3 target".
 *
 * This setting is supported in DMS versions 3.4.2 and later.
 */
inline const Aws::String& GetCdcPath() const{ return m_cdcPath; }
inline bool CdcPathHasBeenSet() const { return m_cdcPathHasBeenSet; }
inline void SetCdcPath(const Aws::String& value) { m_cdcPathHasBeenSet = true; m_cdcPath = value; }
inline void SetCdcPath(Aws::String&& value) { m_cdcPathHasBeenSet = true; m_cdcPath = std::move(value); }
inline void SetCdcPath(const char* value) { m_cdcPathHasBeenSet = true; m_cdcPath.assign(value); }
inline S3Settings& WithCdcPath(const Aws::String& value) { SetCdcPath(value); return *this;}
inline S3Settings& WithCdcPath(Aws::String&& value) { SetCdcPath(std::move(value)); return *this;}
inline S3Settings& WithCdcPath(const char* value) { SetCdcPath(value); return *this;}
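/**
 * Usage sketch (illustrative only): with the values below, and per the path
 * rules documented above, DMS would write ordered CDC files under
 * MyTargetBucket/MyTargetData/MyChangedData.
 * @code
 * Aws::DatabaseMigrationService::Model::S3Settings settings;
 * settings.WithBucketName("MyTargetBucket")
 *         .WithBucketFolder("MyTargetData")
 *         .WithPreserveTransactions(true)
 *         .WithCdcPath("MyChangedData");
 * @endcode
 */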
/**
 * When set to true, this parameter uses the task start time as the timestamp
 * column value instead of the time the data is written to the target. For a
 * full load, when useTaskStartTimeForFullLoadTimestamp is set to true, each
 * row of the timestamp column contains the task start time. For CDC loads,
 * each row of the timestamp column contains the transaction commit time.
 *
 * When useTaskStartTimeForFullLoadTimestamp is set to false, the full load
 * timestamp in the timestamp column increments with the time the data arrives
 * at the target.
 */
inline bool GetUseTaskStartTimeForFullLoadTimestamp() const{ return m_useTaskStartTimeForFullLoadTimestamp; }
inline bool UseTaskStartTimeForFullLoadTimestampHasBeenSet() const { return m_useTaskStartTimeForFullLoadTimestampHasBeenSet; }
inline void SetUseTaskStartTimeForFullLoadTimestamp(bool value) { m_useTaskStartTimeForFullLoadTimestampHasBeenSet = true; m_useTaskStartTimeForFullLoadTimestamp = value; }
inline S3Settings& WithUseTaskStartTimeForFullLoadTimestamp(bool value) { SetUseTaskStartTimeForFullLoadTimestamp(value); return *this;}
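/**
 * Usage sketch (illustrative only): stamping full-load rows with the task
 * start time instead of the write time.
 * @code
 * Aws::DatabaseMigrationService::Model::S3Settings settings;
 * settings.WithUseTaskStartTimeForFullLoadTimestamp(true);
 * @endcode
 */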
/**
 * A value that enables DMS to specify a predefined (canned) access control
 * list for objects created in an Amazon S3 bucket as .csv or .parquet files.
 * For more information about Amazon S3 canned ACLs, see "Canned ACL" in the
 * Amazon S3 Developer Guide.
 *
 * The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ,
 * PUBLIC_READ_WRITE, AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and
 * BUCKET_OWNER_FULL_CONTROL.
 */
inline const CannedAclForObjectsValue& GetCannedAclForObjects() const{ return m_cannedAclForObjects; }
inline bool CannedAclForObjectsHasBeenSet() const { return m_cannedAclForObjectsHasBeenSet; }
inline void SetCannedAclForObjects(const CannedAclForObjectsValue& value) { m_cannedAclForObjectsHasBeenSet = true; m_cannedAclForObjects = value; }
inline void SetCannedAclForObjects(CannedAclForObjectsValue&& value) { m_cannedAclForObjectsHasBeenSet = true; m_cannedAclForObjects = std::move(value); }
inline S3Settings& WithCannedAclForObjects(const CannedAclForObjectsValue& value) { SetCannedAclForObjects(value); return *this;}
inline S3Settings& WithCannedAclForObjects(CannedAclForObjectsValue&& value) { SetCannedAclForObjects(std::move(value)); return *this;}
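/**
 * Usage sketch (illustrative only): granting the bucket owner full control of
 * the objects DMS creates. The enum spelling is assumed from the
 * CannedAclForObjectsValue header in this SDK.
 * @code
 * Aws::DatabaseMigrationService::Model::S3Settings settings;
 * settings.WithCannedAclForObjects(CannedAclForObjectsValue::BUCKET_OWNER_FULL_CONTROL);
 * @endcode
 */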
/**
 * An optional parameter that, when set to true or y, adds column name
 * information to the .csv output file.
 *
 * The default value is false. Valid values are true, false, y, and n.
 */
inline bool GetAddColumnName() const{ return m_addColumnName; }
inline bool AddColumnNameHasBeenSet() const { return m_addColumnNameHasBeenSet; }
inline void SetAddColumnName(bool value) { m_addColumnNameHasBeenSet = true; m_addColumnName = value; }
inline S3Settings& WithAddColumnName(bool value) { SetAddColumnName(value); return *this;}
/**
 * Maximum length of the interval, defined in seconds, after which to output a
 * file to Amazon S3.
 *
 * When CdcMaxBatchInterval and CdcMinFileSize are both specified, the file
 * write is triggered by whichever parameter condition is met first within a
 * DMS CloudFormation template.
 *
 * The default value is 60 seconds.
 */
inline int GetCdcMaxBatchInterval() const{ return m_cdcMaxBatchInterval; }
inline bool CdcMaxBatchIntervalHasBeenSet() const { return m_cdcMaxBatchIntervalHasBeenSet; }
inline void SetCdcMaxBatchInterval(int value) { m_cdcMaxBatchIntervalHasBeenSet = true; m_cdcMaxBatchInterval = value; }
inline S3Settings& WithCdcMaxBatchInterval(int value) { SetCdcMaxBatchInterval(value); return *this;}
/**
 * Minimum file size, defined in kilobytes, to reach for a file output to
 * Amazon S3.
 *
 * When CdcMinFileSize and CdcMaxBatchInterval are both specified, the file
 * write is triggered by whichever parameter condition is met first within a
 * DMS CloudFormation template.
 *
 * The default value is 32 MB.
 */
inline int GetCdcMinFileSize() const{ return m_cdcMinFileSize; }
inline bool CdcMinFileSizeHasBeenSet() const { return m_cdcMinFileSizeHasBeenSet; }
inline void SetCdcMinFileSize(int value) { m_cdcMinFileSizeHasBeenSet = true; m_cdcMinFileSize = value; }
inline S3Settings& WithCdcMinFileSize(int value) { SetCdcMinFileSize(value); return *this;}
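/**
 * Usage sketch (illustrative only): whichever threshold is met first triggers
 * the CDC file write; here, 120 seconds or 64,000 KB.
 * @code
 * Aws::DatabaseMigrationService::Model::S3Settings settings;
 * settings.WithCdcMaxBatchInterval(120)   // seconds
 *         .WithCdcMinFileSize(64000);     // kilobytes
 * @endcode
 */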
/**
 * An optional parameter that specifies how DMS treats null values. While
 * handling the null value, you can use this parameter to pass a user-defined
 * string as null when writing to the target. For example, when target columns
 * are not nullable, you can use this option to differentiate between the empty
 * string value and the null value. So, if you set this parameter value to the
 * empty string ("" or ''), DMS treats the empty string as the null value
 * instead of NULL.
 *
 * The default value is NULL. Valid values include any valid string.
 */
inline const Aws::String& GetCsvNullValue() const{ return m_csvNullValue; }
inline bool CsvNullValueHasBeenSet() const { return m_csvNullValueHasBeenSet; }
inline void SetCsvNullValue(const Aws::String& value) { m_csvNullValueHasBeenSet = true; m_csvNullValue = value; }
inline void SetCsvNullValue(Aws::String&& value) { m_csvNullValueHasBeenSet = true; m_csvNullValue = std::move(value); }
inline void SetCsvNullValue(const char* value) { m_csvNullValueHasBeenSet = true; m_csvNullValue.assign(value); }
inline S3Settings& WithCsvNullValue(const Aws::String& value) { SetCsvNullValue(value); return *this;}
inline S3Settings& WithCsvNullValue(Aws::String&& value) { SetCsvNullValue(std::move(value)); return *this;}
inline S3Settings& WithCsvNullValue(const char* value) { SetCsvNullValue(value); return *this;}
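/**
 * Usage sketch (illustrative only): making the empty string represent null so
 * that empty strings and nulls remain distinguishable in non-nullable target
 * columns.
 * @code
 * Aws::DatabaseMigrationService::Model::S3Settings settings;
 * settings.WithCsvNullValue("");   // DMS now writes "" where a column is null
 * @endcode
 */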
/**
 * When this value is set to 1, DMS ignores the first row header in a .csv
 * file. A value of 1 turns on the feature; a value of 0 turns off the feature.
 *
 * The default is 0.
 */
inline int GetIgnoreHeaderRows() const{ return m_ignoreHeaderRows; }
inline bool IgnoreHeaderRowsHasBeenSet() const { return m_ignoreHeaderRowsHasBeenSet; }
inline void SetIgnoreHeaderRows(int value) { m_ignoreHeaderRowsHasBeenSet = true; m_ignoreHeaderRows = value; }
inline S3Settings& WithIgnoreHeaderRows(int value) { SetIgnoreHeaderRows(value); return *this;}
/**
 * A value that specifies the maximum size (in KB) of any .csv file to be
 * created while migrating to an S3 target during full load.
 *
 * The default value is 1,048,576 KB (1 GB). Valid values include 1 to
 * 1,048,576.
 */
inline int GetMaxFileSize() const{ return m_maxFileSize; }
inline bool MaxFileSizeHasBeenSet() const { return m_maxFileSizeHasBeenSet; }
inline void SetMaxFileSize(int value) { m_maxFileSizeHasBeenSet = true; m_maxFileSize = value; }
inline S3Settings& WithMaxFileSize(int value) { SetMaxFileSize(value); return *this;}
/**
 * For an S3 source, when this value is set to true or y, each leading double
 * quotation mark has to be followed by an ending double quotation mark. This
 * formatting complies with RFC 4180. When this value is set to false or n,
 * string literals are copied to the target as is. In this case, a delimiter
 * (row or column) signals the end of the field. Thus, you can't use a
 * delimiter as part of the string, because it signals the end of the value.
 *
 * For an S3 target, an optional parameter used to set behavior to comply with
 * RFC 4180 for data migrated to Amazon S3 using .csv file format only. When
 * this value is set to true or y using Amazon S3 as a target, if the data has
 * quotation marks or newline characters in it, DMS encloses the entire column
 * with an additional pair of double quotation marks ("). Every quotation mark
 * within the data is repeated twice.
 *
 * The default value is true. Valid values include true, false, y, and n.
 */
inline bool GetRfc4180() const{ return m_rfc4180; }
inline bool Rfc4180HasBeenSet() const { return m_rfc4180HasBeenSet; }
inline void SetRfc4180(bool value) { m_rfc4180HasBeenSet = true; m_rfc4180 = value; }
inline S3Settings& WithRfc4180(bool value) { SetRfc4180(value); return *this;}
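/**
 * Usage sketch (illustrative only): opting out of RFC 4180 quoting so string
 * literals are copied to the target as is.
 * @code
 * Aws::DatabaseMigrationService::Model::S3Settings settings;
 * settings.WithRfc4180(false);
 * @endcode
 */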
/**
 * When creating an S3 target endpoint, set DatePartitionTimezone to convert
 * the current UTC time into a specified time zone. The conversion occurs when
 * a date partition folder is created and a CDC filename is generated. The time
 * zone format is Area/Location. Use this parameter when DatePartitionEnabled
 * is set to true, as shown in the following example.
 *
 * s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence":
 * "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH",
 * "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
 */
inline const Aws::String& GetDatePartitionTimezone() const{ return m_datePartitionTimezone; }
inline bool DatePartitionTimezoneHasBeenSet() const { return m_datePartitionTimezoneHasBeenSet; }
inline void SetDatePartitionTimezone(const Aws::String& value) { m_datePartitionTimezoneHasBeenSet = true; m_datePartitionTimezone = value; }
inline void SetDatePartitionTimezone(Aws::String&& value) { m_datePartitionTimezoneHasBeenSet = true; m_datePartitionTimezone = std::move(value); }
inline void SetDatePartitionTimezone(const char* value) { m_datePartitionTimezoneHasBeenSet = true; m_datePartitionTimezone.assign(value); }
inline S3Settings& WithDatePartitionTimezone(const Aws::String& value) { SetDatePartitionTimezone(value); return *this;}
inline S3Settings& WithDatePartitionTimezone(Aws::String&& value) { SetDatePartitionTimezone(std::move(value)); return *this;}
inline S3Settings& WithDatePartitionTimezone(const char* value) { SetDatePartitionTimezone(value); return *this;}
/**
 * Use the S3 target endpoint setting AddTrailingPaddingCharacter to add
 * padding on string data. The default value is false.
 */
inline bool GetAddTrailingPaddingCharacter() const{ return m_addTrailingPaddingCharacter; }
inline bool AddTrailingPaddingCharacterHasBeenSet() const { return m_addTrailingPaddingCharacterHasBeenSet; }
inline void SetAddTrailingPaddingCharacter(bool value) { m_addTrailingPaddingCharacterHasBeenSet = true; m_addTrailingPaddingCharacter = value; }
inline S3Settings& WithAddTrailingPaddingCharacter(bool value) { SetAddTrailingPaddingCharacter(value); return *this;}
/**
 * To specify a bucket owner and prevent sniping, you can use the
 * ExpectedBucketOwner endpoint setting.
 *
 * Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
 *
 * When you make a request to test a connection or perform a migration, S3
 * checks the account ID of the bucket owner against the specified parameter.
 */
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
inline S3Settings& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
inline S3Settings& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
inline S3Settings& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
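/**
 * Usage sketch (illustrative only): pinning the expected bucket owner so the
 * connection test fails if the bucket belongs to a different account. The
 * account ID shown is a placeholder.
 * @code
 * Aws::DatabaseMigrationService::Model::S3Settings settings;
 * settings.WithExpectedBucketOwner("123456789012");
 * @endcode
 */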
/**
 * When true, allows Glue to catalog your S3 bucket. Creating a Glue catalog
 * lets you use Athena to query your data.
 */
inline bool GetGlueCatalogGeneration() const{ return m_glueCatalogGeneration; }
inline bool GlueCatalogGenerationHasBeenSet() const { return m_glueCatalogGenerationHasBeenSet; }
inline void SetGlueCatalogGeneration(bool value) { m_glueCatalogGenerationHasBeenSet = true; m_glueCatalogGeneration = value; }
inline S3Settings& WithGlueCatalogGeneration(bool value) { SetGlueCatalogGeneration(value); return *this;}

private:

    Aws::String m_serviceAccessRoleArn;
    bool m_serviceAccessRoleArnHasBeenSet = false;

    Aws::String m_externalTableDefinition;
    bool m_externalTableDefinitionHasBeenSet = false;

    Aws::String m_csvRowDelimiter;
    bool m_csvRowDelimiterHasBeenSet = false;

    Aws::String m_csvDelimiter;
    bool m_csvDelimiterHasBeenSet = false;

    Aws::String m_bucketFolder;
    bool m_bucketFolderHasBeenSet = false;

    Aws::String m_bucketName;
    bool m_bucketNameHasBeenSet = false;

    CompressionTypeValue m_compressionType;
    bool m_compressionTypeHasBeenSet = false;

    EncryptionModeValue m_encryptionMode;
    bool m_encryptionModeHasBeenSet = false;

    Aws::String m_serverSideEncryptionKmsKeyId;
    bool m_serverSideEncryptionKmsKeyIdHasBeenSet = false;

    DataFormatValue m_dataFormat;
    bool m_dataFormatHasBeenSet = false;

    EncodingTypeValue m_encodingType;
    bool m_encodingTypeHasBeenSet = false;

    int m_dictPageSizeLimit;
    bool m_dictPageSizeLimitHasBeenSet = false;

    int m_rowGroupLength;
    bool m_rowGroupLengthHasBeenSet = false;

    int m_dataPageSize;
    bool m_dataPageSizeHasBeenSet = false;

    ParquetVersionValue m_parquetVersion;
    bool m_parquetVersionHasBeenSet = false;

    bool m_enableStatistics;
    bool m_enableStatisticsHasBeenSet = false;

    bool m_includeOpForFullLoad;
    bool m_includeOpForFullLoadHasBeenSet = false;

    bool m_cdcInsertsOnly;
    bool m_cdcInsertsOnlyHasBeenSet = false;

    Aws::String m_timestampColumnName;
    bool m_timestampColumnNameHasBeenSet = false;

    bool m_parquetTimestampInMillisecond;
    bool m_parquetTimestampInMillisecondHasBeenSet = false;

    bool m_cdcInsertsAndUpdates;
    bool m_cdcInsertsAndUpdatesHasBeenSet = false;

    bool m_datePartitionEnabled;
    bool m_datePartitionEnabledHasBeenSet = false;

    DatePartitionSequenceValue m_datePartitionSequence;
    bool m_datePartitionSequenceHasBeenSet = false;

    DatePartitionDelimiterValue m_datePartitionDelimiter;
    bool m_datePartitionDelimiterHasBeenSet = false;

    bool m_useCsvNoSupValue;
    bool m_useCsvNoSupValueHasBeenSet = false;

    Aws::String m_csvNoSupValue;
    bool m_csvNoSupValueHasBeenSet = false;

    bool m_preserveTransactions;
    bool m_preserveTransactionsHasBeenSet = false;

    Aws::String m_cdcPath;
    bool m_cdcPathHasBeenSet = false;

    bool m_useTaskStartTimeForFullLoadTimestamp;
    bool m_useTaskStartTimeForFullLoadTimestampHasBeenSet = false;

    CannedAclForObjectsValue m_cannedAclForObjects;
    bool m_cannedAclForObjectsHasBeenSet = false;

    bool m_addColumnName;
    bool m_addColumnNameHasBeenSet = false;

    int m_cdcMaxBatchInterval;
    bool m_cdcMaxBatchIntervalHasBeenSet = false;

    int m_cdcMinFileSize;
    bool m_cdcMinFileSizeHasBeenSet = false;

    Aws::String m_csvNullValue;
    bool m_csvNullValueHasBeenSet = false;

    int m_ignoreHeaderRows;
    bool m_ignoreHeaderRowsHasBeenSet = false;

    int m_maxFileSize;
    bool m_maxFileSizeHasBeenSet = false;

    bool m_rfc4180;
    bool m_rfc4180HasBeenSet = false;

    Aws::String m_datePartitionTimezone;
    bool m_datePartitionTimezoneHasBeenSet = false;

    bool m_addTrailingPaddingCharacter;
    bool m_addTrailingPaddingCharacterHasBeenSet = false;

    Aws::String m_expectedBucketOwner;
    bool m_expectedBucketOwnerHasBeenSet = false;

    bool m_glueCatalogGeneration;
    bool m_glueCatalogGenerationHasBeenSet = false;
  };

} // namespace Model
} // namespace DatabaseMigrationService
} // namespace Aws