/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#pragma once
#include <aws/dms/DatabaseMigrationService_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/dms/model/EncryptionModeValue.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace DatabaseMigrationService
{
namespace Model
{

  /**
   * Provides information that defines an Amazon Redshift endpoint.
   *
   * See Also: AWS API Reference
   */
  class AWS_DATABASEMIGRATIONSERVICE_API RedshiftSettings
  {
  public:
    RedshiftSettings();
    RedshiftSettings(Aws::Utils::Json::JsonView jsonValue);
    RedshiftSettings& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;

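    /*
     * Usage sketch (illustrative only, not part of the generated API surface):
     * builds a RedshiftSettings value with the fluent With* setters defined
     * below. The endpoint values shown are placeholders.
     *
     *   Aws::DatabaseMigrationService::Model::RedshiftSettings settings;
     *   settings.WithServerName("example-cluster.abc123.us-east-1.redshift.amazonaws.com")
     *           .WithPort(5439)
     *           .WithDatabaseName("dev")
     *           .WithBucketName("example-dms-staging-bucket")
     *           .WithServiceAccessRoleArn("arn:aws:iam::123456789012:role/example-dms-redshift-role");
     */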
    /**
     * A value that indicates to allow any date format, including invalid formats
     * such as 00/00/00 00:00:00, to be loaded without generating an error. You can
     * choose true or false (the default). This parameter applies only to TIMESTAMP
     * and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT parameter. If
     * the date format for the data doesn't match the DATEFORMAT specification,
     * Amazon Redshift inserts a NULL value into that field.
     */
    inline bool GetAcceptAnyDate() const{ return m_acceptAnyDate; }
    inline bool AcceptAnyDateHasBeenSet() const { return m_acceptAnyDateHasBeenSet; }
    inline void SetAcceptAnyDate(bool value) { m_acceptAnyDateHasBeenSet = true; m_acceptAnyDate = value; }
    inline RedshiftSettings& WithAcceptAnyDate(bool value) { SetAcceptAnyDate(value); return *this;}
    /**
     * Code to run after connecting. This parameter should contain the code itself,
     * not the name of a file containing the code.
     */
    inline const Aws::String& GetAfterConnectScript() const{ return m_afterConnectScript; }
    inline bool AfterConnectScriptHasBeenSet() const { return m_afterConnectScriptHasBeenSet; }
    inline void SetAfterConnectScript(const Aws::String& value) { m_afterConnectScriptHasBeenSet = true; m_afterConnectScript = value; }
    inline void SetAfterConnectScript(Aws::String&& value) { m_afterConnectScriptHasBeenSet = true; m_afterConnectScript = std::move(value); }
    inline void SetAfterConnectScript(const char* value) { m_afterConnectScriptHasBeenSet = true; m_afterConnectScript.assign(value); }
    inline RedshiftSettings& WithAfterConnectScript(const Aws::String& value) { SetAfterConnectScript(value); return *this;}
    inline RedshiftSettings& WithAfterConnectScript(Aws::String&& value) { SetAfterConnectScript(std::move(value)); return *this;}
    inline RedshiftSettings& WithAfterConnectScript(const char* value) { SetAfterConnectScript(value); return *this;}
    /**
     * An S3 folder where the comma-separated-value (.csv) files are stored before
     * being uploaded to the target Redshift cluster.
     *
     * For full load mode, DMS converts source records into .csv files and loads
     * them to the BucketFolder/TableID path. DMS uses the Redshift COPY command to
     * upload the .csv files to the target table. The files are deleted once the
     * COPY operation has finished. For more information, see COPY in the Amazon
     * Redshift Database Developer Guide.
     *
     * For change-data-capture (CDC) mode, DMS creates a NetChanges table, and
     * loads the .csv files to this BucketFolder/NetChangesTableID path.
     */
    inline const Aws::String& GetBucketFolder() const{ return m_bucketFolder; }
    inline bool BucketFolderHasBeenSet() const { return m_bucketFolderHasBeenSet; }
    inline void SetBucketFolder(const Aws::String& value) { m_bucketFolderHasBeenSet = true; m_bucketFolder = value; }
    inline void SetBucketFolder(Aws::String&& value) { m_bucketFolderHasBeenSet = true; m_bucketFolder = std::move(value); }
    inline void SetBucketFolder(const char* value) { m_bucketFolderHasBeenSet = true; m_bucketFolder.assign(value); }
    inline RedshiftSettings& WithBucketFolder(const Aws::String& value) { SetBucketFolder(value); return *this;}
    inline RedshiftSettings& WithBucketFolder(Aws::String&& value) { SetBucketFolder(std::move(value)); return *this;}
    inline RedshiftSettings& WithBucketFolder(const char* value) { SetBucketFolder(value); return *this;}
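    /*
     * Illustration (assumed layout, based on the description above): with
     * BucketName "example-dms-staging-bucket" and BucketFolder "dms-staging",
     * full-load .csv files are staged under a path of the form
     *
     *   s3://example-dms-staging-bucket/dms-staging/<TableID>/
     *
     * and CDC .csv files under
     *
     *   s3://example-dms-staging-bucket/dms-staging/<NetChangesTableID>/
     *
     * before the Redshift COPY command loads them and DMS deletes them.
     */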
    /**
     * The name of the intermediate S3 bucket used to store .csv files before
     * uploading data to Redshift.
     */
    inline const Aws::String& GetBucketName() const{ return m_bucketName; }
    inline bool BucketNameHasBeenSet() const { return m_bucketNameHasBeenSet; }
    inline void SetBucketName(const Aws::String& value) { m_bucketNameHasBeenSet = true; m_bucketName = value; }
    inline void SetBucketName(Aws::String&& value) { m_bucketNameHasBeenSet = true; m_bucketName = std::move(value); }
    inline void SetBucketName(const char* value) { m_bucketNameHasBeenSet = true; m_bucketName.assign(value); }
    inline RedshiftSettings& WithBucketName(const Aws::String& value) { SetBucketName(value); return *this;}
    inline RedshiftSettings& WithBucketName(Aws::String&& value) { SetBucketName(std::move(value)); return *this;}
    inline RedshiftSettings& WithBucketName(const char* value) { SetBucketName(value); return *this;}
    /**
     * If Amazon Redshift is configured to support case sensitive schema names, set
     * CaseSensitiveNames to true. The default is false.
     */
    inline bool GetCaseSensitiveNames() const{ return m_caseSensitiveNames; }
    inline bool CaseSensitiveNamesHasBeenSet() const { return m_caseSensitiveNamesHasBeenSet; }
    inline void SetCaseSensitiveNames(bool value) { m_caseSensitiveNamesHasBeenSet = true; m_caseSensitiveNames = value; }
    inline RedshiftSettings& WithCaseSensitiveNames(bool value) { SetCaseSensitiveNames(value); return *this;}
    /**
     * If you set CompUpdate to true, Amazon Redshift applies automatic compression
     * if the table is empty. This applies even if the table columns already have
     * encodings other than RAW. If you set CompUpdate to false, automatic
     * compression is disabled and existing column encodings aren't changed. The
     * default is true.
     */
    inline bool GetCompUpdate() const{ return m_compUpdate; }
    inline bool CompUpdateHasBeenSet() const { return m_compUpdateHasBeenSet; }
    inline void SetCompUpdate(bool value) { m_compUpdateHasBeenSet = true; m_compUpdate = value; }
    inline RedshiftSettings& WithCompUpdate(bool value) { SetCompUpdate(value); return *this;}
    /**
     * A value that sets the amount of time to wait (in milliseconds) before timing
     * out, beginning from when you initially establish a connection.
     */
    inline int GetConnectionTimeout() const{ return m_connectionTimeout; }
    inline bool ConnectionTimeoutHasBeenSet() const { return m_connectionTimeoutHasBeenSet; }
    inline void SetConnectionTimeout(int value) { m_connectionTimeoutHasBeenSet = true; m_connectionTimeout = value; }
    inline RedshiftSettings& WithConnectionTimeout(int value) { SetConnectionTimeout(value); return *this;}
    /**
     * The name of the Amazon Redshift data warehouse (service) that you are
     * working with.
     */
    inline const Aws::String& GetDatabaseName() const{ return m_databaseName; }
    inline bool DatabaseNameHasBeenSet() const { return m_databaseNameHasBeenSet; }
    inline void SetDatabaseName(const Aws::String& value) { m_databaseNameHasBeenSet = true; m_databaseName = value; }
    inline void SetDatabaseName(Aws::String&& value) { m_databaseNameHasBeenSet = true; m_databaseName = std::move(value); }
    inline void SetDatabaseName(const char* value) { m_databaseNameHasBeenSet = true; m_databaseName.assign(value); }
    inline RedshiftSettings& WithDatabaseName(const Aws::String& value) { SetDatabaseName(value); return *this;}
    inline RedshiftSettings& WithDatabaseName(Aws::String&& value) { SetDatabaseName(std::move(value)); return *this;}
    inline RedshiftSettings& WithDatabaseName(const char* value) { SetDatabaseName(value); return *this;}
    /**
     * The date format that you are using. Valid values are auto (case-sensitive),
     * your date format string enclosed in quotes, or NULL. If this parameter is
     * left unset (NULL), it defaults to a format of 'YYYY-MM-DD'. Using auto
     * recognizes most strings, even some that aren't supported when you use a date
     * format string.
     *
     * If your date and time values use formats different from each other, set this
     * to auto.
     */
    inline const Aws::String& GetDateFormat() const{ return m_dateFormat; }
    inline bool DateFormatHasBeenSet() const { return m_dateFormatHasBeenSet; }
    inline void SetDateFormat(const Aws::String& value) { m_dateFormatHasBeenSet = true; m_dateFormat = value; }
    inline void SetDateFormat(Aws::String&& value) { m_dateFormatHasBeenSet = true; m_dateFormat = std::move(value); }
    inline void SetDateFormat(const char* value) { m_dateFormatHasBeenSet = true; m_dateFormat.assign(value); }
    inline RedshiftSettings& WithDateFormat(const Aws::String& value) { SetDateFormat(value); return *this;}
    inline RedshiftSettings& WithDateFormat(Aws::String&& value) { SetDateFormat(std::move(value)); return *this;}
    inline RedshiftSettings& WithDateFormat(const char* value) { SetDateFormat(value); return *this;}
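    /*
     * Sketch (illustrative): when source date and time values use mixed formats,
     * the settings described above are typically combined, for example:
     *
     *   settings.SetDateFormat("auto");
     *   settings.SetTimeFormat("auto");
     *   settings.SetAcceptAnyDate(true);   // pair ACCEPTANYDATE with DATEFORMAT
     */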
    /**
     * A value that specifies whether DMS should migrate empty CHAR and VARCHAR
     * fields as NULL. A value of true sets empty CHAR and VARCHAR fields to null.
     * The default is false.
     */
    inline bool GetEmptyAsNull() const{ return m_emptyAsNull; }
    inline bool EmptyAsNullHasBeenSet() const { return m_emptyAsNullHasBeenSet; }
    inline void SetEmptyAsNull(bool value) { m_emptyAsNullHasBeenSet = true; m_emptyAsNull = value; }
    inline RedshiftSettings& WithEmptyAsNull(bool value) { SetEmptyAsNull(value); return *this;}
    /**
     * The type of server-side encryption that you want to use for your data. This
     * encryption type is part of the endpoint settings or the extra connections
     * attributes for Amazon S3. You can choose either SSE_S3 (the default) or
     * SSE_KMS.
     *
     * For the ModifyEndpoint operation, you can change the existing value of the
     * EncryptionMode parameter from SSE_KMS to SSE_S3. But you can't change the
     * existing value from SSE_S3 to SSE_KMS.
     *
     * To use SSE_S3, create an Identity and Access Management (IAM) role with a
     * policy that allows "arn:aws:s3:::*" to use the following actions:
     * "s3:PutObject", "s3:ListBucket".
     */
    // EncryptionModeValue is the SSE_S3 / SSE_KMS enumeration for this setting.
    inline const EncryptionModeValue& GetEncryptionMode() const{ return m_encryptionMode; }
    inline bool EncryptionModeHasBeenSet() const { return m_encryptionModeHasBeenSet; }
    inline void SetEncryptionMode(const EncryptionModeValue& value) { m_encryptionModeHasBeenSet = true; m_encryptionMode = value; }
    inline void SetEncryptionMode(EncryptionModeValue&& value) { m_encryptionModeHasBeenSet = true; m_encryptionMode = std::move(value); }
    inline RedshiftSettings& WithEncryptionMode(const EncryptionModeValue& value) { SetEncryptionMode(value); return *this;}
    inline RedshiftSettings& WithEncryptionMode(EncryptionModeValue&& value) { SetEncryptionMode(std::move(value)); return *this;}
    /**
     * This setting is only valid for a full-load migration task. Set ExplicitIds
     * to true to have tables with IDENTITY columns override their auto-generated
     * values with explicit values loaded from the source data files used to
     * populate the tables. The default is false.
     */
    inline bool GetExplicitIds() const{ return m_explicitIds; }
    inline bool ExplicitIdsHasBeenSet() const { return m_explicitIdsHasBeenSet; }
    inline void SetExplicitIds(bool value) { m_explicitIdsHasBeenSet = true; m_explicitIds = value; }
    inline RedshiftSettings& WithExplicitIds(bool value) { SetExplicitIds(value); return *this;}
    /**
     * The number of threads used to upload a single file, that is, the number of
     * parallel streams used to upload a single .csv file to an S3 bucket using S3
     * Multipart Upload. For more information, see Multipart upload overview.
     *
     * FileTransferUploadStreams accepts a value from 1 through 64. It defaults to
     * 10.
     */
    inline int GetFileTransferUploadStreams() const{ return m_fileTransferUploadStreams; }
    inline bool FileTransferUploadStreamsHasBeenSet() const { return m_fileTransferUploadStreamsHasBeenSet; }
    inline void SetFileTransferUploadStreams(int value) { m_fileTransferUploadStreamsHasBeenSet = true; m_fileTransferUploadStreams = value; }
    inline RedshiftSettings& WithFileTransferUploadStreams(int value) { SetFileTransferUploadStreams(value); return *this;}
    /**
     * The amount of time to wait (in milliseconds) before timing out of operations
     * performed by DMS on a Redshift cluster, such as Redshift COPY, INSERT,
     * DELETE, and UPDATE.
     */
    inline int GetLoadTimeout() const{ return m_loadTimeout; }
    inline bool LoadTimeoutHasBeenSet() const { return m_loadTimeoutHasBeenSet; }
    inline void SetLoadTimeout(int value) { m_loadTimeoutHasBeenSet = true; m_loadTimeout = value; }
    inline RedshiftSettings& WithLoadTimeout(int value) { SetLoadTimeout(value); return *this;}
    /**
     * The maximum size (in KB) of any .csv file used to load data on an S3 bucket
     * and transfer data to Amazon Redshift. It defaults to 1048576KB (1 GB).
     */
    inline int GetMaxFileSize() const{ return m_maxFileSize; }
    inline bool MaxFileSizeHasBeenSet() const { return m_maxFileSizeHasBeenSet; }
    inline void SetMaxFileSize(int value) { m_maxFileSizeHasBeenSet = true; m_maxFileSize = value; }
    inline RedshiftSettings& WithMaxFileSize(int value) { SetMaxFileSize(value); return *this;}
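    /*
     * Sketch (illustrative): the staging and upload settings above are commonly
     * tuned together for large full loads. The numbers here are only examples,
     * not recommendations.
     *
     *   settings.SetMaxFileSize(32768);              // 32 MB .csv files
     *   settings.SetFileTransferUploadStreams(20);   // parallel multipart streams
     *   settings.SetLoadTimeout(600000);             // 10 minutes, in milliseconds
     */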
    /**
     * The password for the user named in the username property.
     */
    inline const Aws::String& GetPassword() const{ return m_password; }
    inline bool PasswordHasBeenSet() const { return m_passwordHasBeenSet; }
    inline void SetPassword(const Aws::String& value) { m_passwordHasBeenSet = true; m_password = value; }
    inline void SetPassword(Aws::String&& value) { m_passwordHasBeenSet = true; m_password = std::move(value); }
    inline void SetPassword(const char* value) { m_passwordHasBeenSet = true; m_password.assign(value); }
    inline RedshiftSettings& WithPassword(const Aws::String& value) { SetPassword(value); return *this;}
    inline RedshiftSettings& WithPassword(Aws::String&& value) { SetPassword(std::move(value)); return *this;}
    inline RedshiftSettings& WithPassword(const char* value) { SetPassword(value); return *this;}
    /**
     * The port number for Amazon Redshift. The default value is 5439.
     */
    inline int GetPort() const{ return m_port; }
    inline bool PortHasBeenSet() const { return m_portHasBeenSet; }
    inline void SetPort(int value) { m_portHasBeenSet = true; m_port = value; }
    inline RedshiftSettings& WithPort(int value) { SetPort(value); return *this;}
    /**
     * A value that specifies to remove surrounding quotation marks from strings in
     * the incoming data. All characters within the quotation marks, including
     * delimiters, are retained. Choose true to remove quotation marks. The default
     * is false.
     */
    inline bool GetRemoveQuotes() const{ return m_removeQuotes; }
    inline bool RemoveQuotesHasBeenSet() const { return m_removeQuotesHasBeenSet; }
    inline void SetRemoveQuotes(bool value) { m_removeQuotesHasBeenSet = true; m_removeQuotes = value; }
    inline RedshiftSettings& WithRemoveQuotes(bool value) { SetRemoveQuotes(value); return *this;}
    /**
     * A list of characters that you want to replace. Use with ReplaceChars.
     */
    inline const Aws::String& GetReplaceInvalidChars() const{ return m_replaceInvalidChars; }
    inline bool ReplaceInvalidCharsHasBeenSet() const { return m_replaceInvalidCharsHasBeenSet; }
    inline void SetReplaceInvalidChars(const Aws::String& value) { m_replaceInvalidCharsHasBeenSet = true; m_replaceInvalidChars = value; }
    inline void SetReplaceInvalidChars(Aws::String&& value) { m_replaceInvalidCharsHasBeenSet = true; m_replaceInvalidChars = std::move(value); }
    inline void SetReplaceInvalidChars(const char* value) { m_replaceInvalidCharsHasBeenSet = true; m_replaceInvalidChars.assign(value); }
    inline RedshiftSettings& WithReplaceInvalidChars(const Aws::String& value) { SetReplaceInvalidChars(value); return *this;}
    inline RedshiftSettings& WithReplaceInvalidChars(Aws::String&& value) { SetReplaceInvalidChars(std::move(value)); return *this;}
    inline RedshiftSettings& WithReplaceInvalidChars(const char* value) { SetReplaceInvalidChars(value); return *this;}
    /**
     * A value that specifies to replace the invalid characters specified in
     * ReplaceInvalidChars, substituting the specified characters instead. The
     * default is "?".
     */
    inline const Aws::String& GetReplaceChars() const{ return m_replaceChars; }
    inline bool ReplaceCharsHasBeenSet() const { return m_replaceCharsHasBeenSet; }
    inline void SetReplaceChars(const Aws::String& value) { m_replaceCharsHasBeenSet = true; m_replaceChars = value; }
    inline void SetReplaceChars(Aws::String&& value) { m_replaceCharsHasBeenSet = true; m_replaceChars = std::move(value); }
    inline void SetReplaceChars(const char* value) { m_replaceCharsHasBeenSet = true; m_replaceChars.assign(value); }
    inline RedshiftSettings& WithReplaceChars(const Aws::String& value) { SetReplaceChars(value); return *this;}
    inline RedshiftSettings& WithReplaceChars(Aws::String&& value) { SetReplaceChars(std::move(value)); return *this;}
    inline RedshiftSettings& WithReplaceChars(const char* value) { SetReplaceChars(value); return *this;}
    /**
     * The name of the Amazon Redshift cluster you are using.
     */
    inline const Aws::String& GetServerName() const{ return m_serverName; }
    inline bool ServerNameHasBeenSet() const { return m_serverNameHasBeenSet; }
    inline void SetServerName(const Aws::String& value) { m_serverNameHasBeenSet = true; m_serverName = value; }
    inline void SetServerName(Aws::String&& value) { m_serverNameHasBeenSet = true; m_serverName = std::move(value); }
    inline void SetServerName(const char* value) { m_serverNameHasBeenSet = true; m_serverName.assign(value); }
    inline RedshiftSettings& WithServerName(const Aws::String& value) { SetServerName(value); return *this;}
    inline RedshiftSettings& WithServerName(Aws::String&& value) { SetServerName(std::move(value)); return *this;}
    inline RedshiftSettings& WithServerName(const char* value) { SetServerName(value); return *this;}
    /**
     * The Amazon Resource Name (ARN) of the IAM role that has access to the Amazon
     * Redshift service. The role must allow the iam:PassRole action.
     */
    inline const Aws::String& GetServiceAccessRoleArn() const{ return m_serviceAccessRoleArn; }
    inline bool ServiceAccessRoleArnHasBeenSet() const { return m_serviceAccessRoleArnHasBeenSet; }
    inline void SetServiceAccessRoleArn(const Aws::String& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = value; }
    inline void SetServiceAccessRoleArn(Aws::String&& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = std::move(value); }
    inline void SetServiceAccessRoleArn(const char* value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn.assign(value); }
    inline RedshiftSettings& WithServiceAccessRoleArn(const Aws::String& value) { SetServiceAccessRoleArn(value); return *this;}
    inline RedshiftSettings& WithServiceAccessRoleArn(Aws::String&& value) { SetServiceAccessRoleArn(std::move(value)); return *this;}
    inline RedshiftSettings& WithServiceAccessRoleArn(const char* value) { SetServiceAccessRoleArn(value); return *this;}
    /**
     * The KMS key ID. If you are using SSE_KMS for the EncryptionMode, provide
     * this key ID. The key that you use needs an attached policy that enables IAM
     * user permissions and allows use of the key.
     */
    inline const Aws::String& GetServerSideEncryptionKmsKeyId() const{ return m_serverSideEncryptionKmsKeyId; }
    inline bool ServerSideEncryptionKmsKeyIdHasBeenSet() const { return m_serverSideEncryptionKmsKeyIdHasBeenSet; }
    inline void SetServerSideEncryptionKmsKeyId(const Aws::String& value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId = value; }
    inline void SetServerSideEncryptionKmsKeyId(Aws::String&& value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId = std::move(value); }
    inline void SetServerSideEncryptionKmsKeyId(const char* value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId.assign(value); }
    inline RedshiftSettings& WithServerSideEncryptionKmsKeyId(const Aws::String& value) { SetServerSideEncryptionKmsKeyId(value); return *this;}
    inline RedshiftSettings& WithServerSideEncryptionKmsKeyId(Aws::String&& value) { SetServerSideEncryptionKmsKeyId(std::move(value)); return *this;}
    inline RedshiftSettings& WithServerSideEncryptionKmsKeyId(const char* value) { SetServerSideEncryptionKmsKeyId(value); return *this;}
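    /*
     * Sketch (illustrative): when SSE_KMS is chosen, EncryptionMode and
     * ServerSideEncryptionKmsKeyId are set together. This assumes an SSE_KMS
     * enumerator on the EncryptionModeValue enum; the key ID is a placeholder.
     *
     *   settings.SetEncryptionMode(EncryptionModeValue::SSE_KMS);
     *   settings.SetServerSideEncryptionKmsKeyId("1234abcd-12ab-34cd-56ef-1234567890ab");
     */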
    /**
     * The time format that you want to use. Valid values are auto
     * (case-sensitive), 'timeformat_string', 'epochsecs', or 'epochmillisecs'.
     * Using auto recognizes most strings, even some that aren't supported when you
     * use a time format string.
     *
     * If your date and time values use formats different from each other, set this
     * parameter to auto.
     */
    inline const Aws::String& GetTimeFormat() const{ return m_timeFormat; }
    inline bool TimeFormatHasBeenSet() const { return m_timeFormatHasBeenSet; }
    inline void SetTimeFormat(const Aws::String& value) { m_timeFormatHasBeenSet = true; m_timeFormat = value; }
    inline void SetTimeFormat(Aws::String&& value) { m_timeFormatHasBeenSet = true; m_timeFormat = std::move(value); }
    inline void SetTimeFormat(const char* value) { m_timeFormatHasBeenSet = true; m_timeFormat.assign(value); }
    inline RedshiftSettings& WithTimeFormat(const Aws::String& value) { SetTimeFormat(value); return *this;}
    inline RedshiftSettings& WithTimeFormat(Aws::String&& value) { SetTimeFormat(std::move(value)); return *this;}
    inline RedshiftSettings& WithTimeFormat(const char* value) { SetTimeFormat(value); return *this;}
    /**
     * A value that specifies to remove the trailing white space characters from a
     * VARCHAR string. This parameter applies only to columns with a VARCHAR data
     * type. Choose true to remove unneeded white space. The default is false.
     */
    inline bool GetTrimBlanks() const{ return m_trimBlanks; }
    inline bool TrimBlanksHasBeenSet() const { return m_trimBlanksHasBeenSet; }
    inline void SetTrimBlanks(bool value) { m_trimBlanksHasBeenSet = true; m_trimBlanks = value; }
    inline RedshiftSettings& WithTrimBlanks(bool value) { SetTrimBlanks(value); return *this;}
    /**
     * A value that specifies to truncate data in columns to the appropriate number
     * of characters, so that the data fits in the column. This parameter applies
     * only to columns with a VARCHAR or CHAR data type, and rows with a size of 4
     * MB or less. Choose true to truncate data. The default is false.
     */
    inline bool GetTruncateColumns() const{ return m_truncateColumns; }
    inline bool TruncateColumnsHasBeenSet() const { return m_truncateColumnsHasBeenSet; }
    inline void SetTruncateColumns(bool value) { m_truncateColumnsHasBeenSet = true; m_truncateColumns = value; }
    inline RedshiftSettings& WithTruncateColumns(bool value) { SetTruncateColumns(value); return *this;}
    /**
     * An Amazon Redshift user name for a registered user.
     */
    inline const Aws::String& GetUsername() const{ return m_username; }
    inline bool UsernameHasBeenSet() const { return m_usernameHasBeenSet; }
    inline void SetUsername(const Aws::String& value) { m_usernameHasBeenSet = true; m_username = value; }
    inline void SetUsername(Aws::String&& value) { m_usernameHasBeenSet = true; m_username = std::move(value); }
    inline void SetUsername(const char* value) { m_usernameHasBeenSet = true; m_username.assign(value); }
    inline RedshiftSettings& WithUsername(const Aws::String& value) { SetUsername(value); return *this;}
    inline RedshiftSettings& WithUsername(Aws::String&& value) { SetUsername(std::move(value)); return *this;}
    inline RedshiftSettings& WithUsername(const char* value) { SetUsername(value); return *this;}
    /**
     * The size (in KB) of the in-memory file write buffer used when generating
     * .csv files on the local disk at the DMS replication instance. The default
     * value is 1000 (buffer size is 1000KB).
     */
    inline int GetWriteBufferSize() const{ return m_writeBufferSize; }
    inline bool WriteBufferSizeHasBeenSet() const { return m_writeBufferSizeHasBeenSet; }
    inline void SetWriteBufferSize(int value) { m_writeBufferSizeHasBeenSet = true; m_writeBufferSize = value; }
    inline RedshiftSettings& WithWriteBufferSize(int value) { SetWriteBufferSize(value); return *this;}
    /**
     * The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as
     * the trusted entity and grants the required permissions to access the value
     * in SecretsManagerSecret. The role must allow the iam:PassRole action.
     * SecretsManagerSecret has the value of the Amazon Web Services Secrets
     * Manager secret that allows access to the Amazon Redshift endpoint.
     *
     * You can specify one of two sets of values for these permissions. You can
     * specify the values for this setting and SecretsManagerSecretId. Or you can
     * specify clear-text values for UserName, Password, ServerName, and Port. You
     * can't specify both. For more information on creating this
     * SecretsManagerSecret and the SecretsManagerAccessRoleArn and
     * SecretsManagerSecretId required to access it, see Using secrets to access
     * Database Migration Service resources in the Database Migration Service User
     * Guide.
     */
    inline const Aws::String& GetSecretsManagerAccessRoleArn() const{ return m_secretsManagerAccessRoleArn; }
    inline bool SecretsManagerAccessRoleArnHasBeenSet() const { return m_secretsManagerAccessRoleArnHasBeenSet; }
    inline void SetSecretsManagerAccessRoleArn(const Aws::String& value) { m_secretsManagerAccessRoleArnHasBeenSet = true; m_secretsManagerAccessRoleArn = value; }
    inline void SetSecretsManagerAccessRoleArn(Aws::String&& value) { m_secretsManagerAccessRoleArnHasBeenSet = true; m_secretsManagerAccessRoleArn = std::move(value); }
    inline void SetSecretsManagerAccessRoleArn(const char* value) { m_secretsManagerAccessRoleArnHasBeenSet = true; m_secretsManagerAccessRoleArn.assign(value); }
    inline RedshiftSettings& WithSecretsManagerAccessRoleArn(const Aws::String& value) { SetSecretsManagerAccessRoleArn(value); return *this;}
    inline RedshiftSettings& WithSecretsManagerAccessRoleArn(Aws::String&& value) { SetSecretsManagerAccessRoleArn(std::move(value)); return *this;}
    inline RedshiftSettings& WithSecretsManagerAccessRoleArn(const char* value) { SetSecretsManagerAccessRoleArn(value); return *this;}
    /**
     * The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that
     * contains the Amazon Redshift endpoint connection details.
     */
    inline const Aws::String& GetSecretsManagerSecretId() const{ return m_secretsManagerSecretId; }
    inline bool SecretsManagerSecretIdHasBeenSet() const { return m_secretsManagerSecretIdHasBeenSet; }
    inline void SetSecretsManagerSecretId(const Aws::String& value) { m_secretsManagerSecretIdHasBeenSet = true; m_secretsManagerSecretId = value; }
    inline void SetSecretsManagerSecretId(Aws::String&& value) { m_secretsManagerSecretIdHasBeenSet = true; m_secretsManagerSecretId = std::move(value); }
    inline void SetSecretsManagerSecretId(const char* value) { m_secretsManagerSecretIdHasBeenSet = true; m_secretsManagerSecretId.assign(value); }
    inline RedshiftSettings& WithSecretsManagerSecretId(const Aws::String& value) { SetSecretsManagerSecretId(value); return *this;}
    inline RedshiftSettings& WithSecretsManagerSecretId(Aws::String&& value) { SetSecretsManagerSecretId(std::move(value)); return *this;}
    inline RedshiftSettings& WithSecretsManagerSecretId(const char* value) { SetSecretsManagerSecretId(value); return *this;}
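    /*
     * Sketch (illustrative): credentials can be supplied in one of two mutually
     * exclusive ways, as described above. The ARNs and values are placeholders.
     *
     *   // Option 1: Secrets Manager
     *   settings.SetSecretsManagerAccessRoleArn("arn:aws:iam::123456789012:role/example-secrets-role");
     *   settings.SetSecretsManagerSecretId("example-redshift-endpoint-secret");
     *
     *   // Option 2: clear-text connection values (don't combine with Option 1)
     *   settings.SetUsername("awsuser");
     *   settings.SetPassword("example-password");
     *   settings.SetServerName("example-cluster.abc123.us-east-1.redshift.amazonaws.com");
     *   settings.SetPort(5439);
     */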
    /**
     * When true, lets Redshift migrate the boolean type as boolean. By default,
     * Redshift migrates booleans as varchar(1).
     */
    inline bool GetMapBooleanAsBoolean() const{ return m_mapBooleanAsBoolean; }
    inline bool MapBooleanAsBooleanHasBeenSet() const { return m_mapBooleanAsBooleanHasBeenSet; }
    inline void SetMapBooleanAsBoolean(bool value) { m_mapBooleanAsBooleanHasBeenSet = true; m_mapBooleanAsBoolean = value; }
    inline RedshiftSettings& WithMapBooleanAsBoolean(bool value) { SetMapBooleanAsBoolean(value); return *this;}

  private:

    bool m_acceptAnyDate; bool m_acceptAnyDateHasBeenSet;
    Aws::String m_afterConnectScript; bool m_afterConnectScriptHasBeenSet;
    Aws::String m_bucketFolder; bool m_bucketFolderHasBeenSet;
    Aws::String m_bucketName; bool m_bucketNameHasBeenSet;
    bool m_caseSensitiveNames; bool m_caseSensitiveNamesHasBeenSet;
    bool m_compUpdate; bool m_compUpdateHasBeenSet;
    int m_connectionTimeout; bool m_connectionTimeoutHasBeenSet;
    Aws::String m_databaseName; bool m_databaseNameHasBeenSet;
    Aws::String m_dateFormat; bool m_dateFormatHasBeenSet;
    bool m_emptyAsNull; bool m_emptyAsNullHasBeenSet;
    EncryptionModeValue m_encryptionMode; bool m_encryptionModeHasBeenSet;
    bool m_explicitIds; bool m_explicitIdsHasBeenSet;
    int m_fileTransferUploadStreams; bool m_fileTransferUploadStreamsHasBeenSet;
    int m_loadTimeout; bool m_loadTimeoutHasBeenSet;
    int m_maxFileSize; bool m_maxFileSizeHasBeenSet;
    Aws::String m_password; bool m_passwordHasBeenSet;
    int m_port; bool m_portHasBeenSet;
    bool m_removeQuotes; bool m_removeQuotesHasBeenSet;
    Aws::String m_replaceInvalidChars; bool m_replaceInvalidCharsHasBeenSet;
    Aws::String m_replaceChars; bool m_replaceCharsHasBeenSet;
    Aws::String m_serverName; bool m_serverNameHasBeenSet;
    Aws::String m_serviceAccessRoleArn; bool m_serviceAccessRoleArnHasBeenSet;
    Aws::String m_serverSideEncryptionKmsKeyId; bool m_serverSideEncryptionKmsKeyIdHasBeenSet;
    Aws::String m_timeFormat; bool m_timeFormatHasBeenSet;
    bool m_trimBlanks; bool m_trimBlanksHasBeenSet;
    bool m_truncateColumns; bool m_truncateColumnsHasBeenSet;
    Aws::String m_username; bool m_usernameHasBeenSet;
    int m_writeBufferSize; bool m_writeBufferSizeHasBeenSet;
    Aws::String m_secretsManagerAccessRoleArn; bool m_secretsManagerAccessRoleArnHasBeenSet;
    Aws::String m_secretsManagerSecretId; bool m_secretsManagerSecretIdHasBeenSet;
    bool m_mapBooleanAsBoolean; bool m_mapBooleanAsBooleanHasBeenSet;
  };

} // namespace Model
} // namespace DatabaseMigrationService
} // namespace Aws