/* * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.databasemigrationservice.model; import java.io.Serializable; import javax.annotation.Generated; import com.amazonaws.protocol.StructuredPojo; import com.amazonaws.protocol.ProtocolMarshaller; /** *
* Provides information that defines an Amazon Redshift endpoint. *
* * @see AWS API * Documentation */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class RedshiftSettings implements Serializable, Cloneable, StructuredPojo { /** *
* A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to be
* loaded without generating an error. You can choose true
or false
(the default).
*
* This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT * parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift inserts a * NULL value into that field. *
*/ private Boolean acceptAnyDate; /** ** Code to run after connecting. This parameter should contain the code itself, not the name of a file containing * the code. *
*/ private String afterConnectScript; /** ** An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target Redshift * cluster. *
*
* For full load mode, DMS converts source records into .csv files and loads them to the BucketFolder/TableID
* path. DMS uses the Redshift COPY
command to upload the .csv files to the target table. The files are
* deleted once the COPY
operation has finished. For more information, see COPY in the Amazon Redshift Database
* Developer Guide.
*
* For change-data-capture (CDC) mode, DMS creates a NetChanges table, and loads the .csv files to this * BucketFolder/NetChangesTableID path. *
*/ private String bucketFolder; /** ** The name of the intermediate S3 bucket used to store .csv files before uploading data to Redshift. *
*/ private String bucketName; /** *
* If Amazon Redshift is configured to support case sensitive schema names, set CaseSensitiveNames
to
* true
. The default is false
.
*
* If you set CompUpdate
to true
Amazon Redshift applies automatic compression if the
* table is empty. This applies even if the table columns already have encodings other than RAW
. If you
* set CompUpdate
to false
, automatic compression is disabled and existing column
* encodings aren't changed. The default is true
.
*
* A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you * initially establish a connection. *
*/ private Integer connectionTimeout; /** ** The name of the Amazon Redshift data warehouse (service) that you are working with. *
*/ private String databaseName; /** *
* The date format that you are using. Valid values are auto
(case-sensitive), your date format string
* enclosed in quotes, or NULL. If this parameter is left unset (NULL), it defaults to a format of 'YYYY-MM-DD'.
* Using auto
recognizes most strings, even some that aren't supported when you use a date format
* string.
*
* If your date and time values use formats different from each other, set this to auto
.
*
* A value that specifies whether DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of
* true
sets empty CHAR and VARCHAR fields to null. The default is false
.
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
* (the default) or SSE_KMS
.
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the
* existing value from SSE_S3
to SSE_KMS
.
*
* To use SSE_S3
, create an Identity and Access Management (IAM) role with a policy that allows
* "arn:aws:s3:::*"
to use the following actions: "s3:PutObject", "s3:ListBucket"
*
* This setting is only valid for a full-load migration task. Set ExplicitIds
to true
to
* have tables with IDENTITY
columns override their auto-generated values with explicit values loaded
* from the source data files used to populate the tables. The default is false
.
*
* The number of threads used to upload a single file. This parameter accepts a value from 1 through 64. It defaults * to 10. *
** The number of parallel streams used to upload a single .csv file to an S3 bucket using S3 Multipart Upload. For * more information, see Multipart upload * overview. *
*
* FileTransferUploadStreams
accepts a value from 1 through 64. It defaults to 10.
*
* The amount of time to wait (in milliseconds) before timing out of operations performed by DMS on a Redshift * cluster, such as Redshift COPY, INSERT, DELETE, and UPDATE. *
*/ private Integer loadTimeout; /** ** The maximum size (in KB) of any .csv file used to load data on an S3 bucket and transfer data to Amazon Redshift. * It defaults to 1048576KB (1 GB). *
*/ private Integer maxFileSize; /** *
* The password for the user named in the username
property.
*
* The port number for Amazon Redshift. The default value is 5439. *
*/ private Integer port; /** *
* A value that specifies to remove surrounding quotation marks from strings in the incoming data. All characters
* within the quotation marks, including delimiters, are retained. Choose true
to remove quotation
* marks. The default is false
.
*
* A list of characters that you want to replace. Use with ReplaceChars
.
*
* A value that specifies to replace the invalid characters specified in ReplaceInvalidChars
,
* substituting the specified characters instead. The default is "?"
.
*
* The name of the Amazon Redshift cluster you are using. *
*/ private String serverName; /** *
* The Amazon Resource Name (ARN) of the IAM role that has access to the Amazon Redshift service. The role must
* allow the iam:PassRole
action.
*
* The KMS key ID. If you are using SSE_KMS
for the EncryptionMode
, provide this key ID.
* The key that you use needs an attached policy that enables IAM user permissions and allows use of the key.
*
* The time format that you want to use. Valid values are auto
(case-sensitive),
* 'timeformat_string'
, 'epochsecs'
, or 'epochmillisecs'
. It defaults to 10.
* Using auto
recognizes most strings, even some that aren't supported when you use a time format
* string.
*
* If your date and time values use formats different from each other, set this parameter to auto
.
*
* A value that specifies to remove the trailing white space characters from a VARCHAR string. This parameter
* applies only to columns with a VARCHAR data type. Choose true
to remove unneeded white space. The
* default is false
.
*
* A value that specifies to truncate data in columns to the appropriate number of characters, so that the data fits
* in the column. This parameter applies only to columns with a VARCHAR or CHAR data type, and rows with a size of 4
* MB or less. Choose true
to truncate data. The default is false
.
*
* An Amazon Redshift user name for a registered user. *
*/ private String username; /** ** The size (in KB) of the in-memory file write buffer used when generating .csv files on the local disk at the DMS * replication instance. The default value is 1000 (buffer size is 1000KB). *
*/ private Integer writeBufferSize; /** *
* The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the
* required permissions to access the value in SecretsManagerSecret
. The role must allow the
* iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services
* Secrets Manager secret that allows access to the Amazon Redshift endpoint.
*
* You can specify one of two sets of values for these permissions. You can specify the values for this setting and
* SecretsManagerSecretId
. Or you can specify clear-text values for UserName
,
* Password
, ServerName
, and Port
. You can't specify both. For more
* information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
* and SecretsManagerSecretId
required to access it, see Using
* secrets to access Database Migration Service resources in the Database Migration Service User Guide.
*
* The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the Amazon
* Redshift endpoint connection details.
*
* When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as
* varchar(1)
.
*
* A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to be
* loaded without generating an error. You can choose true
or false
(the default).
*
* This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT * parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift inserts a * NULL value into that field. *
* * @param acceptAnyDate * A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to * be loaded without generating an error. You can choosetrue
or false
(the
* default).
* * This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT * parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift * inserts a NULL value into that field. */ public void setAcceptAnyDate(Boolean acceptAnyDate) { this.acceptAnyDate = acceptAnyDate; } /** *
* A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to be
* loaded without generating an error. You can choose true
or false
(the default).
*
* This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT * parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift inserts a * NULL value into that field. *
* * @return A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to * be loaded without generating an error. You can choosetrue
or false
(the
* default).
* * This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT * parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift * inserts a NULL value into that field. */ public Boolean getAcceptAnyDate() { return this.acceptAnyDate; } /** *
* A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to be
* loaded without generating an error. You can choose true
or false
(the default).
*
* This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT * parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift inserts a * NULL value into that field. *
* * @param acceptAnyDate * A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to * be loaded without generating an error. You can choosetrue
or false
(the
* default).
* * This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT * parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift * inserts a NULL value into that field. * @return Returns a reference to this object so that method calls can be chained together. */ public RedshiftSettings withAcceptAnyDate(Boolean acceptAnyDate) { setAcceptAnyDate(acceptAnyDate); return this; } /** *
* A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to be
* loaded without generating an error. You can choose true
or false
(the default).
*
* This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT * parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift inserts a * NULL value into that field. *
* * @return A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to * be loaded without generating an error. You can choosetrue
or false
(the
* default).
* * This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT * parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift * inserts a NULL value into that field. */ public Boolean isAcceptAnyDate() { return this.acceptAnyDate; } /** *
* Code to run after connecting. This parameter should contain the code itself, not the name of a file containing * the code. *
* * @param afterConnectScript * Code to run after connecting. This parameter should contain the code itself, not the name of a file * containing the code. */ public void setAfterConnectScript(String afterConnectScript) { this.afterConnectScript = afterConnectScript; } /** ** Code to run after connecting. This parameter should contain the code itself, not the name of a file containing * the code. *
* * @return Code to run after connecting. This parameter should contain the code itself, not the name of a file * containing the code. */ public String getAfterConnectScript() { return this.afterConnectScript; } /** ** Code to run after connecting. This parameter should contain the code itself, not the name of a file containing * the code. *
* * @param afterConnectScript * Code to run after connecting. This parameter should contain the code itself, not the name of a file * containing the code. * @return Returns a reference to this object so that method calls can be chained together. */ public RedshiftSettings withAfterConnectScript(String afterConnectScript) { setAfterConnectScript(afterConnectScript); return this; } /** ** An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target Redshift * cluster. *
*
* For full load mode, DMS converts source records into .csv files and loads them to the BucketFolder/TableID
* path. DMS uses the Redshift COPY
command to upload the .csv files to the target table. The files are
* deleted once the COPY
operation has finished. For more information, see COPY in the Amazon Redshift Database
* Developer Guide.
*
* For change-data-capture (CDC) mode, DMS creates a NetChanges table, and loads the .csv files to this * BucketFolder/NetChangesTableID path. *
* * @param bucketFolder * An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target * Redshift cluster. *
* For full load mode, DMS converts source records into .csv files and loads them to the
* BucketFolder/TableID path. DMS uses the Redshift COPY
command to upload the .csv files
* to the target table. The files are deleted once the COPY
operation has finished. For more
* information, see COPY in the
* Amazon Redshift Database Developer Guide.
*
* For change-data-capture (CDC) mode, DMS creates a NetChanges table, and loads the .csv files to * this BucketFolder/NetChangesTableID path. */ public void setBucketFolder(String bucketFolder) { this.bucketFolder = bucketFolder; } /** *
* An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target Redshift * cluster. *
*
* For full load mode, DMS converts source records into .csv files and loads them to the BucketFolder/TableID
* path. DMS uses the Redshift COPY
command to upload the .csv files to the target table. The files are
* deleted once the COPY
operation has finished. For more information, see COPY in the Amazon Redshift Database
* Developer Guide.
*
* For change-data-capture (CDC) mode, DMS creates a NetChanges table, and loads the .csv files to this * BucketFolder/NetChangesTableID path. *
* * @return An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target * Redshift cluster. *
* For full load mode, DMS converts source records into .csv files and loads them to the
* BucketFolder/TableID path. DMS uses the Redshift COPY
command to upload the .csv
* files to the target table. The files are deleted once the COPY
operation has finished. For
* more information, see COPY in
* the Amazon Redshift Database Developer Guide.
*
* For change-data-capture (CDC) mode, DMS creates a NetChanges table, and loads the .csv files to * this BucketFolder/NetChangesTableID path. */ public String getBucketFolder() { return this.bucketFolder; } /** *
* An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target Redshift * cluster. *
*
* For full load mode, DMS converts source records into .csv files and loads them to the BucketFolder/TableID
* path. DMS uses the Redshift COPY
command to upload the .csv files to the target table. The files are
* deleted once the COPY
operation has finished. For more information, see COPY in the Amazon Redshift Database
* Developer Guide.
*
* For change-data-capture (CDC) mode, DMS creates a NetChanges table, and loads the .csv files to this * BucketFolder/NetChangesTableID path. *
* * @param bucketFolder * An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target * Redshift cluster. *
* For full load mode, DMS converts source records into .csv files and loads them to the
* BucketFolder/TableID path. DMS uses the Redshift COPY
command to upload the .csv files
* to the target table. The files are deleted once the COPY
operation has finished. For more
* information, see COPY in the
* Amazon Redshift Database Developer Guide.
*
* For change-data-capture (CDC) mode, DMS creates a NetChanges table, and loads the .csv files to * this BucketFolder/NetChangesTableID path. * @return Returns a reference to this object so that method calls can be chained together. */ public RedshiftSettings withBucketFolder(String bucketFolder) { setBucketFolder(bucketFolder); return this; } /** *
* The name of the intermediate S3 bucket used to store .csv files before uploading data to Redshift. *
* * @param bucketName * The name of the intermediate S3 bucket used to store .csv files before uploading data to Redshift. */ public void setBucketName(String bucketName) { this.bucketName = bucketName; } /** ** The name of the intermediate S3 bucket used to store .csv files before uploading data to Redshift. *
* * @return The name of the intermediate S3 bucket used to store .csv files before uploading data to Redshift. */ public String getBucketName() { return this.bucketName; } /** ** The name of the intermediate S3 bucket used to store .csv files before uploading data to Redshift. *
* * @param bucketName * The name of the intermediate S3 bucket used to store .csv files before uploading data to Redshift. * @return Returns a reference to this object so that method calls can be chained together. */ public RedshiftSettings withBucketName(String bucketName) { setBucketName(bucketName); return this; } /** *
<p>
* If Amazon Redshift is configured to support case sensitive schema names, set <code>CaseSensitiveNames</code> to
* <code>true</code>. The default is <code>false</code>.
* </p>
*
* @param caseSensitiveNames
*        If Amazon Redshift is configured to support case sensitive schema names, set
*        <code>CaseSensitiveNames</code> to <code>true</code>. The default is <code>false</code>.
*/
public void setCaseSensitiveNames(Boolean caseSensitiveNames) {
this.caseSensitiveNames = caseSensitiveNames;
}
/**
* <p>
* If Amazon Redshift is configured to support case sensitive schema names, set <code>CaseSensitiveNames</code> to
* <code>true</code>. The default is <code>false</code>.
* </p>
*
* @return If Amazon Redshift is configured to support case sensitive schema names, set
*         <code>CaseSensitiveNames</code> to <code>true</code>. The default is <code>false</code>.
*/
public Boolean getCaseSensitiveNames() {
return this.caseSensitiveNames;
}
/**
* <p>
* If Amazon Redshift is configured to support case sensitive schema names, set <code>CaseSensitiveNames</code> to
* <code>true</code>. The default is <code>false</code>.
* </p>
*
* @param caseSensitiveNames
*        If Amazon Redshift is configured to support case sensitive schema names, set
*        <code>CaseSensitiveNames</code> to <code>true</code>. The default is <code>false</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public RedshiftSettings withCaseSensitiveNames(Boolean caseSensitiveNames) {
setCaseSensitiveNames(caseSensitiveNames);
return this;
}
/**
* <p>
* If Amazon Redshift is configured to support case sensitive schema names, set <code>CaseSensitiveNames</code> to
* <code>true</code>. The default is <code>false</code>.
* </p>
*
* @return If Amazon Redshift is configured to support case sensitive schema names, set
*         <code>CaseSensitiveNames</code> to <code>true</code>. The default is <code>false</code>.
*/
public Boolean isCaseSensitiveNames() {
return this.caseSensitiveNames;
}
/**
* <p>
* If you set <code>CompUpdate</code> to <code>true</code> Amazon Redshift applies automatic compression if the
* table is empty. This applies even if the table columns already have encodings other than <code>RAW</code>. If you
* set <code>CompUpdate</code> to <code>false</code>, automatic compression is disabled and existing column
* encodings aren't changed. The default is <code>true</code>.
* </p>
*
* @param compUpdate
*        If you set <code>CompUpdate</code> to <code>true</code> Amazon Redshift applies automatic compression if
*        the table is empty. This applies even if the table columns already have encodings other than
*        <code>RAW</code>. If you set <code>CompUpdate</code> to <code>false</code>, automatic compression is
*        disabled and existing column encodings aren't changed. The default is <code>true</code>.
*/
public void setCompUpdate(Boolean compUpdate) {
this.compUpdate = compUpdate;
}
/**
* <p>
* If you set <code>CompUpdate</code> to <code>true</code> Amazon Redshift applies automatic compression if the
* table is empty. This applies even if the table columns already have encodings other than <code>RAW</code>. If you
* set <code>CompUpdate</code> to <code>false</code>, automatic compression is disabled and existing column
* encodings aren't changed. The default is <code>true</code>.
* </p>
*
* @return If you set <code>CompUpdate</code> to <code>true</code> Amazon Redshift applies automatic compression if
*         the table is empty. This applies even if the table columns already have encodings other than
*         <code>RAW</code>. If you set <code>CompUpdate</code> to <code>false</code>, automatic compression is
*         disabled and existing column encodings aren't changed. The default is <code>true</code>.
*/
public Boolean getCompUpdate() {
return this.compUpdate;
}
/**
* <p>
* If you set <code>CompUpdate</code> to <code>true</code> Amazon Redshift applies automatic compression if the
* table is empty. This applies even if the table columns already have encodings other than <code>RAW</code>. If you
* set <code>CompUpdate</code> to <code>false</code>, automatic compression is disabled and existing column
* encodings aren't changed. The default is <code>true</code>.
* </p>
*
* @param compUpdate
*        If you set <code>CompUpdate</code> to <code>true</code> Amazon Redshift applies automatic compression if
*        the table is empty. This applies even if the table columns already have encodings other than
*        <code>RAW</code>. If you set <code>CompUpdate</code> to <code>false</code>, automatic compression is
*        disabled and existing column encodings aren't changed. The default is <code>true</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public RedshiftSettings withCompUpdate(Boolean compUpdate) {
setCompUpdate(compUpdate);
return this;
}
/**
* <p>
* If you set <code>CompUpdate</code> to <code>true</code> Amazon Redshift applies automatic compression if the
* table is empty. This applies even if the table columns already have encodings other than <code>RAW</code>. If you
* set <code>CompUpdate</code> to <code>false</code>, automatic compression is disabled and existing column
* encodings aren't changed. The default is <code>true</code>.
* </p>
*
* @return If you set <code>CompUpdate</code> to <code>true</code> Amazon Redshift applies automatic compression if
*         the table is empty. This applies even if the table columns already have encodings other than
*         <code>RAW</code>. If you set <code>CompUpdate</code> to <code>false</code>, automatic compression is
*         disabled and existing column encodings aren't changed. The default is <code>true</code>.
*/
public Boolean isCompUpdate() {
return this.compUpdate;
}
/**
* * A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you * initially establish a connection. *
* * @param connectionTimeout * A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you * initially establish a connection. */ public void setConnectionTimeout(Integer connectionTimeout) { this.connectionTimeout = connectionTimeout; } /** ** A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you * initially establish a connection. *
* * @return A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you * initially establish a connection. */ public Integer getConnectionTimeout() { return this.connectionTimeout; } /** ** A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you * initially establish a connection. *
* * @param connectionTimeout * A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you * initially establish a connection. * @return Returns a reference to this object so that method calls can be chained together. */ public RedshiftSettings withConnectionTimeout(Integer connectionTimeout) { setConnectionTimeout(connectionTimeout); return this; } /** ** The name of the Amazon Redshift data warehouse (service) that you are working with. *
* * @param databaseName * The name of the Amazon Redshift data warehouse (service) that you are working with. */ public void setDatabaseName(String databaseName) { this.databaseName = databaseName; } /** ** The name of the Amazon Redshift data warehouse (service) that you are working with. *
* * @return The name of the Amazon Redshift data warehouse (service) that you are working with. */ public String getDatabaseName() { return this.databaseName; } /** ** The name of the Amazon Redshift data warehouse (service) that you are working with. *
* * @param databaseName * The name of the Amazon Redshift data warehouse (service) that you are working with. * @return Returns a reference to this object so that method calls can be chained together. */ public RedshiftSettings withDatabaseName(String databaseName) { setDatabaseName(databaseName); return this; } /** *
<p>
* The date format that you are using. Valid values are <code>auto</code> (case-sensitive), your date format string
* enclosed in quotes, or NULL. If this parameter is left unset (NULL), it defaults to a format of 'YYYY-MM-DD'.
* Using <code>auto</code> recognizes most strings, even some that aren't supported when you use a date format
* string.
* </p>
* <p>
* If your date and time values use formats different from each other, set this to <code>auto</code>.
* </p>
*
* @param dateFormat
*        The date format that you are using. Valid values are <code>auto</code> (case-sensitive), your date format
*        string enclosed in quotes, or NULL. If this parameter is left unset (NULL), it defaults to a format of
*        'YYYY-MM-DD'. Using <code>auto</code> recognizes most strings, even some that aren't supported when you
*        use a date format string.</p>
*        <p>
*        If your date and time values use formats different from each other, set this to <code>auto</code>.
*/
public void setDateFormat(String dateFormat) {
this.dateFormat = dateFormat;
}
/**
* <p>
* The date format that you are using. Valid values are <code>auto</code> (case-sensitive), your date format string
* enclosed in quotes, or NULL. If this parameter is left unset (NULL), it defaults to a format of 'YYYY-MM-DD'.
* Using <code>auto</code> recognizes most strings, even some that aren't supported when you use a date format
* string.
* </p>
* <p>
* If your date and time values use formats different from each other, set this to <code>auto</code>.
* </p>
*
* @return The date format that you are using. Valid values are <code>auto</code> (case-sensitive), your date format
*         string enclosed in quotes, or NULL. If this parameter is left unset (NULL), it defaults to a format of
*         'YYYY-MM-DD'. Using <code>auto</code> recognizes most strings, even some that aren't supported when you
*         use a date format string.</p>
*         <p>
*         If your date and time values use formats different from each other, set this to <code>auto</code>.
*/
public String getDateFormat() {
return this.dateFormat;
}
/**
* <p>
* The date format that you are using. Valid values are <code>auto</code> (case-sensitive), your date format string
* enclosed in quotes, or NULL. If this parameter is left unset (NULL), it defaults to a format of 'YYYY-MM-DD'.
* Using <code>auto</code> recognizes most strings, even some that aren't supported when you use a date format
* string.
* </p>
* <p>
* If your date and time values use formats different from each other, set this to <code>auto</code>.
* </p>
*
* @param dateFormat
*        The date format that you are using. Valid values are <code>auto</code> (case-sensitive), your date format
*        string enclosed in quotes, or NULL. If this parameter is left unset (NULL), it defaults to a format of
*        'YYYY-MM-DD'. Using <code>auto</code> recognizes most strings, even some that aren't supported when you
*        use a date format string.</p>
*        <p>
*        If your date and time values use formats different from each other, set this to <code>auto</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public RedshiftSettings withDateFormat(String dateFormat) {
setDateFormat(dateFormat);
return this;
}
/**
* <p>
* A value that specifies whether DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of
* <code>true</code> sets empty CHAR and VARCHAR fields to null. The default is <code>false</code>.
* </p>
*
* @param emptyAsNull
*        A value that specifies whether DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of
*        <code>true</code> sets empty CHAR and VARCHAR fields to null. The default is <code>false</code>.
*/
public void setEmptyAsNull(Boolean emptyAsNull) {
this.emptyAsNull = emptyAsNull;
}
/**
* <p>
* A value that specifies whether DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of
* <code>true</code> sets empty CHAR and VARCHAR fields to null. The default is <code>false</code>.
* </p>
*
* @return A value that specifies whether DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of
*         <code>true</code> sets empty CHAR and VARCHAR fields to null. The default is <code>false</code>.
*/
public Boolean getEmptyAsNull() {
return this.emptyAsNull;
}
/**
* <p>
* A value that specifies whether DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of
* <code>true</code> sets empty CHAR and VARCHAR fields to null. The default is <code>false</code>.
* </p>
*
* @param emptyAsNull
*        A value that specifies whether DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of
*        <code>true</code> sets empty CHAR and VARCHAR fields to null. The default is <code>false</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public RedshiftSettings withEmptyAsNull(Boolean emptyAsNull) {
setEmptyAsNull(emptyAsNull);
return this;
}
/**
* <p>
* A value that specifies whether DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of
* <code>true</code> sets empty CHAR and VARCHAR fields to null. The default is <code>false</code>.
* </p>
*
* @return A value that specifies whether DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of
*         <code>true</code> sets empty CHAR and VARCHAR fields to null. The default is <code>false</code>.
*/
public Boolean isEmptyAsNull() {
return this.emptyAsNull;
}
/**
* <p>
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code>
* (the default) or <code>SSE_KMS</code>.
* </p>
* <p>
* For the <code>ModifyEndpoint</code> operation, you can change the existing value of the
* <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to <code>SSE_S3</code>. But you can’t change the
* existing value from <code>SSE_S3</code> to <code>SSE_KMS</code>.
* </p>
* <p>
* To use <code>SSE_S3</code>, create an Identity and Access Management (IAM) role with a policy that allows
* <code>"arn:aws:s3:::*"</code> to use the following actions: <code>"s3:PutObject", "s3:ListBucket"</code>
* </p>
*
* @param encryptionMode
*        The type of server-side encryption that you want to use for your data. This encryption type is part of the
*        endpoint settings or the extra connections attributes for Amazon S3. You can choose either
*        <code>SSE_S3</code> (the default) or <code>SSE_KMS</code>.</p>
*        <p>
*        For the <code>ModifyEndpoint</code> operation, you can change the existing value of the
*        <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to <code>SSE_S3</code>. But you can’t
*        change the existing value from <code>SSE_S3</code> to <code>SSE_KMS</code>.
*        </p>
*        <p>
*        To use <code>SSE_S3</code>, create an Identity and Access Management (IAM) role with a policy that allows
*        <code>"arn:aws:s3:::*"</code> to use the following actions: <code>"s3:PutObject", "s3:ListBucket"</code>
* @see EncryptionModeValue
*/
public void setEncryptionMode(String encryptionMode) {
this.encryptionMode = encryptionMode;
}
/**
* <p>
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code>
* (the default) or <code>SSE_KMS</code>.
* </p>
* <p>
* For the <code>ModifyEndpoint</code> operation, you can change the existing value of the
* <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to <code>SSE_S3</code>. But you can’t change the
* existing value from <code>SSE_S3</code> to <code>SSE_KMS</code>.
* </p>
* <p>
* To use <code>SSE_S3</code>, create an Identity and Access Management (IAM) role with a policy that allows
* <code>"arn:aws:s3:::*"</code> to use the following actions: <code>"s3:PutObject", "s3:ListBucket"</code>
* </p>
*
* @return The type of server-side encryption that you want to use for your data. This encryption type is part of
*         the endpoint settings or the extra connections attributes for Amazon S3. You can choose either
*         <code>SSE_S3</code> (the default) or <code>SSE_KMS</code>.</p>
*         <p>
*         For the <code>ModifyEndpoint</code> operation, you can change the existing value of the
*         <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to <code>SSE_S3</code>. But you can’t
*         change the existing value from <code>SSE_S3</code> to <code>SSE_KMS</code>.
*         </p>
*         <p>
*         To use <code>SSE_S3</code>, create an Identity and Access Management (IAM) role with a policy that allows
*         <code>"arn:aws:s3:::*"</code> to use the following actions: <code>"s3:PutObject", "s3:ListBucket"</code>
* @see EncryptionModeValue
*/
public String getEncryptionMode() {
return this.encryptionMode;
}
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
* (the default) or SSE_KMS
.
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the
* existing value from SSE_S3
to SSE_KMS
.
*
* To use SSE_S3
, create an Identity and Access Management (IAM) role with a policy that allows
* "arn:aws:s3:::*"
to use the following actions: "s3:PutObject", "s3:ListBucket"
*
SSE_S3
(the default) or SSE_KMS
.
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t
* change the existing value from SSE_S3
to SSE_KMS
.
*
* To use SSE_S3
, create an Identity and Access Management (IAM) role with a policy that allows
* "arn:aws:s3:::*"
to use the following actions: "s3:PutObject", "s3:ListBucket"
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncryptionModeValue
*/
public RedshiftSettings withEncryptionMode(String encryptionMode) {
setEncryptionMode(encryptionMode);
return this;
}
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
* (the default) or SSE_KMS
.
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the
* existing value from SSE_S3
to SSE_KMS
.
*
* To use SSE_S3
, create an Identity and Access Management (IAM) role with a policy that allows
* "arn:aws:s3:::*"
to use the following actions: "s3:PutObject", "s3:ListBucket"
*
SSE_S3
(the default) or SSE_KMS
.
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t
* change the existing value from SSE_S3
to SSE_KMS
.
*
* To use SSE_S3
, create an Identity and Access Management (IAM) role with a policy that allows
* "arn:aws:s3:::*"
to use the following actions: "s3:PutObject", "s3:ListBucket"
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncryptionModeValue
*/
public RedshiftSettings withEncryptionMode(EncryptionModeValue encryptionMode) {
this.encryptionMode = encryptionMode.toString();
return this;
}
    /**
     * <p>
     * This setting is only valid for a full-load migration task. Set <code>ExplicitIds</code> to <code>true</code> to
     * have tables with <code>IDENTITY</code> columns override their auto-generated values with explicit values loaded
     * from the source data files used to populate the tables. The default is <code>false</code>.
     * </p>
     *
     * @param explicitIds
     *        <code>true</code> to have <code>IDENTITY</code> columns override auto-generated values with explicit
     *        values from the source data files; the default is <code>false</code>. Full-load tasks only.
     */
    public void setExplicitIds(Boolean explicitIds) {
        this.explicitIds = explicitIds;
    }

    /**
     * <p>
     * This setting is only valid for a full-load migration task. Set <code>ExplicitIds</code> to <code>true</code> to
     * have tables with <code>IDENTITY</code> columns override their auto-generated values with explicit values loaded
     * from the source data files used to populate the tables. The default is <code>false</code>.
     * </p>
     *
     * @return <code>true</code> if <code>IDENTITY</code> columns override auto-generated values with explicit values
     *         from the source data files; the default is <code>false</code>.
     */
    public Boolean getExplicitIds() {
        return this.explicitIds;
    }

    /**
     * <p>
     * Fluent setter for {@link #setExplicitIds(Boolean)}. This setting is only valid for a full-load migration task.
     * Set <code>ExplicitIds</code> to <code>true</code> to have tables with <code>IDENTITY</code> columns override
     * their auto-generated values with explicit values loaded from the source data files. The default is
     * <code>false</code>.
     * </p>
     *
     * @param explicitIds
     *        <code>true</code> to have <code>IDENTITY</code> columns override auto-generated values with explicit
     *        values from the source data files; the default is <code>false</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withExplicitIds(Boolean explicitIds) {
        setExplicitIds(explicitIds);
        return this;
    }

    /**
     * <p>
     * Convenience alias of {@link #getExplicitIds()} following the JavaBeans boolean-accessor convention. This setting
     * is only valid for a full-load migration task.
     * </p>
     *
     * @return <code>true</code> if <code>IDENTITY</code> columns override auto-generated values with explicit values
     *         from the source data files; the default is <code>false</code>.
     */
    public Boolean isExplicitIds() {
        return this.explicitIds;
    }
    /**
     * <p>
     * The number of parallel streams used to upload a single .csv file to an S3 bucket using S3 Multipart Upload. For
     * more information, see <a
     * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html">Multipart upload overview</a>.
     * </p>
     * <p>
     * <code>FileTransferUploadStreams</code> accepts a value from 1 through 64. It defaults to 10.
     * </p>
     *
     * @param fileTransferUploadStreams
     *        The number of parallel streams used to upload a single .csv file to an S3 bucket using S3 Multipart
     *        Upload. Accepts a value from 1 through 64; defaults to 10.
     */
    public void setFileTransferUploadStreams(Integer fileTransferUploadStreams) {
        this.fileTransferUploadStreams = fileTransferUploadStreams;
    }

    /**
     * <p>
     * The number of parallel streams used to upload a single .csv file to an S3 bucket using S3 Multipart Upload. For
     * more information, see <a
     * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html">Multipart upload overview</a>.
     * </p>
     *
     * @return The number of parallel streams used to upload a single .csv file to an S3 bucket using S3 Multipart
     *         Upload. Accepts a value from 1 through 64; defaults to 10.
     */
    public Integer getFileTransferUploadStreams() {
        return this.fileTransferUploadStreams;
    }

    /**
     * <p>
     * Fluent setter for {@link #setFileTransferUploadStreams(Integer)}: the number of parallel streams used to upload
     * a single .csv file to an S3 bucket using S3 Multipart Upload. Accepts a value from 1 through 64; defaults to 10.
     * </p>
     *
     * @param fileTransferUploadStreams
     *        The number of parallel streams used to upload a single .csv file to an S3 bucket using S3 Multipart
     *        Upload. Accepts a value from 1 through 64; defaults to 10.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withFileTransferUploadStreams(Integer fileTransferUploadStreams) {
        setFileTransferUploadStreams(fileTransferUploadStreams);
        return this;
    }
    /**
     * <p>
     * The amount of time to wait (in milliseconds) before timing out of operations performed by DMS on a Redshift
     * cluster, such as Redshift COPY, INSERT, DELETE, and UPDATE.
     * </p>
     *
     * @param loadTimeout
     *        The amount of time to wait (in milliseconds) before timing out of operations performed by DMS on a
     *        Redshift cluster, such as Redshift COPY, INSERT, DELETE, and UPDATE.
     */
    public void setLoadTimeout(Integer loadTimeout) {
        this.loadTimeout = loadTimeout;
    }

    /**
     * <p>
     * The amount of time to wait (in milliseconds) before timing out of operations performed by DMS on a Redshift
     * cluster, such as Redshift COPY, INSERT, DELETE, and UPDATE.
     * </p>
     *
     * @return The amount of time to wait (in milliseconds) before timing out of operations performed by DMS on a
     *         Redshift cluster, such as Redshift COPY, INSERT, DELETE, and UPDATE.
     */
    public Integer getLoadTimeout() {
        return this.loadTimeout;
    }

    /**
     * <p>
     * Fluent setter for {@link #setLoadTimeout(Integer)}: the amount of time to wait (in milliseconds) before timing
     * out of operations performed by DMS on a Redshift cluster.
     * </p>
     *
     * @param loadTimeout
     *        The amount of time to wait (in milliseconds) before timing out of operations performed by DMS on a
     *        Redshift cluster, such as Redshift COPY, INSERT, DELETE, and UPDATE.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withLoadTimeout(Integer loadTimeout) {
        setLoadTimeout(loadTimeout);
        return this;
    }

    /**
     * <p>
     * The maximum size (in KB) of any .csv file used to load data on an S3 bucket and transfer data to Amazon
     * Redshift. It defaults to 1048576KB (1 GB).
     * </p>
     *
     * @param maxFileSize
     *        The maximum size (in KB) of any .csv file used to load data on an S3 bucket and transfer data to Amazon
     *        Redshift. It defaults to 1048576KB (1 GB).
     */
    public void setMaxFileSize(Integer maxFileSize) {
        this.maxFileSize = maxFileSize;
    }

    /**
     * <p>
     * The maximum size (in KB) of any .csv file used to load data on an S3 bucket and transfer data to Amazon
     * Redshift. It defaults to 1048576KB (1 GB).
     * </p>
     *
     * @return The maximum size (in KB) of any .csv file used to load data on an S3 bucket and transfer data to Amazon
     *         Redshift. It defaults to 1048576KB (1 GB).
     */
    public Integer getMaxFileSize() {
        return this.maxFileSize;
    }

    /**
     * <p>
     * Fluent setter for {@link #setMaxFileSize(Integer)}: the maximum size (in KB) of any .csv file used to load data
     * on an S3 bucket and transfer data to Amazon Redshift. It defaults to 1048576KB (1 GB).
     * </p>
     *
     * @param maxFileSize
     *        The maximum size (in KB) of any .csv file used to load data on an S3 bucket and transfer data to Amazon
     *        Redshift. It defaults to 1048576KB (1 GB).
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withMaxFileSize(Integer maxFileSize) {
        setMaxFileSize(maxFileSize);
        return this;
    }

    /**
     * <p>
     * The password for the user named in the <code>username</code> property.
     * </p>
     *
     * @param password
     *        The password for the user named in the <code>username</code> property.
     */
    public void setPassword(String password) {
        this.password = password;
    }

    /**
     * <p>
     * The password for the user named in the <code>username</code> property.
     * </p>
     *
     * @return The password for the user named in the <code>username</code> property.
     */
    public String getPassword() {
        return this.password;
    }

    /**
     * <p>
     * Fluent setter for {@link #setPassword(String)}: the password for the user named in the <code>username</code>
     * property.
     * </p>
     *
     * @param password
     *        The password for the user named in the <code>username</code> property.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withPassword(String password) {
        setPassword(password);
        return this;
    }
    /**
     * <p>
     * The port number for Amazon Redshift. The default value is 5439.
     * </p>
     *
     * @param port
     *        The port number for Amazon Redshift. The default value is 5439.
     */
    public void setPort(Integer port) {
        this.port = port;
    }

    /**
     * <p>
     * The port number for Amazon Redshift. The default value is 5439.
     * </p>
     *
     * @return The port number for Amazon Redshift. The default value is 5439.
     */
    public Integer getPort() {
        return this.port;
    }

    /**
     * <p>
     * Fluent setter for {@link #setPort(Integer)}: the port number for Amazon Redshift. The default value is 5439.
     * </p>
     *
     * @param port
     *        The port number for Amazon Redshift. The default value is 5439.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withPort(Integer port) {
        setPort(port);
        return this;
    }

    /**
     * <p>
     * A value that specifies to remove surrounding quotation marks from strings in the incoming data. All characters
     * within the quotation marks, including delimiters, are retained. Choose <code>true</code> to remove quotation
     * marks. The default is <code>false</code>.
     * </p>
     *
     * @param removeQuotes
     *        <code>true</code> to remove surrounding quotation marks from strings in the incoming data (characters
     *        within the quotation marks, including delimiters, are retained); the default is <code>false</code>.
     */
    public void setRemoveQuotes(Boolean removeQuotes) {
        this.removeQuotes = removeQuotes;
    }

    /**
     * <p>
     * A value that specifies to remove surrounding quotation marks from strings in the incoming data. All characters
     * within the quotation marks, including delimiters, are retained. Choose <code>true</code> to remove quotation
     * marks. The default is <code>false</code>.
     * </p>
     *
     * @return <code>true</code> if surrounding quotation marks are removed from strings in the incoming data; the
     *         default is <code>false</code>.
     */
    public Boolean getRemoveQuotes() {
        return this.removeQuotes;
    }

    /**
     * <p>
     * Fluent setter for {@link #setRemoveQuotes(Boolean)}: choose <code>true</code> to remove surrounding quotation
     * marks from strings in the incoming data. All characters within the quotation marks, including delimiters, are
     * retained. The default is <code>false</code>.
     * </p>
     *
     * @param removeQuotes
     *        <code>true</code> to remove surrounding quotation marks from strings in the incoming data; the default is
     *        <code>false</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withRemoveQuotes(Boolean removeQuotes) {
        setRemoveQuotes(removeQuotes);
        return this;
    }

    /**
     * <p>
     * Convenience alias of {@link #getRemoveQuotes()} following the JavaBeans boolean-accessor convention.
     * </p>
     *
     * @return <code>true</code> if surrounding quotation marks are removed from strings in the incoming data; the
     *         default is <code>false</code>.
     */
    public Boolean isRemoveQuotes() {
        return this.removeQuotes;
    }
    /**
     * <p>
     * A list of characters that you want to replace. Use with <code>ReplaceChars</code>.
     * </p>
     *
     * @param replaceInvalidChars
     *        A list of characters that you want to replace. Use with <code>ReplaceChars</code>.
     */
    public void setReplaceInvalidChars(String replaceInvalidChars) {
        this.replaceInvalidChars = replaceInvalidChars;
    }

    /**
     * <p>
     * A list of characters that you want to replace. Use with <code>ReplaceChars</code>.
     * </p>
     *
     * @return A list of characters that you want to replace. Use with <code>ReplaceChars</code>.
     */
    public String getReplaceInvalidChars() {
        return this.replaceInvalidChars;
    }

    /**
     * <p>
     * Fluent setter for {@link #setReplaceInvalidChars(String)}: a list of characters that you want to replace. Use
     * with <code>ReplaceChars</code>.
     * </p>
     *
     * @param replaceInvalidChars
     *        A list of characters that you want to replace. Use with <code>ReplaceChars</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withReplaceInvalidChars(String replaceInvalidChars) {
        setReplaceInvalidChars(replaceInvalidChars);
        return this;
    }
    /**
     * <p>
     * A value that specifies to replace the invalid characters specified in <code>ReplaceInvalidChars</code>,
     * substituting the specified characters instead. The default is <code>"?"</code>.
     * </p>
     *
     * @param replaceChars
     *        The character(s) substituted for the invalid characters specified in <code>ReplaceInvalidChars</code>.
     *        The default is <code>"?"</code>.
     */
    public void setReplaceChars(String replaceChars) {
        this.replaceChars = replaceChars;
    }

    /**
     * <p>
     * A value that specifies to replace the invalid characters specified in <code>ReplaceInvalidChars</code>,
     * substituting the specified characters instead. The default is <code>"?"</code>.
     * </p>
     *
     * @return The character(s) substituted for the invalid characters specified in <code>ReplaceInvalidChars</code>.
     *         The default is <code>"?"</code>.
     */
    public String getReplaceChars() {
        return this.replaceChars;
    }

    /**
     * <p>
     * Fluent setter for {@link #setReplaceChars(String)}: the character(s) substituted for the invalid characters
     * specified in <code>ReplaceInvalidChars</code>. The default is <code>"?"</code>.
     * </p>
     *
     * @param replaceChars
     *        The character(s) substituted for the invalid characters specified in <code>ReplaceInvalidChars</code>.
     *        The default is <code>"?"</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withReplaceChars(String replaceChars) {
        setReplaceChars(replaceChars);
        return this;
    }
    /**
     * <p>
     * The name of the Amazon Redshift cluster you are using.
     * </p>
     *
     * @param serverName
     *        The name of the Amazon Redshift cluster you are using.
     */
    public void setServerName(String serverName) {
        this.serverName = serverName;
    }

    /**
     * <p>
     * The name of the Amazon Redshift cluster you are using.
     * </p>
     *
     * @return The name of the Amazon Redshift cluster you are using.
     */
    public String getServerName() {
        return this.serverName;
    }

    /**
     * <p>
     * Fluent setter for {@link #setServerName(String)}: the name of the Amazon Redshift cluster you are using.
     * </p>
     *
     * @param serverName
     *        The name of the Amazon Redshift cluster you are using.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withServerName(String serverName) {
        setServerName(serverName);
        return this;
    }

    /**
     * <p>
     * The Amazon Resource Name (ARN) of the IAM role that has access to the Amazon Redshift service. The role must
     * allow the <code>iam:PassRole</code> action.
     * </p>
     *
     * @param serviceAccessRoleArn
     *        The ARN of the IAM role that has access to the Amazon Redshift service. The role must allow the
     *        <code>iam:PassRole</code> action.
     */
    public void setServiceAccessRoleArn(String serviceAccessRoleArn) {
        this.serviceAccessRoleArn = serviceAccessRoleArn;
    }

    /**
     * <p>
     * The Amazon Resource Name (ARN) of the IAM role that has access to the Amazon Redshift service. The role must
     * allow the <code>iam:PassRole</code> action.
     * </p>
     *
     * @return The ARN of the IAM role that has access to the Amazon Redshift service. The role must allow the
     *         <code>iam:PassRole</code> action.
     */
    public String getServiceAccessRoleArn() {
        return this.serviceAccessRoleArn;
    }

    /**
     * <p>
     * Fluent setter for {@link #setServiceAccessRoleArn(String)}: the ARN of the IAM role that has access to the
     * Amazon Redshift service. The role must allow the <code>iam:PassRole</code> action.
     * </p>
     *
     * @param serviceAccessRoleArn
     *        The ARN of the IAM role that has access to the Amazon Redshift service. The role must allow the
     *        <code>iam:PassRole</code> action.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withServiceAccessRoleArn(String serviceAccessRoleArn) {
        setServiceAccessRoleArn(serviceAccessRoleArn);
        return this;
    }
    /**
     * <p>
     * The KMS key ID. If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>, provide this key ID.
     * The key that you use needs an attached policy that enables IAM user permissions and allows use of the key.
     * </p>
     *
     * @param serverSideEncryptionKmsKeyId
     *        The KMS key ID, required when <code>EncryptionMode</code> is <code>SSE_KMS</code>. The key needs an
     *        attached policy that enables IAM user permissions and allows use of the key.
     */
    public void setServerSideEncryptionKmsKeyId(String serverSideEncryptionKmsKeyId) {
        this.serverSideEncryptionKmsKeyId = serverSideEncryptionKmsKeyId;
    }

    /**
     * <p>
     * The KMS key ID. If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>, provide this key ID.
     * The key that you use needs an attached policy that enables IAM user permissions and allows use of the key.
     * </p>
     *
     * @return The KMS key ID used when <code>EncryptionMode</code> is <code>SSE_KMS</code>.
     */
    public String getServerSideEncryptionKmsKeyId() {
        return this.serverSideEncryptionKmsKeyId;
    }

    /**
     * <p>
     * Fluent setter for {@link #setServerSideEncryptionKmsKeyId(String)}: the KMS key ID, required when
     * <code>EncryptionMode</code> is <code>SSE_KMS</code>. The key needs an attached policy that enables IAM user
     * permissions and allows use of the key.
     * </p>
     *
     * @param serverSideEncryptionKmsKeyId
     *        The KMS key ID used when <code>EncryptionMode</code> is <code>SSE_KMS</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withServerSideEncryptionKmsKeyId(String serverSideEncryptionKmsKeyId) {
        setServerSideEncryptionKmsKeyId(serverSideEncryptionKmsKeyId);
        return this;
    }
    /**
     * <p>
     * The time format that you want to use. Valid values are <code>auto</code> (case-sensitive),
     * <code>'timeformat_string'</code>, <code>'epochsecs'</code>, or <code>'epochmillisecs'</code>. It defaults to 10.
     * Using <code>auto</code> recognizes most strings, even some that aren't supported when you use a time format
     * string.
     * </p>
     * <p>
     * If your date and time values use formats different from each other, set this parameter to <code>auto</code>.
     * </p>
     *
     * @param timeFormat
     *        The time format to use: <code>auto</code> (case-sensitive), <code>'timeformat_string'</code>,
     *        <code>'epochsecs'</code>, or <code>'epochmillisecs'</code>. <code>auto</code> recognizes most strings,
     *        even some that aren't supported when you use a time format string.
     */
    public void setTimeFormat(String timeFormat) {
        this.timeFormat = timeFormat;
    }

    /**
     * <p>
     * The time format that you want to use. Valid values are <code>auto</code> (case-sensitive),
     * <code>'timeformat_string'</code>, <code>'epochsecs'</code>, or <code>'epochmillisecs'</code>. It defaults to 10.
     * Using <code>auto</code> recognizes most strings, even some that aren't supported when you use a time format
     * string.
     * </p>
     * <p>
     * If your date and time values use formats different from each other, set this parameter to <code>auto</code>.
     * </p>
     *
     * @return The time format in use: <code>auto</code> (case-sensitive), <code>'timeformat_string'</code>,
     *         <code>'epochsecs'</code>, or <code>'epochmillisecs'</code>.
     */
    public String getTimeFormat() {
        return this.timeFormat;
    }

    /**
     * <p>
     * Fluent setter for {@link #setTimeFormat(String)}: the time format to use. Valid values are <code>auto</code>
     * (case-sensitive), <code>'timeformat_string'</code>, <code>'epochsecs'</code>, or
     * <code>'epochmillisecs'</code>. If your date and time values use formats different from each other, set this
     * parameter to <code>auto</code>.
     * </p>
     *
     * @param timeFormat
     *        The time format to use: <code>auto</code> (case-sensitive), <code>'timeformat_string'</code>,
     *        <code>'epochsecs'</code>, or <code>'epochmillisecs'</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withTimeFormat(String timeFormat) {
        setTimeFormat(timeFormat);
        return this;
    }
    /**
     * <p>
     * A value that specifies to remove the trailing white space characters from a VARCHAR string. This parameter
     * applies only to columns with a VARCHAR data type. Choose <code>true</code> to remove unneeded white space. The
     * default is <code>false</code>.
     * </p>
     *
     * @param trimBlanks
     *        <code>true</code> to remove trailing white space characters from VARCHAR strings (VARCHAR columns only);
     *        the default is <code>false</code>.
     */
    public void setTrimBlanks(Boolean trimBlanks) {
        this.trimBlanks = trimBlanks;
    }

    /**
     * <p>
     * A value that specifies to remove the trailing white space characters from a VARCHAR string. This parameter
     * applies only to columns with a VARCHAR data type. Choose <code>true</code> to remove unneeded white space. The
     * default is <code>false</code>.
     * </p>
     *
     * @return <code>true</code> if trailing white space characters are removed from VARCHAR strings; the default is
     *         <code>false</code>.
     */
    public Boolean getTrimBlanks() {
        return this.trimBlanks;
    }

    /**
     * <p>
     * Fluent setter for {@link #setTrimBlanks(Boolean)}: choose <code>true</code> to remove trailing white space
     * characters from VARCHAR strings. This parameter applies only to columns with a VARCHAR data type. The default is
     * <code>false</code>.
     * </p>
     *
     * @param trimBlanks
     *        <code>true</code> to remove trailing white space characters from VARCHAR strings; the default is
     *        <code>false</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withTrimBlanks(Boolean trimBlanks) {
        setTrimBlanks(trimBlanks);
        return this;
    }

    /**
     * <p>
     * Convenience alias of {@link #getTrimBlanks()} following the JavaBeans boolean-accessor convention.
     * </p>
     *
     * @return <code>true</code> if trailing white space characters are removed from VARCHAR strings; the default is
     *         <code>false</code>.
     */
    public Boolean isTrimBlanks() {
        return this.trimBlanks;
    }
    /**
     * <p>
     * A value that specifies to truncate data in columns to the appropriate number of characters, so that the data
     * fits in the column. This parameter applies only to columns with a VARCHAR or CHAR data type, and rows with a
     * size of 4 MB or less. Choose <code>true</code> to truncate data. The default is <code>false</code>.
     * </p>
     *
     * @param truncateColumns
     *        <code>true</code> to truncate column data so it fits in the column (VARCHAR or CHAR columns and rows of
     *        4 MB or less only); the default is <code>false</code>.
     */
    public void setTruncateColumns(Boolean truncateColumns) {
        this.truncateColumns = truncateColumns;
    }

    /**
     * <p>
     * A value that specifies to truncate data in columns to the appropriate number of characters, so that the data
     * fits in the column. This parameter applies only to columns with a VARCHAR or CHAR data type, and rows with a
     * size of 4 MB or less. Choose <code>true</code> to truncate data. The default is <code>false</code>.
     * </p>
     *
     * @return <code>true</code> if column data is truncated to fit in the column; the default is <code>false</code>.
     */
    public Boolean getTruncateColumns() {
        return this.truncateColumns;
    }

    /**
     * <p>
     * Fluent setter for {@link #setTruncateColumns(Boolean)}: choose <code>true</code> to truncate column data so it
     * fits in the column. Applies only to VARCHAR or CHAR columns and rows with a size of 4 MB or less. The default is
     * <code>false</code>.
     * </p>
     *
     * @param truncateColumns
     *        <code>true</code> to truncate column data so it fits in the column; the default is <code>false</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withTruncateColumns(Boolean truncateColumns) {
        setTruncateColumns(truncateColumns);
        return this;
    }

    /**
     * <p>
     * Convenience alias of {@link #getTruncateColumns()} following the JavaBeans boolean-accessor convention.
     * </p>
     *
     * @return <code>true</code> if column data is truncated to fit in the column; the default is <code>false</code>.
     */
    public Boolean isTruncateColumns() {
        return this.truncateColumns;
    }
    /**
     * <p>
     * An Amazon Redshift user name for a registered user.
     * </p>
     *
     * @param username
     *        An Amazon Redshift user name for a registered user.
     */
    public void setUsername(String username) {
        this.username = username;
    }

    /**
     * <p>
     * An Amazon Redshift user name for a registered user.
     * </p>
     *
     * @return An Amazon Redshift user name for a registered user.
     */
    public String getUsername() {
        return this.username;
    }

    /**
     * <p>
     * Fluent setter for {@link #setUsername(String)}: an Amazon Redshift user name for a registered user.
     * </p>
     *
     * @param username
     *        An Amazon Redshift user name for a registered user.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withUsername(String username) {
        setUsername(username);
        return this;
    }

    /**
     * <p>
     * The size (in KB) of the in-memory file write buffer used when generating .csv files on the local disk at the
     * DMS replication instance. The default value is 1000 (buffer size is 1000KB).
     * </p>
     *
     * @param writeBufferSize
     *        The size (in KB) of the in-memory file write buffer used when generating .csv files on the local disk at
     *        the DMS replication instance. The default value is 1000 (buffer size is 1000KB).
     */
    public void setWriteBufferSize(Integer writeBufferSize) {
        this.writeBufferSize = writeBufferSize;
    }

    /**
     * <p>
     * The size (in KB) of the in-memory file write buffer used when generating .csv files on the local disk at the
     * DMS replication instance. The default value is 1000 (buffer size is 1000KB).
     * </p>
     *
     * @return The size (in KB) of the in-memory file write buffer used when generating .csv files on the local disk
     *         at the DMS replication instance. The default value is 1000 (buffer size is 1000KB).
     */
    public Integer getWriteBufferSize() {
        return this.writeBufferSize;
    }

    /**
     * <p>
     * Fluent setter for {@link #setWriteBufferSize(Integer)}: the size (in KB) of the in-memory file write buffer
     * used when generating .csv files on the local disk at the DMS replication instance. The default value is 1000
     * (buffer size is 1000KB).
     * </p>
     *
     * @param writeBufferSize
     *        The size (in KB) of the in-memory file write buffer. The default value is 1000 (buffer size is 1000KB).
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withWriteBufferSize(Integer writeBufferSize) {
        setWriteBufferSize(writeBufferSize);
        return this;
    }

    /**
     * <p>
     * The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the
     * required permissions to access the value in <code>SecretsManagerSecret</code>. The role must allow the
     * <code>iam:PassRole</code> action. <code>SecretsManagerSecret</code> has the value of the Amazon Web Services
     * Secrets Manager secret that allows access to the Amazon Redshift endpoint.
     * </p>
     * <note>
     * <p>
     * You can specify one of two sets of values for these permissions. You can specify the values for this setting
     * and <code>SecretsManagerSecretId</code>. Or you can specify clear-text values for <code>UserName</code>,
     * <code>Password</code>, <code>ServerName</code>, and <code>Port</code>. You can't specify both. For more
     * information on creating this <code>SecretsManagerSecret</code> and the
     * <code>SecretsManagerAccessRoleArn</code> and <code>SecretsManagerSecretId</code> required to access it, see
     * Using secrets to access Database Migration Service resources in the <i>Database Migration Service User
     * Guide</i>.
     * </p>
     * </note>
     *
     * @param secretsManagerAccessRoleArn
     *        The full ARN of the IAM role that specifies DMS as the trusted entity and grants the required
     *        permissions to access the value in <code>SecretsManagerSecret</code>. The role must allow the
     *        <code>iam:PassRole</code> action. Mutually exclusive with clear-text <code>UserName</code>,
     *        <code>Password</code>, <code>ServerName</code>, and <code>Port</code> values.
     */
    public void setSecretsManagerAccessRoleArn(String secretsManagerAccessRoleArn) {
        this.secretsManagerAccessRoleArn = secretsManagerAccessRoleArn;
    }

    /**
     * <p>
     * The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the
     * required permissions to access the value in <code>SecretsManagerSecret</code>. The role must allow the
     * <code>iam:PassRole</code> action. <code>SecretsManagerSecret</code> has the value of the Amazon Web Services
     * Secrets Manager secret that allows access to the Amazon Redshift endpoint.
     * </p>
     * <note>
     * <p>
     * You can specify one of two sets of values for these permissions. You can specify the values for this setting
     * and <code>SecretsManagerSecretId</code>. Or you can specify clear-text values for <code>UserName</code>,
     * <code>Password</code>, <code>ServerName</code>, and <code>Port</code>. You can't specify both.
     * </p>
     * </note>
     *
     * @return The full ARN of the IAM role that specifies DMS as the trusted entity and grants the required
     *         permissions to access the value in <code>SecretsManagerSecret</code>.
     */
    public String getSecretsManagerAccessRoleArn() {
        return this.secretsManagerAccessRoleArn;
    }

    /**
     * <p>
     * Fluent setter for {@link #setSecretsManagerAccessRoleArn(String)}: the full ARN of the IAM role that specifies
     * DMS as the trusted entity and grants the required permissions to access the value in
     * <code>SecretsManagerSecret</code>. The role must allow the <code>iam:PassRole</code> action.
     * </p>
     * <note>
     * <p>
     * You can specify either this setting together with <code>SecretsManagerSecretId</code>, or clear-text values for
     * <code>UserName</code>, <code>Password</code>, <code>ServerName</code>, and <code>Port</code> — not both.
     * </p>
     * </note>
     *
     * @param secretsManagerAccessRoleArn
     *        The full ARN of the IAM role that specifies DMS as the trusted entity and grants the required
     *        permissions to access the value in <code>SecretsManagerSecret</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withSecretsManagerAccessRoleArn(String secretsManagerAccessRoleArn) {
        setSecretsManagerAccessRoleArn(secretsManagerAccessRoleArn);
        return this;
    }

    /**
     * <p>
* The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the Amazon
* Redshift endpoint connection details.
*
SecretsManagerSecret
that contains the
* Amazon Redshift endpoint connection details.
*/
public void setSecretsManagerSecretId(String secretsManagerSecretId) {
this.secretsManagerSecretId = secretsManagerSecretId;
}
    /**
     * <p>
     * The full ARN, partial ARN, or friendly name of the <code>SecretsManagerSecret</code> that contains the Amazon
     * Redshift endpoint connection details.
     * </p>
     * 
     * @return The full ARN, partial ARN, or friendly name of the <code>SecretsManagerSecret</code> that contains the
     *         Amazon Redshift endpoint connection details.
     */
    public String getSecretsManagerSecretId() {
        return this.secretsManagerSecretId;
    }
    /**
     * <p>
     * The full ARN, partial ARN, or friendly name of the <code>SecretsManagerSecret</code> that contains the Amazon
     * Redshift endpoint connection details.
     * </p>
     * 
     * @param secretsManagerSecretId
     *        The full ARN, partial ARN, or friendly name of the <code>SecretsManagerSecret</code> that contains the
     *        Amazon Redshift endpoint connection details.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withSecretsManagerSecretId(String secretsManagerSecretId) {
        setSecretsManagerSecretId(secretsManagerSecretId);
        return this;
    }
    /**
     * <p>
     * When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as
     * <code>varchar(1)</code>.
     * </p>
     * 
     * @param mapBooleanAsBoolean
     *        When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as
     *        <code>varchar(1)</code>.
     */
    public void setMapBooleanAsBoolean(Boolean mapBooleanAsBoolean) {
        this.mapBooleanAsBoolean = mapBooleanAsBoolean;
    }
    /**
     * <p>
     * When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as
     * <code>varchar(1)</code>.
     * </p>
     * 
     * @return When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as
     *         <code>varchar(1)</code>.
     */
    public Boolean getMapBooleanAsBoolean() {
        return this.mapBooleanAsBoolean;
    }
    /**
     * <p>
     * When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as
     * <code>varchar(1)</code>.
     * </p>
     * 
     * @param mapBooleanAsBoolean
     *        When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as
     *        <code>varchar(1)</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RedshiftSettings withMapBooleanAsBoolean(Boolean mapBooleanAsBoolean) {
        setMapBooleanAsBoolean(mapBooleanAsBoolean);
        return this;
    }
    /**
     * <p>
     * When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as
     * <code>varchar(1)</code>.
     * </p>
     * 
     * @return When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as
     *         <code>varchar(1)</code>.
     */
    public Boolean isMapBooleanAsBoolean() {
        return this.mapBooleanAsBoolean;
    }
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getAcceptAnyDate() != null)
sb.append("AcceptAnyDate: ").append(getAcceptAnyDate()).append(",");
if (getAfterConnectScript() != null)
sb.append("AfterConnectScript: ").append(getAfterConnectScript()).append(",");
if (getBucketFolder() != null)
sb.append("BucketFolder: ").append(getBucketFolder()).append(",");
if (getBucketName() != null)
sb.append("BucketName: ").append(getBucketName()).append(",");
if (getCaseSensitiveNames() != null)
sb.append("CaseSensitiveNames: ").append(getCaseSensitiveNames()).append(",");
if (getCompUpdate() != null)
sb.append("CompUpdate: ").append(getCompUpdate()).append(",");
if (getConnectionTimeout() != null)
sb.append("ConnectionTimeout: ").append(getConnectionTimeout()).append(",");
if (getDatabaseName() != null)
sb.append("DatabaseName: ").append(getDatabaseName()).append(",");
if (getDateFormat() != null)
sb.append("DateFormat: ").append(getDateFormat()).append(",");
if (getEmptyAsNull() != null)
sb.append("EmptyAsNull: ").append(getEmptyAsNull()).append(",");
if (getEncryptionMode() != null)
sb.append("EncryptionMode: ").append(getEncryptionMode()).append(",");
if (getExplicitIds() != null)
sb.append("ExplicitIds: ").append(getExplicitIds()).append(",");
if (getFileTransferUploadStreams() != null)
sb.append("FileTransferUploadStreams: ").append(getFileTransferUploadStreams()).append(",");
if (getLoadTimeout() != null)
sb.append("LoadTimeout: ").append(getLoadTimeout()).append(",");
if (getMaxFileSize() != null)
sb.append("MaxFileSize: ").append(getMaxFileSize()).append(",");
if (getPassword() != null)
sb.append("Password: ").append("***Sensitive Data Redacted***").append(",");
if (getPort() != null)
sb.append("Port: ").append(getPort()).append(",");
if (getRemoveQuotes() != null)
sb.append("RemoveQuotes: ").append(getRemoveQuotes()).append(",");
if (getReplaceInvalidChars() != null)
sb.append("ReplaceInvalidChars: ").append(getReplaceInvalidChars()).append(",");
if (getReplaceChars() != null)
sb.append("ReplaceChars: ").append(getReplaceChars()).append(",");
if (getServerName() != null)
sb.append("ServerName: ").append(getServerName()).append(",");
if (getServiceAccessRoleArn() != null)
sb.append("ServiceAccessRoleArn: ").append(getServiceAccessRoleArn()).append(",");
if (getServerSideEncryptionKmsKeyId() != null)
sb.append("ServerSideEncryptionKmsKeyId: ").append(getServerSideEncryptionKmsKeyId()).append(",");
if (getTimeFormat() != null)
sb.append("TimeFormat: ").append(getTimeFormat()).append(",");
if (getTrimBlanks() != null)
sb.append("TrimBlanks: ").append(getTrimBlanks()).append(",");
if (getTruncateColumns() != null)
sb.append("TruncateColumns: ").append(getTruncateColumns()).append(",");
if (getUsername() != null)
sb.append("Username: ").append(getUsername()).append(",");
if (getWriteBufferSize() != null)
sb.append("WriteBufferSize: ").append(getWriteBufferSize()).append(",");
if (getSecretsManagerAccessRoleArn() != null)
sb.append("SecretsManagerAccessRoleArn: ").append(getSecretsManagerAccessRoleArn()).append(",");
if (getSecretsManagerSecretId() != null)
sb.append("SecretsManagerSecretId: ").append(getSecretsManagerSecretId()).append(",");
if (getMapBooleanAsBoolean() != null)
sb.append("MapBooleanAsBoolean: ").append(getMapBooleanAsBoolean());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof RedshiftSettings == false)
return false;
RedshiftSettings other = (RedshiftSettings) obj;
if (other.getAcceptAnyDate() == null ^ this.getAcceptAnyDate() == null)
return false;
if (other.getAcceptAnyDate() != null && other.getAcceptAnyDate().equals(this.getAcceptAnyDate()) == false)
return false;
if (other.getAfterConnectScript() == null ^ this.getAfterConnectScript() == null)
return false;
if (other.getAfterConnectScript() != null && other.getAfterConnectScript().equals(this.getAfterConnectScript()) == false)
return false;
if (other.getBucketFolder() == null ^ this.getBucketFolder() == null)
return false;
if (other.getBucketFolder() != null && other.getBucketFolder().equals(this.getBucketFolder()) == false)
return false;
if (other.getBucketName() == null ^ this.getBucketName() == null)
return false;
if (other.getBucketName() != null && other.getBucketName().equals(this.getBucketName()) == false)
return false;
if (other.getCaseSensitiveNames() == null ^ this.getCaseSensitiveNames() == null)
return false;
if (other.getCaseSensitiveNames() != null && other.getCaseSensitiveNames().equals(this.getCaseSensitiveNames()) == false)
return false;
if (other.getCompUpdate() == null ^ this.getCompUpdate() == null)
return false;
if (other.getCompUpdate() != null && other.getCompUpdate().equals(this.getCompUpdate()) == false)
return false;
if (other.getConnectionTimeout() == null ^ this.getConnectionTimeout() == null)
return false;
if (other.getConnectionTimeout() != null && other.getConnectionTimeout().equals(this.getConnectionTimeout()) == false)
return false;
if (other.getDatabaseName() == null ^ this.getDatabaseName() == null)
return false;
if (other.getDatabaseName() != null && other.getDatabaseName().equals(this.getDatabaseName()) == false)
return false;
if (other.getDateFormat() == null ^ this.getDateFormat() == null)
return false;
if (other.getDateFormat() != null && other.getDateFormat().equals(this.getDateFormat()) == false)
return false;
if (other.getEmptyAsNull() == null ^ this.getEmptyAsNull() == null)
return false;
if (other.getEmptyAsNull() != null && other.getEmptyAsNull().equals(this.getEmptyAsNull()) == false)
return false;
if (other.getEncryptionMode() == null ^ this.getEncryptionMode() == null)
return false;
if (other.getEncryptionMode() != null && other.getEncryptionMode().equals(this.getEncryptionMode()) == false)
return false;
if (other.getExplicitIds() == null ^ this.getExplicitIds() == null)
return false;
if (other.getExplicitIds() != null && other.getExplicitIds().equals(this.getExplicitIds()) == false)
return false;
if (other.getFileTransferUploadStreams() == null ^ this.getFileTransferUploadStreams() == null)
return false;
if (other.getFileTransferUploadStreams() != null && other.getFileTransferUploadStreams().equals(this.getFileTransferUploadStreams()) == false)
return false;
if (other.getLoadTimeout() == null ^ this.getLoadTimeout() == null)
return false;
if (other.getLoadTimeout() != null && other.getLoadTimeout().equals(this.getLoadTimeout()) == false)
return false;
if (other.getMaxFileSize() == null ^ this.getMaxFileSize() == null)
return false;
if (other.getMaxFileSize() != null && other.getMaxFileSize().equals(this.getMaxFileSize()) == false)
return false;
if (other.getPassword() == null ^ this.getPassword() == null)
return false;
if (other.getPassword() != null && other.getPassword().equals(this.getPassword()) == false)
return false;
if (other.getPort() == null ^ this.getPort() == null)
return false;
if (other.getPort() != null && other.getPort().equals(this.getPort()) == false)
return false;
if (other.getRemoveQuotes() == null ^ this.getRemoveQuotes() == null)
return false;
if (other.getRemoveQuotes() != null && other.getRemoveQuotes().equals(this.getRemoveQuotes()) == false)
return false;
if (other.getReplaceInvalidChars() == null ^ this.getReplaceInvalidChars() == null)
return false;
if (other.getReplaceInvalidChars() != null && other.getReplaceInvalidChars().equals(this.getReplaceInvalidChars()) == false)
return false;
if (other.getReplaceChars() == null ^ this.getReplaceChars() == null)
return false;
if (other.getReplaceChars() != null && other.getReplaceChars().equals(this.getReplaceChars()) == false)
return false;
if (other.getServerName() == null ^ this.getServerName() == null)
return false;
if (other.getServerName() != null && other.getServerName().equals(this.getServerName()) == false)
return false;
if (other.getServiceAccessRoleArn() == null ^ this.getServiceAccessRoleArn() == null)
return false;
if (other.getServiceAccessRoleArn() != null && other.getServiceAccessRoleArn().equals(this.getServiceAccessRoleArn()) == false)
return false;
if (other.getServerSideEncryptionKmsKeyId() == null ^ this.getServerSideEncryptionKmsKeyId() == null)
return false;
if (other.getServerSideEncryptionKmsKeyId() != null && other.getServerSideEncryptionKmsKeyId().equals(this.getServerSideEncryptionKmsKeyId()) == false)
return false;
if (other.getTimeFormat() == null ^ this.getTimeFormat() == null)
return false;
if (other.getTimeFormat() != null && other.getTimeFormat().equals(this.getTimeFormat()) == false)
return false;
if (other.getTrimBlanks() == null ^ this.getTrimBlanks() == null)
return false;
if (other.getTrimBlanks() != null && other.getTrimBlanks().equals(this.getTrimBlanks()) == false)
return false;
if (other.getTruncateColumns() == null ^ this.getTruncateColumns() == null)
return false;
if (other.getTruncateColumns() != null && other.getTruncateColumns().equals(this.getTruncateColumns()) == false)
return false;
if (other.getUsername() == null ^ this.getUsername() == null)
return false;
if (other.getUsername() != null && other.getUsername().equals(this.getUsername()) == false)
return false;
if (other.getWriteBufferSize() == null ^ this.getWriteBufferSize() == null)
return false;
if (other.getWriteBufferSize() != null && other.getWriteBufferSize().equals(this.getWriteBufferSize()) == false)
return false;
if (other.getSecretsManagerAccessRoleArn() == null ^ this.getSecretsManagerAccessRoleArn() == null)
return false;
if (other.getSecretsManagerAccessRoleArn() != null && other.getSecretsManagerAccessRoleArn().equals(this.getSecretsManagerAccessRoleArn()) == false)
return false;
if (other.getSecretsManagerSecretId() == null ^ this.getSecretsManagerSecretId() == null)
return false;
if (other.getSecretsManagerSecretId() != null && other.getSecretsManagerSecretId().equals(this.getSecretsManagerSecretId()) == false)
return false;
if (other.getMapBooleanAsBoolean() == null ^ this.getMapBooleanAsBoolean() == null)
return false;
if (other.getMapBooleanAsBoolean() != null && other.getMapBooleanAsBoolean().equals(this.getMapBooleanAsBoolean()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getAcceptAnyDate() == null) ? 0 : getAcceptAnyDate().hashCode());
hashCode = prime * hashCode + ((getAfterConnectScript() == null) ? 0 : getAfterConnectScript().hashCode());
hashCode = prime * hashCode + ((getBucketFolder() == null) ? 0 : getBucketFolder().hashCode());
hashCode = prime * hashCode + ((getBucketName() == null) ? 0 : getBucketName().hashCode());
hashCode = prime * hashCode + ((getCaseSensitiveNames() == null) ? 0 : getCaseSensitiveNames().hashCode());
hashCode = prime * hashCode + ((getCompUpdate() == null) ? 0 : getCompUpdate().hashCode());
hashCode = prime * hashCode + ((getConnectionTimeout() == null) ? 0 : getConnectionTimeout().hashCode());
hashCode = prime * hashCode + ((getDatabaseName() == null) ? 0 : getDatabaseName().hashCode());
hashCode = prime * hashCode + ((getDateFormat() == null) ? 0 : getDateFormat().hashCode());
hashCode = prime * hashCode + ((getEmptyAsNull() == null) ? 0 : getEmptyAsNull().hashCode());
hashCode = prime * hashCode + ((getEncryptionMode() == null) ? 0 : getEncryptionMode().hashCode());
hashCode = prime * hashCode + ((getExplicitIds() == null) ? 0 : getExplicitIds().hashCode());
hashCode = prime * hashCode + ((getFileTransferUploadStreams() == null) ? 0 : getFileTransferUploadStreams().hashCode());
hashCode = prime * hashCode + ((getLoadTimeout() == null) ? 0 : getLoadTimeout().hashCode());
hashCode = prime * hashCode + ((getMaxFileSize() == null) ? 0 : getMaxFileSize().hashCode());
hashCode = prime * hashCode + ((getPassword() == null) ? 0 : getPassword().hashCode());
hashCode = prime * hashCode + ((getPort() == null) ? 0 : getPort().hashCode());
hashCode = prime * hashCode + ((getRemoveQuotes() == null) ? 0 : getRemoveQuotes().hashCode());
hashCode = prime * hashCode + ((getReplaceInvalidChars() == null) ? 0 : getReplaceInvalidChars().hashCode());
hashCode = prime * hashCode + ((getReplaceChars() == null) ? 0 : getReplaceChars().hashCode());
hashCode = prime * hashCode + ((getServerName() == null) ? 0 : getServerName().hashCode());
hashCode = prime * hashCode + ((getServiceAccessRoleArn() == null) ? 0 : getServiceAccessRoleArn().hashCode());
hashCode = prime * hashCode + ((getServerSideEncryptionKmsKeyId() == null) ? 0 : getServerSideEncryptionKmsKeyId().hashCode());
hashCode = prime * hashCode + ((getTimeFormat() == null) ? 0 : getTimeFormat().hashCode());
hashCode = prime * hashCode + ((getTrimBlanks() == null) ? 0 : getTrimBlanks().hashCode());
hashCode = prime * hashCode + ((getTruncateColumns() == null) ? 0 : getTruncateColumns().hashCode());
hashCode = prime * hashCode + ((getUsername() == null) ? 0 : getUsername().hashCode());
hashCode = prime * hashCode + ((getWriteBufferSize() == null) ? 0 : getWriteBufferSize().hashCode());
hashCode = prime * hashCode + ((getSecretsManagerAccessRoleArn() == null) ? 0 : getSecretsManagerAccessRoleArn().hashCode());
hashCode = prime * hashCode + ((getSecretsManagerSecretId() == null) ? 0 : getSecretsManagerSecretId().hashCode());
hashCode = prime * hashCode + ((getMapBooleanAsBoolean() == null) ? 0 : getMapBooleanAsBoolean().hashCode());
return hashCode;
}
@Override
public RedshiftSettings clone() {
try {
return (RedshiftSettings) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
    /**
     * Marshalls this object's fields onto the given {@code ProtocolMarshaller} by delegating to the generated
     * {@code RedshiftSettingsMarshaller} singleton. SDK-internal; not intended to be called by user code.
     */
    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        com.amazonaws.services.databasemigrationservice.model.transform.RedshiftSettingsMarshaller.getInstance().marshall(this, protocolMarshaller);
    }
}