/*
 * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.databasemigrationservice.model;

import java.io.Serializable;

import javax.annotation.Generated;

import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;

/**
*
* Provides information that defines a PostgreSQL endpoint. *
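*
* As an illustration only, a minimal sketch of configuring these settings through the fluent setters defined below;
* every value shown is a placeholder, not a real host, database, or credential:
*
* <pre>{@code
* PostgreSQLSettings settings = new PostgreSQLSettings()
*         .withServerName("example-host")      // placeholder host name
*         .withPort(5432)                      // default PostgreSQL port
*         .withDatabaseName("example_db")      // placeholder database name
*         .withUsername("example_user")        // placeholder user name
*         .withPassword("example_password")    // placeholder password
*         .withExecuteTimeout(60);             // client statement timeout, in seconds
* }</pre>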
*
* @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class PostgreSQLSettings implements Serializable, Cloneable, StructuredPojo {

/**
* For use with change data capture (CDC) only, this attribute has DMS bypass foreign keys and user triggers to
* reduce the time it takes to bulk load data.
*
* Example: afterConnectScript=SET session_replication_role='replica'
*/
private String afterConnectScript;
/**
*
* To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can * later remove these artifacts. *
*
* If this value is set to N
, you don't have to create tables or triggers on the source database.
*/
private Boolean captureDdls;
/**
*
* Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. *
*
* Example: maxFileSize=512
*/
private Integer maxFileSize;
/**
*
* Database name for the endpoint. *
*/ private String databaseName; /** ** The schema in which the operational DDL database artifacts are created. *
*
* Example: ddlArtifactsSchema=xyzddlschema;
*/
private String ddlArtifactsSchema;
/**
*
* Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60 seconds. *
*
* Example: executeTimeout=100;
*/
private Integer executeTimeout;
/**
*
* When set to true
, this value causes a task to fail if the actual size of a LOB column is greater
* than the specified LobMaxSize
.
*
* If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB * data. *
*/ private Boolean failTasksOnLobTruncation; /** *
* The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical
* replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This
* heartbeat keeps restart_lsn
moving and prevents storage full scenarios.
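*
* As an illustration, a sketch of enabling the heartbeat through the fluent setters defined below; the schema name
* and frequency shown are placeholder values:
*
* <pre>{@code
* PostgreSQLSettings settings = new PostgreSQLSettings()
*         .withHeartbeatEnable(true)
*         .withHeartbeatSchema("dms_heartbeat")   // placeholder schema for the heartbeat artifacts
*         .withHeartbeatFrequency(5);             // heartbeat frequency, in minutes
* }</pre>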
*/
private Boolean heartbeatEnable;
/**
*
* Sets the schema in which the heartbeat artifacts are created. *
*/ private String heartbeatSchema; /** ** Sets the WAL heartbeat frequency (in minutes). *
*/ private Integer heartbeatFrequency; /** ** Endpoint connection password. *
*/ private String password; /** ** Endpoint TCP port. The default is 5432. *
*/ private Integer port; /** ** The host name of the endpoint database. *
*
* For an Amazon RDS PostgreSQL instance, this is the output of DescribeDBInstances, in the
* Endpoint.Address
* field.
*
* For an Aurora PostgreSQL instance, this is the output of DescribeDBClusters, in the Endpoint
field.
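*
* As a sketch only, one way to look up this value for an RDS instance is with the Amazon RDS client from the
* aws-java-sdk-rds module; the instance identifier below is a placeholder:
*
* <pre>{@code
* AmazonRDS rds = AmazonRDSClientBuilder.defaultClient();
* String address = rds.describeDBInstances(
*                 new DescribeDBInstancesRequest().withDBInstanceIdentifier("example-instance"))
*         .getDBInstances().get(0)
*         .getEndpoint().getAddress();   // the Endpoint.Address field mentioned above
* PostgreSQLSettings settings = new PostgreSQLSettings().withServerName(address);
* }</pre>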
*/
private String serverName;
/**
*
* Endpoint connection user name. *
*/ private String username; /** ** Sets the name of a previously created logical replication slot for a change data capture (CDC) load of the * PostgreSQL source instance. *
*
* When used with the CdcStartPosition
request parameter for the DMS API , this attribute also makes it
* possible to use native CDC start points. DMS verifies that the specified logical replication slot exists before
* starting the CDC load task. It also verifies that the task was created with a valid setting of
* CdcStartPosition
. If the specified slot doesn't exist or the task doesn't have a valid
* CdcStartPosition
setting, DMS raises an error.
*
* For more information about setting the CdcStartPosition
request parameter, see Determining a CDC native start point in the Database Migration Service User Guide. For more
* information about using CdcStartPosition
, see CreateReplicationTask, StartReplicationTask, and ModifyReplicationTask.
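*
* As an illustration, a sketch of pointing the endpoint at an existing slot; the slot name is a placeholder and must
* refer to a logical replication slot created beforehand on the source:
*
* <pre>{@code
* PostgreSQLSettings settings = new PostgreSQLSettings()
*         .withSlotName("example_dms_slot");   // placeholder name of a previously created replication slot
* }</pre>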
*/
private String slotName;
/**
*
* Specifies the plugin to use to create a replication slot. *
*/ private String pluginName; /** *
* The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the
* required permissions to access the value in SecretsManagerSecret
. The role must allow the
* iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services
* Secrets Manager secret that allows access to the PostgreSQL endpoint.
*
* You can specify one of two sets of values for these permissions. You can specify the values for this setting and
* SecretsManagerSecretId
. Or you can specify clear-text values for UserName
,
* Password
, ServerName
, and Port
. You can't specify both. For more
* information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
* and SecretsManagerSecretId
required to access it, see Using
* secrets to access Database Migration Service resources in the Database Migration Service User Guide.
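*
* As an illustration of the two mutually exclusive configurations described above, a sketch with placeholder ARNs
* and connection values:
*
* <pre>{@code
* // Option 1: reference an Amazon Web Services Secrets Manager secret
* PostgreSQLSettings viaSecret = new PostgreSQLSettings()
*         .withSecretsManagerAccessRoleArn("arn:aws:iam::123456789012:role/example-dms-access-role")
*         .withSecretsManagerSecretId("example-postgresql-secret");
*
* // Option 2: supply clear-text connection values instead; do not combine with option 1
* PostgreSQLSettings viaClearText = new PostgreSQLSettings()
*         .withServerName("example-host")
*         .withPort(5432)
*         .withUsername("example_user")
*         .withPassword("example_password");
* }</pre>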
*/
private String secretsManagerAccessRoleArn;
/**
*
* The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the PostgreSQL
* endpoint connection details.
*/
private String secretsManagerSecretId;
/**
*
* Use the TrimSpaceInChar
source endpoint setting to trim data on CHAR and NCHAR data types during
* migration. The default value is true
.
*/
private Boolean trimSpaceInChar;
/**
*
* When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as
* varchar(5)
.
*/
private Boolean mapBooleanAsBoolean;
/**
*
* When true, DMS migrates JSONB values as CLOB. *
*/ private Boolean mapJsonbAsClob; /** ** When true, DMS migrates LONG values as VARCHAR. *
*/ private String mapLongVarcharAs; /** ** Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You can use * this setting to specify replication behavior for endpoints that require additional configuration, such as * Babelfish endpoints. *
*/ private String databaseMode; /** ** The Babelfish for Aurora PostgreSQL database name for the endpoint. *
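*
* As an illustration, a sketch of configuring a Babelfish endpoint; the database name is a placeholder, and the
* "babelfish" mode string is an assumption here (check the DatabaseMode enum for the supported values):
*
* <pre>{@code
* PostgreSQLSettings settings = new PostgreSQLSettings()
*         .withDatabaseMode("babelfish")                        // assumed DatabaseMode value
*         .withBabelfishDatabaseName("example_babelfish_db");   // placeholder Babelfish database name
* }</pre>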
*/ private String babelfishDatabaseName; /** ** For use with change data capture (CDC) only, this attribute has DMS bypass foreign keys and user triggers to * reduce the time it takes to bulk load data. *
*
* Example: afterConnectScript=SET session_replication_role='replica'
*
* @param afterConnectScript
* For use with change data capture (CDC) only, this attribute has DMS bypass foreign keys and user triggers to
* reduce the time it takes to bulk load data.
* Example: afterConnectScript=SET session_replication_role='replica'
*/
public void setAfterConnectScript(String afterConnectScript) {
this.afterConnectScript = afterConnectScript;
}
/**
*
* For use with change data capture (CDC) only, this attribute has DMS bypass foreign keys and user triggers to * reduce the time it takes to bulk load data. *
*
* Example: afterConnectScript=SET session_replication_role='replica'
*
* @return For use with change data capture (CDC) only, this attribute has DMS bypass foreign keys and user
* triggers to reduce the time it takes to bulk load data.
* Example: afterConnectScript=SET session_replication_role='replica'
*/
public String getAfterConnectScript() {
return this.afterConnectScript;
}
/**
*
* For use with change data capture (CDC) only, this attribute has DMS bypass foreign keys and user triggers to * reduce the time it takes to bulk load data. *
*
* Example: afterConnectScript=SET session_replication_role='replica'
*
* @param afterConnectScript
* For use with change data capture (CDC) only, this attribute has DMS bypass foreign keys and user triggers to
* reduce the time it takes to bulk load data.
* Example: afterConnectScript=SET session_replication_role='replica'
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withAfterConnectScript(String afterConnectScript) {
setAfterConnectScript(afterConnectScript);
return this;
}
/**
*
* To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can * later remove these artifacts. *
*
* If this value is set to N
, you don't have to create tables or triggers on the source database.
*
* @param captureDdls
* To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can
* later remove these artifacts.
* If this value is set to N
, you don't have to create tables or triggers on the source
* database.
*/
public void setCaptureDdls(Boolean captureDdls) {
this.captureDdls = captureDdls;
}
/**
*
* To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can * later remove these artifacts. *
*
* If this value is set to N
, you don't have to create tables or triggers on the source database.
*
* @return To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts.
* You can later remove these artifacts.
* If this value is set to N
, you don't have to create tables or triggers on the source
* database.
*/
public Boolean getCaptureDdls() {
return this.captureDdls;
}
/**
*
* To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can * later remove these artifacts. *
*
* If this value is set to N
, you don't have to create tables or triggers on the source database.
*
* @param captureDdls
* To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can
* later remove these artifacts.
* If this value is set to N
, you don't have to create tables or triggers on the source
* database.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withCaptureDdls(Boolean captureDdls) {
setCaptureDdls(captureDdls);
return this;
}
/**
*
* To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can * later remove these artifacts. *
*
* If this value is set to N
, you don't have to create tables or triggers on the source database.
*
* @return To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts.
* You can later remove these artifacts.
* If this value is set to N
, you don't have to create tables or triggers on the source
* database.
*/
public Boolean isCaptureDdls() {
return this.captureDdls;
}
/**
*
* Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. *
*
* Example: maxFileSize=512
*
* @param maxFileSize
* Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.
* Example: maxFileSize=512
*/
public void setMaxFileSize(Integer maxFileSize) {
this.maxFileSize = maxFileSize;
}
/**
*
* Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. *
*
* Example: maxFileSize=512
*
* @return Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.
* Example: maxFileSize=512
*/
public Integer getMaxFileSize() {
return this.maxFileSize;
}
/**
*
* Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. *
*
* Example: maxFileSize=512
*
* @param maxFileSize
* Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.
* Example: maxFileSize=512
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withMaxFileSize(Integer maxFileSize) {
setMaxFileSize(maxFileSize);
return this;
}
/**
*
* Database name for the endpoint. *
* * @param databaseName * Database name for the endpoint. */ public void setDatabaseName(String databaseName) { this.databaseName = databaseName; } /** ** Database name for the endpoint. *
* * @return Database name for the endpoint. */ public String getDatabaseName() { return this.databaseName; } /** ** Database name for the endpoint. *
* * @param databaseName * Database name for the endpoint. * @return Returns a reference to this object so that method calls can be chained together. */ public PostgreSQLSettings withDatabaseName(String databaseName) { setDatabaseName(databaseName); return this; } /** ** The schema in which the operational DDL database artifacts are created. *
*
* Example: ddlArtifactsSchema=xyzddlschema;
*
* @param ddlArtifactsSchema
* The schema in which the operational DDL database artifacts are created.
* Example: ddlArtifactsSchema=xyzddlschema;
*/
public void setDdlArtifactsSchema(String ddlArtifactsSchema) {
this.ddlArtifactsSchema = ddlArtifactsSchema;
}
/**
*
* The schema in which the operational DDL database artifacts are created. *
*
* Example: ddlArtifactsSchema=xyzddlschema;
*
* @return The schema in which the operational DDL database artifacts are created.
* Example: ddlArtifactsSchema=xyzddlschema;
*/
public String getDdlArtifactsSchema() {
return this.ddlArtifactsSchema;
}
/**
*
* The schema in which the operational DDL database artifacts are created. *
*
* Example: ddlArtifactsSchema=xyzddlschema;
*
* @param ddlArtifactsSchema
* The schema in which the operational DDL database artifacts are created.
* Example: ddlArtifactsSchema=xyzddlschema;
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withDdlArtifactsSchema(String ddlArtifactsSchema) {
setDdlArtifactsSchema(ddlArtifactsSchema);
return this;
}
/**
*
* Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60 seconds. *
*
* Example: executeTimeout=100;
*
* @param executeTimeout
* Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60 seconds.
* Example: executeTimeout=100;
*/
public void setExecuteTimeout(Integer executeTimeout) {
this.executeTimeout = executeTimeout;
}
/**
*
* Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60 seconds. *
*
* Example: executeTimeout=100;
*
* @return Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60
* seconds.
* Example: executeTimeout=100;
*/
public Integer getExecuteTimeout() {
return this.executeTimeout;
}
/**
*
* Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60 seconds. *
*
* Example: executeTimeout=100;
*
* @param executeTimeout
* Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60 seconds.
* Example: executeTimeout=100;
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withExecuteTimeout(Integer executeTimeout) {
setExecuteTimeout(executeTimeout);
return this;
}
/**
*
* When set to true
, this value causes a task to fail if the actual size of a LOB column is greater
* than the specified LobMaxSize
.
*
* If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB * data. *
* * @param failTasksOnLobTruncation * When set totrue
, this value causes a task to fail if the actual size of a LOB column is
* greater than the specified LobMaxSize
.
* * If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating * the LOB data. */ public void setFailTasksOnLobTruncation(Boolean failTasksOnLobTruncation) { this.failTasksOnLobTruncation = failTasksOnLobTruncation; } /** *
* When set to true
, this value causes a task to fail if the actual size of a LOB column is greater
* than the specified LobMaxSize
.
*
* If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB * data. *
* * @return When set totrue
, this value causes a task to fail if the actual size of a LOB column is
* greater than the specified LobMaxSize
.
* * If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating * the LOB data. */ public Boolean getFailTasksOnLobTruncation() { return this.failTasksOnLobTruncation; } /** *
* When set to true
, this value causes a task to fail if the actual size of a LOB column is greater
* than the specified LobMaxSize
.
*
* If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB * data. *
* * @param failTasksOnLobTruncation * When set totrue
, this value causes a task to fail if the actual size of a LOB column is
* greater than the specified LobMaxSize
.
* * If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating * the LOB data. * @return Returns a reference to this object so that method calls can be chained together. */ public PostgreSQLSettings withFailTasksOnLobTruncation(Boolean failTasksOnLobTruncation) { setFailTasksOnLobTruncation(failTasksOnLobTruncation); return this; } /** *
* When set to true
, this value causes a task to fail if the actual size of a LOB column is greater
* than the specified LobMaxSize
.
*
* If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB * data. *
* * @return When set totrue
, this value causes a task to fail if the actual size of a LOB column is
* greater than the specified LobMaxSize
.
* * If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating * the LOB data. */ public Boolean isFailTasksOnLobTruncation() { return this.failTasksOnLobTruncation; } /** *
* The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical
* replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This
* heartbeat keeps restart_lsn
moving and prevents storage full scenarios.
*
* @param heartbeatEnable
* The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle
* logical replication slots from holding onto old WAL logs, which can result in storage full situations on the
* source. This heartbeat keeps restart_lsn moving and prevents storage full scenarios.
*/
public void setHeartbeatEnable(Boolean heartbeatEnable) {
this.heartbeatEnable = heartbeatEnable;
}
/**
*
* The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical
* replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This
* heartbeat keeps restart_lsn
moving and prevents storage full scenarios.
*
* @return The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle
* logical replication slots from holding onto old WAL logs, which can result in storage full situations on the
* source. This heartbeat keeps restart_lsn moving and prevents storage full scenarios.
*/
public Boolean getHeartbeatEnable() {
return this.heartbeatEnable;
}
/**
*
* The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical
* replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This
* heartbeat keeps restart_lsn
moving and prevents storage full scenarios.
*
* @param heartbeatEnable
* The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle
* logical replication slots from holding onto old WAL logs, which can result in storage full situations on the
* source. This heartbeat keeps restart_lsn moving and prevents storage full scenarios.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withHeartbeatEnable(Boolean heartbeatEnable) {
setHeartbeatEnable(heartbeatEnable);
return this;
}
/**
*
* The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical
* replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This
* heartbeat keeps restart_lsn
moving and prevents storage full scenarios.
*
* @return The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle
* logical replication slots from holding onto old WAL logs, which can result in storage full situations on the
* source. This heartbeat keeps restart_lsn moving and prevents storage full scenarios.
*/
public Boolean isHeartbeatEnable() {
return this.heartbeatEnable;
}
/**
* * Sets the schema in which the heartbeat artifacts are created. *
* * @param heartbeatSchema * Sets the schema in which the heartbeat artifacts are created. */ public void setHeartbeatSchema(String heartbeatSchema) { this.heartbeatSchema = heartbeatSchema; } /** ** Sets the schema in which the heartbeat artifacts are created. *
* * @return Sets the schema in which the heartbeat artifacts are created. */ public String getHeartbeatSchema() { return this.heartbeatSchema; } /** ** Sets the schema in which the heartbeat artifacts are created. *
* * @param heartbeatSchema * Sets the schema in which the heartbeat artifacts are created. * @return Returns a reference to this object so that method calls can be chained together. */ public PostgreSQLSettings withHeartbeatSchema(String heartbeatSchema) { setHeartbeatSchema(heartbeatSchema); return this; } /** ** Sets the WAL heartbeat frequency (in minutes). *
* * @param heartbeatFrequency * Sets the WAL heartbeat frequency (in minutes). */ public void setHeartbeatFrequency(Integer heartbeatFrequency) { this.heartbeatFrequency = heartbeatFrequency; } /** ** Sets the WAL heartbeat frequency (in minutes). *
* * @return Sets the WAL heartbeat frequency (in minutes). */ public Integer getHeartbeatFrequency() { return this.heartbeatFrequency; } /** ** Sets the WAL heartbeat frequency (in minutes). *
* * @param heartbeatFrequency * Sets the WAL heartbeat frequency (in minutes). * @return Returns a reference to this object so that method calls can be chained together. */ public PostgreSQLSettings withHeartbeatFrequency(Integer heartbeatFrequency) { setHeartbeatFrequency(heartbeatFrequency); return this; } /** ** Endpoint connection password. *
* * @param password * Endpoint connection password. */ public void setPassword(String password) { this.password = password; } /** ** Endpoint connection password. *
* * @return Endpoint connection password. */ public String getPassword() { return this.password; } /** ** Endpoint connection password. *
* * @param password * Endpoint connection password. * @return Returns a reference to this object so that method calls can be chained together. */ public PostgreSQLSettings withPassword(String password) { setPassword(password); return this; } /** ** Endpoint TCP port. The default is 5432. *
* * @param port * Endpoint TCP port. The default is 5432. */ public void setPort(Integer port) { this.port = port; } /** ** Endpoint TCP port. The default is 5432. *
* * @return Endpoint TCP port. The default is 5432. */ public Integer getPort() { return this.port; } /** ** Endpoint TCP port. The default is 5432. *
* * @param port * Endpoint TCP port. The default is 5432. * @return Returns a reference to this object so that method calls can be chained together. */ public PostgreSQLSettings withPort(Integer port) { setPort(port); return this; } /** ** The host name of the endpoint database. *
*
* For an Amazon RDS PostgreSQL instance, this is the output of DescribeDBInstances, in the
* Endpoint.Address
* field.
*
* For an Aurora PostgreSQL instance, this is the output of DescribeDBClusters, in the Endpoint
field.
*
* @param serverName
* The host name of the endpoint database.
* For an Amazon RDS PostgreSQL instance, this is the output of DescribeDBInstances, in the
* Endpoint.Address
* field.
*
* For an Aurora PostgreSQL instance, this is the output of DescribeDBClusters, in the Endpoint
field.
*/
public void setServerName(String serverName) {
this.serverName = serverName;
}
/**
*
* The host name of the endpoint database. *
*
* For an Amazon RDS PostgreSQL instance, this is the output of DescribeDBInstances, in the
* Endpoint.Address
* field.
*
* For an Aurora PostgreSQL instance, this is the output of DescribeDBClusters, in the Endpoint
field.
*
* @return The host name of the endpoint database.
* For an Amazon RDS PostgreSQL instance, this is the output of DescribeDBInstances, in the
* Endpoint.Address
* field.
*
* For an Aurora PostgreSQL instance, this is the output of DescribeDBClusters, in the Endpoint
field.
*/
public String getServerName() {
return this.serverName;
}
/**
*
* The host name of the endpoint database. *
*
* For an Amazon RDS PostgreSQL instance, this is the output of DescribeDBInstances, in the
* Endpoint.Address
* field.
*
* For an Aurora PostgreSQL instance, this is the output of DescribeDBClusters, in the Endpoint
field.
*
* @param serverName
* The host name of the endpoint database.
* For an Amazon RDS PostgreSQL instance, this is the output of DescribeDBInstances, in the
* Endpoint.Address
* field.
*
* For an Aurora PostgreSQL instance, this is the output of DescribeDBClusters, in the Endpoint
field.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withServerName(String serverName) {
setServerName(serverName);
return this;
}
/**
*
* Endpoint connection user name. *
* * @param username * Endpoint connection user name. */ public void setUsername(String username) { this.username = username; } /** ** Endpoint connection user name. *
* * @return Endpoint connection user name. */ public String getUsername() { return this.username; } /** ** Endpoint connection user name. *
* * @param username * Endpoint connection user name. * @return Returns a reference to this object so that method calls can be chained together. */ public PostgreSQLSettings withUsername(String username) { setUsername(username); return this; } /** ** Sets the name of a previously created logical replication slot for a change data capture (CDC) load of the * PostgreSQL source instance. *
*
* When used with the CdcStartPosition
request parameter for the DMS API , this attribute also makes it
* possible to use native CDC start points. DMS verifies that the specified logical replication slot exists before
* starting the CDC load task. It also verifies that the task was created with a valid setting of
* CdcStartPosition
. If the specified slot doesn't exist or the task doesn't have a valid
* CdcStartPosition
setting, DMS raises an error.
*
* For more information about setting the CdcStartPosition
request parameter, see Determining a CDC native start point in the Database Migration Service User Guide. For more
* information about using CdcStartPosition
, see CreateReplicationTask, StartReplicationTask, and ModifyReplicationTask.
*
* @param slotName
* Sets the name of a previously created logical replication slot for a change data capture (CDC) load of the
* PostgreSQL source instance.
* When used with the CdcStartPosition
request parameter for the DMS API , this attribute also
* makes it possible to use native CDC start points. DMS verifies that the specified logical replication slot
* exists before starting the CDC load task. It also verifies that the task was created with a valid setting
* of CdcStartPosition
. If the specified slot doesn't exist or the task doesn't have a valid
* CdcStartPosition
setting, DMS raises an error.
*
* For more information about setting the CdcStartPosition
request parameter, see Determining a CDC native start point in the Database Migration Service User Guide. For more
* information about using CdcStartPosition
, see CreateReplicationTask, StartReplicationTask, and ModifyReplicationTask.
*/
public void setSlotName(String slotName) {
this.slotName = slotName;
}
/**
*
* Sets the name of a previously created logical replication slot for a change data capture (CDC) load of the * PostgreSQL source instance. *
*
* When used with the CdcStartPosition
request parameter for the DMS API , this attribute also makes it
* possible to use native CDC start points. DMS verifies that the specified logical replication slot exists before
* starting the CDC load task. It also verifies that the task was created with a valid setting of
* CdcStartPosition
. If the specified slot doesn't exist or the task doesn't have a valid
* CdcStartPosition
setting, DMS raises an error.
*
* For more information about setting the CdcStartPosition
request parameter, see Determining a CDC native start point in the Database Migration Service User Guide. For more
* information about using CdcStartPosition
, see CreateReplicationTask, StartReplicationTask, and ModifyReplicationTask.
*
* @return Sets the name of a previously created logical replication slot for a change data capture (CDC) load of
* the PostgreSQL source instance.
* When used with the CdcStartPosition
request parameter for the DMS API , this attribute also
* makes it possible to use native CDC start points. DMS verifies that the specified logical replication
* slot exists before starting the CDC load task. It also verifies that the task was created with a valid
* setting of CdcStartPosition
. If the specified slot doesn't exist or the task doesn't have a
* valid CdcStartPosition
setting, DMS raises an error.
*
* For more information about setting the CdcStartPosition
request parameter, see Determining a CDC native start point in the Database Migration Service User Guide. For more
* information about using CdcStartPosition
, see CreateReplicationTask, StartReplicationTask, and ModifyReplicationTask.
*/
public String getSlotName() {
return this.slotName;
}
/**
*
* Sets the name of a previously created logical replication slot for a change data capture (CDC) load of the * PostgreSQL source instance. *
*
* When used with the CdcStartPosition
request parameter for the DMS API , this attribute also makes it
* possible to use native CDC start points. DMS verifies that the specified logical replication slot exists before
* starting the CDC load task. It also verifies that the task was created with a valid setting of
* CdcStartPosition
. If the specified slot doesn't exist or the task doesn't have a valid
* CdcStartPosition
setting, DMS raises an error.
*
* For more information about setting the CdcStartPosition
request parameter, see Determining a CDC native start point in the Database Migration Service User Guide. For more
* information about using CdcStartPosition
, see CreateReplicationTask, StartReplicationTask, and ModifyReplicationTask.
*
* @param slotName
* Sets the name of a previously created logical replication slot for a change data capture (CDC) load of the
* PostgreSQL source instance.
* When used with the CdcStartPosition
request parameter for the DMS API , this attribute also
* makes it possible to use native CDC start points. DMS verifies that the specified logical replication slot
* exists before starting the CDC load task. It also verifies that the task was created with a valid setting
* of CdcStartPosition
. If the specified slot doesn't exist or the task doesn't have a valid
* CdcStartPosition
setting, DMS raises an error.
*
* For more information about setting the CdcStartPosition
request parameter, see Determining a CDC native start point in the Database Migration Service User Guide. For more
* information about using CdcStartPosition
, see CreateReplicationTask, StartReplicationTask, and ModifyReplicationTask.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withSlotName(String slotName) {
setSlotName(slotName);
return this;
}
/**
*
* Specifies the plugin to use to create a replication slot. *
* * @param pluginName * Specifies the plugin to use to create a replication slot. * @see PluginNameValue */ public void setPluginName(String pluginName) { this.pluginName = pluginName; } /** ** Specifies the plugin to use to create a replication slot. *
* * @return Specifies the plugin to use to create a replication slot. * @see PluginNameValue */ public String getPluginName() { return this.pluginName; } /** ** Specifies the plugin to use to create a replication slot. *
* * @param pluginName * Specifies the plugin to use to create a replication slot. * @return Returns a reference to this object so that method calls can be chained together. * @see PluginNameValue */ public PostgreSQLSettings withPluginName(String pluginName) { setPluginName(pluginName); return this; } /** ** Specifies the plugin to use to create a replication slot. *
* * @param pluginName * Specifies the plugin to use to create a replication slot. * @return Returns a reference to this object so that method calls can be chained together. * @see PluginNameValue */ public PostgreSQLSettings withPluginName(PluginNameValue pluginName) { this.pluginName = pluginName.toString(); return this; } /** *
* The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the
* required permissions to access the value in SecretsManagerSecret
. The role must allow the
* iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services
* Secrets Manager secret that allows access to the PostgreSQL endpoint.
*
* You can specify one of two sets of values for these permissions. You can specify the values for this setting and
* SecretsManagerSecretId
. Or you can specify clear-text values for UserName
,
* Password
, ServerName
, and Port
. You can't specify both. For more
* information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
* and SecretsManagerSecretId
required to access it, see Using
* secrets to access Database Migration Service resources in the Database Migration Service User Guide.
*
* @param secretsManagerAccessRoleArn
* The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the
* required permissions to access the value in SecretsManagerSecret. The role must allow the
* iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web
* Services Secrets Manager secret that allows access to the PostgreSQL endpoint.
* You can specify one of two sets of values for these permissions. You can specify the values for this
* setting and SecretsManagerSecretId
. Or you can specify clear-text values for
* UserName
, Password
, ServerName
, and Port
. You can't
* specify both. For more information on creating this SecretsManagerSecret
and the
* SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it,
* see Using secrets to access Database Migration Service resources in the Database Migration Service
* User Guide.
*/
public void setSecretsManagerAccessRoleArn(String secretsManagerAccessRoleArn) {
this.secretsManagerAccessRoleArn = secretsManagerAccessRoleArn;
}
/**
*
* The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the
* required permissions to access the value in SecretsManagerSecret
. The role must allow the
* iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services
* Secrets Manager secret that allows access to the PostgreSQL endpoint.
*
* You can specify one of two sets of values for these permissions. You can specify the values for this setting and
* SecretsManagerSecretId
. Or you can specify clear-text values for UserName
,
* Password
, ServerName
, and Port
. You can't specify both. For more
* information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
* and SecretsManagerSecretId
required to access it, see Using
* secrets to access Database Migration Service resources in the Database Migration Service User Guide.
*
* @return The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants
* the required permissions to access the value in SecretsManagerSecret. The role must allow
* the iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web
* Services Secrets Manager secret that allows access to the PostgreSQL endpoint.
* You can specify one of two sets of values for these permissions. You can specify the values for this
* setting and SecretsManagerSecretId
. Or you can specify clear-text values for
* UserName
, Password
, ServerName
, and Port
. You can't
* specify both. For more information on creating this SecretsManagerSecret
and the
* SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it,
* see Using secrets to access Database Migration Service resources in the Database Migration Service
* User Guide.
*/
public String getSecretsManagerAccessRoleArn() {
return this.secretsManagerAccessRoleArn;
}
/**
*
* The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the
* required permissions to access the value in SecretsManagerSecret
. The role must allow the
* iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services
* Secrets Manager secret that allows access to the PostgreSQL endpoint.
*
* You can specify one of two sets of values for these permissions. You can specify the values for this setting and
* SecretsManagerSecretId
. Or you can specify clear-text values for UserName
,
* Password
, ServerName
, and Port
. You can't specify both. For more
* information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
* and SecretsManagerSecretId
required to access it, see Using
* secrets to access Database Migration Service resources in the Database Migration Service User Guide.
*
* @param secretsManagerAccessRoleArn
* The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the
* required permissions to access the value in SecretsManagerSecret. The role must allow the
* iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web
* Services Secrets Manager secret that allows access to the PostgreSQL endpoint.
* You can specify one of two sets of values for these permissions. You can specify the values for this
* setting and SecretsManagerSecretId
. Or you can specify clear-text values for
* UserName
, Password
, ServerName
, and Port
. You can't
* specify both. For more information on creating this SecretsManagerSecret
and the
* SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it,
* see Using secrets to access Database Migration Service resources in the Database Migration Service
* User Guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withSecretsManagerAccessRoleArn(String secretsManagerAccessRoleArn) {
setSecretsManagerAccessRoleArn(secretsManagerAccessRoleArn);
return this;
}
/**
*
* The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the PostgreSQL
* endpoint connection details.
*
* @param secretsManagerSecretId
* The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the PostgreSQL endpoint
* connection details.
*/
public void setSecretsManagerSecretId(String secretsManagerSecretId) {
this.secretsManagerSecretId = secretsManagerSecretId;
}
/**
*
* The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the PostgreSQL
* endpoint connection details.
*
* @return The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the PostgreSQL
* endpoint connection details.
*/
public String getSecretsManagerSecretId() {
return this.secretsManagerSecretId;
}
/**
*
* The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the PostgreSQL
* endpoint connection details.
*
* @param secretsManagerSecretId
* The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the PostgreSQL endpoint
* connection details.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withSecretsManagerSecretId(String secretsManagerSecretId) {
setSecretsManagerSecretId(secretsManagerSecretId);
return this;
}
/**
*
* Use the TrimSpaceInChar
source endpoint setting to trim data on CHAR and NCHAR data types during
* migration. The default value is true
.
*
* @param trimSpaceInChar
* Use the TrimSpaceInChar source endpoint setting to trim data on CHAR and NCHAR data types during migration.
* The default value is true.
*/
public void setTrimSpaceInChar(Boolean trimSpaceInChar) {
this.trimSpaceInChar = trimSpaceInChar;
}
/**
*
* Use the TrimSpaceInChar
source endpoint setting to trim data on CHAR and NCHAR data types during
* migration. The default value is true
.
*
* @return Use the TrimSpaceInChar source endpoint setting to trim data on CHAR and NCHAR data types during
* migration. The default value is true.
*/
public Boolean getTrimSpaceInChar() {
return this.trimSpaceInChar;
}
/**
*
* Use the TrimSpaceInChar
source endpoint setting to trim data on CHAR and NCHAR data types during
* migration. The default value is true
.
*
* @param trimSpaceInChar
* Use the TrimSpaceInChar source endpoint setting to trim data on CHAR and NCHAR data types during migration.
* The default value is true.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withTrimSpaceInChar(Boolean trimSpaceInChar) {
setTrimSpaceInChar(trimSpaceInChar);
return this;
}
/**
*
* Use the TrimSpaceInChar
source endpoint setting to trim data on CHAR and NCHAR data types during
* migration. The default value is true
.
*
* @return Use the TrimSpaceInChar source endpoint setting to trim data on CHAR and NCHAR data types during
* migration. The default value is true.
*/
public Boolean isTrimSpaceInChar() {
return this.trimSpaceInChar;
}
/**
*
* When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as
* varchar(5)
.
*
* @param mapBooleanAsBoolean
* When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as
* varchar(5).
*/
public void setMapBooleanAsBoolean(Boolean mapBooleanAsBoolean) {
this.mapBooleanAsBoolean = mapBooleanAsBoolean;
}
/**
*
* When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as
* varchar(5)
.
*
* @return When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates
* booleans as varchar(5).
*/
public Boolean getMapBooleanAsBoolean() {
return this.mapBooleanAsBoolean;
}
/**
*
* When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as
* varchar(5)
.
*
* @param mapBooleanAsBoolean
* When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as
* varchar(5).
* @return Returns a reference to this object so that method calls can be chained together.
*/
public PostgreSQLSettings withMapBooleanAsBoolean(Boolean mapBooleanAsBoolean) {
setMapBooleanAsBoolean(mapBooleanAsBoolean);
return this;
}
/**
*
* When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as
* varchar(5)
.
*
* @return When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates
* booleans as varchar(5).
*/
public Boolean isMapBooleanAsBoolean() {
return this.mapBooleanAsBoolean;
}
/**
* * When true, DMS migrates JSONB values as CLOB. *
* * @param mapJsonbAsClob * When true, DMS migrates JSONB values as CLOB. */ public void setMapJsonbAsClob(Boolean mapJsonbAsClob) { this.mapJsonbAsClob = mapJsonbAsClob; } /** ** When true, DMS migrates JSONB values as CLOB. *
* * @return When true, DMS migrates JSONB values as CLOB. */ public Boolean getMapJsonbAsClob() { return this.mapJsonbAsClob; } /** ** When true, DMS migrates JSONB values as CLOB. *
* * @param mapJsonbAsClob * When true, DMS migrates JSONB values as CLOB. * @return Returns a reference to this object so that method calls can be chained together. */ public PostgreSQLSettings withMapJsonbAsClob(Boolean mapJsonbAsClob) { setMapJsonbAsClob(mapJsonbAsClob); return this; } /** ** When true, DMS migrates JSONB values as CLOB. *
* * @return When true, DMS migrates JSONB values as CLOB. */ public Boolean isMapJsonbAsClob() { return this.mapJsonbAsClob; } /** ** When true, DMS migrates LONG values as VARCHAR. *
* * @param mapLongVarcharAs * When true, DMS migrates LONG values as VARCHAR. * @see LongVarcharMappingType */ public void setMapLongVarcharAs(String mapLongVarcharAs) { this.mapLongVarcharAs = mapLongVarcharAs; } /** ** When true, DMS migrates LONG values as VARCHAR. *
* * @return When true, DMS migrates LONG values as VARCHAR. * @see LongVarcharMappingType */ public String getMapLongVarcharAs() { return this.mapLongVarcharAs; } /** ** When true, DMS migrates LONG values as VARCHAR. *
* * @param mapLongVarcharAs * When true, DMS migrates LONG values as VARCHAR. * @return Returns a reference to this object so that method calls can be chained together. * @see LongVarcharMappingType */ public PostgreSQLSettings withMapLongVarcharAs(String mapLongVarcharAs) { setMapLongVarcharAs(mapLongVarcharAs); return this; } /** ** When true, DMS migrates LONG values as VARCHAR. *
* * @param mapLongVarcharAs * When true, DMS migrates LONG values as VARCHAR. * @return Returns a reference to this object so that method calls can be chained together. * @see LongVarcharMappingType */ public PostgreSQLSettings withMapLongVarcharAs(LongVarcharMappingType mapLongVarcharAs) { this.mapLongVarcharAs = mapLongVarcharAs.toString(); return this; } /** ** Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You can use * this setting to specify replication behavior for endpoints that require additional configuration, such as * Babelfish endpoints. *
* * @param databaseMode * Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You * can use this setting to specify replication behavior for endpoints that require additional configuration, * such as Babelfish endpoints. * @see DatabaseMode */ public void setDatabaseMode(String databaseMode) { this.databaseMode = databaseMode; } /** ** Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You can use * this setting to specify replication behavior for endpoints that require additional configuration, such as * Babelfish endpoints. *
* * @return Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You * can use this setting to specify replication behavior for endpoints that require additional configuration, * such as Babelfish endpoints. * @see DatabaseMode */ public String getDatabaseMode() { return this.databaseMode; } /** ** Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You can use * this setting to specify replication behavior for endpoints that require additional configuration, such as * Babelfish endpoints. *
* * @param databaseMode * Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You * can use this setting to specify replication behavior for endpoints that require additional configuration, * such as Babelfish endpoints. * @return Returns a reference to this object so that method calls can be chained together. * @see DatabaseMode */ public PostgreSQLSettings withDatabaseMode(String databaseMode) { setDatabaseMode(databaseMode); return this; } /** ** Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You can use * this setting to specify replication behavior for endpoints that require additional configuration, such as * Babelfish endpoints. *
* * @param databaseMode * Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You * can use this setting to specify replication behavior for endpoints that require additional configuration, * such as Babelfish endpoints. * @return Returns a reference to this object so that method calls can be chained together. * @see DatabaseMode */ public PostgreSQLSettings withDatabaseMode(DatabaseMode databaseMode) { this.databaseMode = databaseMode.toString(); return this; } /** ** The Babelfish for Aurora PostgreSQL database name for the endpoint. *
* * @param babelfishDatabaseName * The Babelfish for Aurora PostgreSQL database name for the endpoint. */ public void setBabelfishDatabaseName(String babelfishDatabaseName) { this.babelfishDatabaseName = babelfishDatabaseName; } /** ** The Babelfish for Aurora PostgreSQL database name for the endpoint. *
* * @return The Babelfish for Aurora PostgreSQL database name for the endpoint. */ public String getBabelfishDatabaseName() { return this.babelfishDatabaseName; } /** ** The Babelfish for Aurora PostgreSQL database name for the endpoint. *
* * @param babelfishDatabaseName * The Babelfish for Aurora PostgreSQL database name for the endpoint. * @return Returns a reference to this object so that method calls can be chained together. */ public PostgreSQLSettings withBabelfishDatabaseName(String babelfishDatabaseName) { setBabelfishDatabaseName(babelfishDatabaseName); return this; } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getAfterConnectScript() != null) sb.append("AfterConnectScript: ").append(getAfterConnectScript()).append(","); if (getCaptureDdls() != null) sb.append("CaptureDdls: ").append(getCaptureDdls()).append(","); if (getMaxFileSize() != null) sb.append("MaxFileSize: ").append(getMaxFileSize()).append(","); if (getDatabaseName() != null) sb.append("DatabaseName: ").append(getDatabaseName()).append(","); if (getDdlArtifactsSchema() != null) sb.append("DdlArtifactsSchema: ").append(getDdlArtifactsSchema()).append(","); if (getExecuteTimeout() != null) sb.append("ExecuteTimeout: ").append(getExecuteTimeout()).append(","); if (getFailTasksOnLobTruncation() != null) sb.append("FailTasksOnLobTruncation: ").append(getFailTasksOnLobTruncation()).append(","); if (getHeartbeatEnable() != null) sb.append("HeartbeatEnable: ").append(getHeartbeatEnable()).append(","); if (getHeartbeatSchema() != null) sb.append("HeartbeatSchema: ").append(getHeartbeatSchema()).append(","); if (getHeartbeatFrequency() != null) sb.append("HeartbeatFrequency: ").append(getHeartbeatFrequency()).append(","); if (getPassword() != null) sb.append("Password: ").append("***Sensitive Data Redacted***").append(","); if (getPort() != null) sb.append("Port: ").append(getPort()).append(","); if (getServerName() != null) sb.append("ServerName: ").append(getServerName()).append(","); if (getUsername() != null) sb.append("Username: ").append(getUsername()).append(","); if (getSlotName() != null) sb.append("SlotName: ").append(getSlotName()).append(","); if (getPluginName() != null) sb.append("PluginName: ").append(getPluginName()).append(","); if (getSecretsManagerAccessRoleArn() != null) sb.append("SecretsManagerAccessRoleArn: ").append(getSecretsManagerAccessRoleArn()).append(","); if (getSecretsManagerSecretId() != null) sb.append("SecretsManagerSecretId: ").append(getSecretsManagerSecretId()).append(","); if (getTrimSpaceInChar() != null) sb.append("TrimSpaceInChar: ").append(getTrimSpaceInChar()).append(","); if (getMapBooleanAsBoolean() != null) sb.append("MapBooleanAsBoolean: ").append(getMapBooleanAsBoolean()).append(","); if (getMapJsonbAsClob() != null) sb.append("MapJsonbAsClob: ").append(getMapJsonbAsClob()).append(","); if (getMapLongVarcharAs() != null) sb.append("MapLongVarcharAs: ").append(getMapLongVarcharAs()).append(","); if (getDatabaseMode() != null) sb.append("DatabaseMode: ").append(getDatabaseMode()).append(","); if (getBabelfishDatabaseName() != null) sb.append("BabelfishDatabaseName: ").append(getBabelfishDatabaseName()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof PostgreSQLSettings == false) return false; PostgreSQLSettings other = (PostgreSQLSettings) obj; if (other.getAfterConnectScript() == null ^ 
this.getAfterConnectScript() == null) return false; if (other.getAfterConnectScript() != null && other.getAfterConnectScript().equals(this.getAfterConnectScript()) == false) return false; if (other.getCaptureDdls() == null ^ this.getCaptureDdls() == null) return false; if (other.getCaptureDdls() != null && other.getCaptureDdls().equals(this.getCaptureDdls()) == false) return false; if (other.getMaxFileSize() == null ^ this.getMaxFileSize() == null) return false; if (other.getMaxFileSize() != null && other.getMaxFileSize().equals(this.getMaxFileSize()) == false) return false; if (other.getDatabaseName() == null ^ this.getDatabaseName() == null) return false; if (other.getDatabaseName() != null && other.getDatabaseName().equals(this.getDatabaseName()) == false) return false; if (other.getDdlArtifactsSchema() == null ^ this.getDdlArtifactsSchema() == null) return false; if (other.getDdlArtifactsSchema() != null && other.getDdlArtifactsSchema().equals(this.getDdlArtifactsSchema()) == false) return false; if (other.getExecuteTimeout() == null ^ this.getExecuteTimeout() == null) return false; if (other.getExecuteTimeout() != null && other.getExecuteTimeout().equals(this.getExecuteTimeout()) == false) return false; if (other.getFailTasksOnLobTruncation() == null ^ this.getFailTasksOnLobTruncation() == null) return false; if (other.getFailTasksOnLobTruncation() != null && other.getFailTasksOnLobTruncation().equals(this.getFailTasksOnLobTruncation()) == false) return false; if (other.getHeartbeatEnable() == null ^ this.getHeartbeatEnable() == null) return false; if (other.getHeartbeatEnable() != null && other.getHeartbeatEnable().equals(this.getHeartbeatEnable()) == false) return false; if (other.getHeartbeatSchema() == null ^ this.getHeartbeatSchema() == null) return false; if (other.getHeartbeatSchema() != null && other.getHeartbeatSchema().equals(this.getHeartbeatSchema()) == false) return false; if (other.getHeartbeatFrequency() == null ^ this.getHeartbeatFrequency() == null) return false; if (other.getHeartbeatFrequency() != null && other.getHeartbeatFrequency().equals(this.getHeartbeatFrequency()) == false) return false; if (other.getPassword() == null ^ this.getPassword() == null) return false; if (other.getPassword() != null && other.getPassword().equals(this.getPassword()) == false) return false; if (other.getPort() == null ^ this.getPort() == null) return false; if (other.getPort() != null && other.getPort().equals(this.getPort()) == false) return false; if (other.getServerName() == null ^ this.getServerName() == null) return false; if (other.getServerName() != null && other.getServerName().equals(this.getServerName()) == false) return false; if (other.getUsername() == null ^ this.getUsername() == null) return false; if (other.getUsername() != null && other.getUsername().equals(this.getUsername()) == false) return false; if (other.getSlotName() == null ^ this.getSlotName() == null) return false; if (other.getSlotName() != null && other.getSlotName().equals(this.getSlotName()) == false) return false; if (other.getPluginName() == null ^ this.getPluginName() == null) return false; if (other.getPluginName() != null && other.getPluginName().equals(this.getPluginName()) == false) return false; if (other.getSecretsManagerAccessRoleArn() == null ^ this.getSecretsManagerAccessRoleArn() == null) return false; if (other.getSecretsManagerAccessRoleArn() != null && other.getSecretsManagerAccessRoleArn().equals(this.getSecretsManagerAccessRoleArn()) == false) return false; if 
(other.getSecretsManagerSecretId() == null ^ this.getSecretsManagerSecretId() == null) return false; if (other.getSecretsManagerSecretId() != null && other.getSecretsManagerSecretId().equals(this.getSecretsManagerSecretId()) == false) return false; if (other.getTrimSpaceInChar() == null ^ this.getTrimSpaceInChar() == null) return false; if (other.getTrimSpaceInChar() != null && other.getTrimSpaceInChar().equals(this.getTrimSpaceInChar()) == false) return false; if (other.getMapBooleanAsBoolean() == null ^ this.getMapBooleanAsBoolean() == null) return false; if (other.getMapBooleanAsBoolean() != null && other.getMapBooleanAsBoolean().equals(this.getMapBooleanAsBoolean()) == false) return false; if (other.getMapJsonbAsClob() == null ^ this.getMapJsonbAsClob() == null) return false; if (other.getMapJsonbAsClob() != null && other.getMapJsonbAsClob().equals(this.getMapJsonbAsClob()) == false) return false; if (other.getMapLongVarcharAs() == null ^ this.getMapLongVarcharAs() == null) return false; if (other.getMapLongVarcharAs() != null && other.getMapLongVarcharAs().equals(this.getMapLongVarcharAs()) == false) return false; if (other.getDatabaseMode() == null ^ this.getDatabaseMode() == null) return false; if (other.getDatabaseMode() != null && other.getDatabaseMode().equals(this.getDatabaseMode()) == false) return false; if (other.getBabelfishDatabaseName() == null ^ this.getBabelfishDatabaseName() == null) return false; if (other.getBabelfishDatabaseName() != null && other.getBabelfishDatabaseName().equals(this.getBabelfishDatabaseName()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getAfterConnectScript() == null) ? 0 : getAfterConnectScript().hashCode()); hashCode = prime * hashCode + ((getCaptureDdls() == null) ? 0 : getCaptureDdls().hashCode()); hashCode = prime * hashCode + ((getMaxFileSize() == null) ? 0 : getMaxFileSize().hashCode()); hashCode = prime * hashCode + ((getDatabaseName() == null) ? 0 : getDatabaseName().hashCode()); hashCode = prime * hashCode + ((getDdlArtifactsSchema() == null) ? 0 : getDdlArtifactsSchema().hashCode()); hashCode = prime * hashCode + ((getExecuteTimeout() == null) ? 0 : getExecuteTimeout().hashCode()); hashCode = prime * hashCode + ((getFailTasksOnLobTruncation() == null) ? 0 : getFailTasksOnLobTruncation().hashCode()); hashCode = prime * hashCode + ((getHeartbeatEnable() == null) ? 0 : getHeartbeatEnable().hashCode()); hashCode = prime * hashCode + ((getHeartbeatSchema() == null) ? 0 : getHeartbeatSchema().hashCode()); hashCode = prime * hashCode + ((getHeartbeatFrequency() == null) ? 0 : getHeartbeatFrequency().hashCode()); hashCode = prime * hashCode + ((getPassword() == null) ? 0 : getPassword().hashCode()); hashCode = prime * hashCode + ((getPort() == null) ? 0 : getPort().hashCode()); hashCode = prime * hashCode + ((getServerName() == null) ? 0 : getServerName().hashCode()); hashCode = prime * hashCode + ((getUsername() == null) ? 0 : getUsername().hashCode()); hashCode = prime * hashCode + ((getSlotName() == null) ? 0 : getSlotName().hashCode()); hashCode = prime * hashCode + ((getPluginName() == null) ? 0 : getPluginName().hashCode()); hashCode = prime * hashCode + ((getSecretsManagerAccessRoleArn() == null) ? 0 : getSecretsManagerAccessRoleArn().hashCode()); hashCode = prime * hashCode + ((getSecretsManagerSecretId() == null) ? 0 : getSecretsManagerSecretId().hashCode()); hashCode = prime * hashCode + ((getTrimSpaceInChar() == null) ? 
0 : getTrimSpaceInChar().hashCode()); hashCode = prime * hashCode + ((getMapBooleanAsBoolean() == null) ? 0 : getMapBooleanAsBoolean().hashCode()); hashCode = prime * hashCode + ((getMapJsonbAsClob() == null) ? 0 : getMapJsonbAsClob().hashCode()); hashCode = prime * hashCode + ((getMapLongVarcharAs() == null) ? 0 : getMapLongVarcharAs().hashCode()); hashCode = prime * hashCode + ((getDatabaseMode() == null) ? 0 : getDatabaseMode().hashCode()); hashCode = prime * hashCode + ((getBabelfishDatabaseName() == null) ? 0 : getBabelfishDatabaseName().hashCode()); return hashCode; } @Override public PostgreSQLSettings clone() { try { return (PostgreSQLSettings) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } @com.amazonaws.annotation.SdkInternalApi @Override public void marshall(ProtocolMarshaller protocolMarshaller) { com.amazonaws.services.databasemigrationservice.model.transform.PostgreSQLSettingsMarshaller.getInstance().marshall(this, protocolMarshaller); } }