/* * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.databasemigrationservice.model; import java.io.Serializable; import javax.annotation.Generated; import com.amazonaws.protocol.StructuredPojo; import com.amazonaws.protocol.ProtocolMarshaller; /** *

* Settings for exporting data to Amazon S3. *

* * @see AWS API * Documentation */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class S3Settings implements Serializable, Cloneable, StructuredPojo { /** *

* The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the * iam:PassRole action. It is a required parameter that enables DMS to write and read objects from an * S3 bucket. *

*/ private String serviceAccessRoleArn; /** *

* Specifies how tables are defined in the S3 source files only. *

*/ private String externalTableDefinition; /** *

* The delimiter used to separate rows in the .csv file for both source and target. The default is a newline * (\n). *

*/ private String csvRowDelimiter; /** *

* The delimiter used to separate columns in the .csv file for both source and target. The default is a comma. *

*/ private String csvDelimiter; /** *

* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path * bucketFolder/schema_name/table_name/. If this parameter isn't specified, then * the path used is schema_name/table_name/. *

*/ private String bucketFolder; /** *

* The name of the S3 bucket. *

*/ private String bucketName; /** *

* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either * set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies * to both .csv and .parquet file formats. *

*/ private String compressionType; /** *

* The type of server-side encryption that you want to use for your data. This encryption type is part of the * endpoint settings or the extra connection attributes for Amazon S3. You can choose either SSE_S3 * (the default) or SSE_KMS. *

* *

* For the ModifyEndpoint operation, you can change the existing value of the * EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t change the * existing value from SSE_S3 to SSE_KMS. *

*
*

* To use SSE_S3, you need an Identity and Access Management (IAM) role with permission to allow * "arn:aws:s3:::dms-*" to use the following actions: *

* */ private String encryptionMode; /** *

* If you are using SSE_KMS for the EncryptionMode, provide the KMS key ID. The key that * you use needs an attached policy that enables Identity and Access Management (IAM) user permissions and allows * use of the key. *

*

* Here is a CLI example: * aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value *
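*
* A minimal Java sketch of the same endpoint settings, assuming the generated with* fluent setters on this
* class (the role ARN, bucket names, and KMS key ID below are hypothetical placeholders):
*
*     S3Settings settings = new S3Settings()
*             .withServiceAccessRoleArn("arn:aws:iam::123456789012:role/dms-s3-access") // hypothetical role ARN
*             .withBucketFolder("my-folder")                                            // hypothetical folder
*             .withBucketName("my-target-bucket")                                       // hypothetical bucket
*             .withEncryptionMode("SSE_KMS")
*             .withServerSideEncryptionKmsKeyId("arn:aws:kms:us-east-1:123456789012:key/example-key-id"); // hypothetical key
*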

*/ private String serverSideEncryptionKmsKeyId; /** *

* The format of the data that you want to use for output. You can choose one of the following: *

* */ private String dataFormat; /** *

* The type of encoding you are using: *

* */ private String encodingType; /** *

* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is * stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the * maximum size of a dictionary page before it reverts to PLAIN encoding. This size is used for * .parquet file format only. *

*/ private Integer dictPageSizeLimit; /** *

* The number of rows in a row group. A smaller row group size provides faster reads. But as the number of row * groups grows, writes become slower. This parameter defaults to 10,000 rows. This number is used for .parquet * file format only. *

*

* If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in * bytes (64 * 1024 * 1024). *

*/ private Integer rowGroupLength; /** *

* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for * .parquet file format only. *

*/ private Integer dataPageSize; /** *

* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or * parquet_2_0. *

*/ private String parquetVersion; /** *

* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, * false to disable. Statistics include NULL, DISTINCT, MAX, and * MIN values. This parameter defaults to true. This value is used for .parquet file * format only. *

*/ private Boolean enableStatistics; /** *

* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) or .parquet * output files only to indicate how the rows were added to the source database. *

* *

* DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later. *

*

* DMS supports the use of the .parquet files with the IncludeOpForFullLoad parameter in versions 3.4.7 * and later. *

*
*

* For full load, records can only be inserted. By default (the false setting), no information is * recorded in these output files for a full load to indicate that the rows were inserted at the source database. If * IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I * annotation in the first field of the .csv file. This allows the format of your target records from a full load to * be consistent with the target records from a CDC load. *

* *

* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates * parameters for output to .csv files only. For more information about how these settings work together, see * Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide. *

*
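*
* For example, a target that annotates full-load rows might be configured as follows (a sketch assuming the
* generated with* fluent setters; the bucket name is a hypothetical placeholder):
*
*     S3Settings settings = new S3Settings()
*             .withBucketName("my-target-bucket")   // hypothetical bucket
*             .withIncludeOpForFullLoad(true);      // full-load INSERTs recorded as "I" in the first .csv field
*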
*/ private Boolean includeOpForFullLoad; /** *

* A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage * (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet * record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was * inserted, updated, or deleted at the source database for a CDC load to the target. *

*

* If CdcInsertsOnly is set to true or y, only INSERTs from the source * database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends * on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is set to * true, the first field of every CDC record is set to I to indicate the INSERT operation at the * source. If IncludeOpForFullLoad is set to false, every CDC record is written without a * first field to indicate the INSERT operation at the source. For more information about how these settings work * together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide. *

* *

* DMS supports the preceding interaction between the CdcInsertsOnly and * IncludeOpForFullLoad parameters in versions 3.1.4 and later. *

*

* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the * same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true * for the same endpoint, but not both. *

*
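*
* A sketch of an INSERT-only CDC configuration (generated with* fluent setters assumed):
*
*     S3Settings settings = new S3Settings()
*             .withIncludeOpForFullLoad(true)   // keep the "I" annotation in the first field
*             .withCdcInsertsOnly(true);        // migrate only INSERTs from the source
*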
*/ private Boolean cdcInsertsOnly; /** *

* A value that, when nonblank, causes DMS to add a column with timestamp information to the endpoint data for an * Amazon S3 target. *

* *

* DMS supports the TimestampColumnName parameter in versions 3.1.4 and later. *

*
*

* DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data * when you set TimestampColumnName to a nonblank value. *

*

* For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from * the source to the target by DMS. *

*

* For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of * that row in the source database. *

*

* The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the * precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit * timestamp supported by DMS for the source database. *

*

* When the AddColumnName parameter is set to true, DMS also includes a name for the * timestamp column that you set with TimestampColumnName. *
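*
* For example (a sketch assuming the generated with* fluent setters; the column name below is a hypothetical
* placeholder):
*
*     S3Settings settings = new S3Settings()
*             .withAddColumnName(true)                    // also write column names, including the one below
*             .withTimestampColumnName("dms_commit_ts");  // hypothetical name for the added STRING column
*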

*/ private String timestampColumnName; /** *

* A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 * object file in .parquet format. *

* *

* DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later. *

*
*

* When ParquetTimestampInMillisecond is set to true or y, DMS writes all * TIMESTAMP columns in a .parquet formatted file with millisecond precision. Otherwise, DMS writes * them with microsecond precision. *

*

* Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP values. Set * this parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to * query or process the data with Athena or Glue. *

* *

* DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond * precision. *

*

* Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column * value that is inserted by setting the TimestampColumnName parameter. *

*
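*
* A sketch of an Athena/Glue-friendly .parquet configuration (generated with* fluent setters assumed):
*
*     S3Settings settings = new S3Settings()
*             .withDataFormat("parquet")                 // write .parquet output files
*             .withParquetTimestampInMillisecond(true);  // millisecond TIMESTAMP precision for Athena/Glue
*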
*/ private Boolean parquetTimestampInMillisecond; /** *

* A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet * (columnar storage) output files. The default setting is false, but when * CdcInsertsAndUpdates is set to true or y, only INSERTs and UPDATEs from * the source database are migrated to the .csv or .parquet file. *

* *

* DMS supports the use of the .parquet files in versions 3.4.7 and later. *

*
*

* How these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad * parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC record * is set to either I or U to indicate INSERT and UPDATE operations at the source. But if * IncludeOpForFullLoad is set to false, CDC records are written without an indication of * INSERT or UPDATE operations at the source. For more information about how these settings work together, see * Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide. *

* *

* DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later. *

*

* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the * same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true * for the same endpoint, but not both. *

*
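*
* A sketch migrating INSERTs and UPDATEs but not DELETEs (generated with* fluent setters assumed); compare
* the CdcInsertsOnly sketch above, since the two settings can't both be true:
*
*     S3Settings settings = new S3Settings()
*             .withIncludeOpForFullLoad(true)    // first field carries "I" or "U"
*             .withCdcInsertsAndUpdates(true);   // skip DELETE operations from the source
*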
*/ private Boolean cdcInsertsAndUpdates; /** *

* When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The * default value is false. For more information about date-based folder partitioning, see Using * date-based folder partitioning. *

*/ private Boolean datePartitionEnabled; /** *

* Identifies the sequence of the date format to use during folder partitioning. The default value is * YYYYMMDD. Use this parameter when DatePartitionEnabled is set to true. *

*/ private String datePartitionSequence; /** *

* Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH. * Use this parameter when DatePartitionEnabled is set to true. *
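*
* A sketch enabling date-based folder partitioning with the documented default sequence and delimiter
* (generated with* fluent setters assumed):
*
*     S3Settings settings = new S3Settings()
*             .withDatePartitionEnabled(true)
*             .withDatePartitionSequence("YYYYMMDD")   // documented default
*             .withDatePartitionDelimiter("SLASH");    // documented default
*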

*/ private String datePartitionDelimiter; /** *

* This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv format. * If set to true for columns not included in the supplemental log, DMS uses the value specified by * CsvNoSupValue. If not set or set to false, DMS uses the null value for these * columns. *

* *

* This setting is supported in DMS versions 3.4.1 and later. *

*
*/ private Boolean useCsvNoSupValue; /** *

* This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written in * .csv format. If * UseCsvNoSupValue is set to true, specify a string value that you want DMS to use for all * columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for * these columns regardless of the UseCsvNoSupValue setting. *

* *

* This setting is supported in DMS versions 3.4.1 and later. *

*
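*
* For example (a sketch; the marker string is a hypothetical placeholder):
*
*     S3Settings settings = new S3Settings()
*             .withUseCsvNoSupValue(true)
*             .withCsvNoSupValue("NO_SUP_DATA");   // hypothetical marker for columns missing from the supplemental log
*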
*/ private String csvNoSupValue; /** *

* If set to true, DMS saves the transaction order for a change data capture (CDC) load on the Amazon * S3 target specified by * CdcPath. For more information, see Capturing data changes (CDC) including transaction order on the S3 target. *

* *

* This setting is supported in DMS versions 3.4.2 and later. *

*
*/ private Boolean preserveTransactions; /** *

* Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change * data; otherwise, it's optional. If CdcPath is set, DMS reads CDC files from this path and replicates * the data changes to the target endpoint. For an S3 target, if you set PreserveTransactions to true, DMS verifies that you have set this parameter to a * folder path on your S3 target where DMS can save the transaction order for the CDC load. DMS creates this CDC * folder path in either your S3 target working directory or the S3 target location specified by * BucketFolder and * BucketName. *

*

* For example, if you specify CdcPath as MyChangedData, and you specify * BucketName as MyTargetBucket but do not specify BucketFolder, DMS creates * the following CDC folder path: MyTargetBucket/MyChangedData. *

*

* If you specify the same CdcPath, and you specify BucketName as * MyTargetBucket and BucketFolder as MyTargetData, DMS creates the following CDC * folder path: MyTargetBucket/MyTargetData/MyChangedData. *

*

* For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC) including transaction order on the S3 target. *

* *

* This setting is supported in DMS versions 3.4.2 and later. *

*
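*
* A sketch that preserves transaction order, reusing the names from the example above (generated with*
* fluent setters assumed):
*
*     S3Settings settings = new S3Settings()
*             .withBucketName("MyTargetBucket")
*             .withPreserveTransactions(true)
*             .withCdcPath("MyChangedData");   // CDC files land under MyTargetBucket/MyChangedData
*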
*/ private String cdcPath; /** *

* When set to true, this parameter uses the task start time as the timestamp column value instead of the time the data * is written to the target. For full load, when useTaskStartTimeForFullLoadTimestamp is set to * true, each row of the timestamp column contains the task start time. For CDC loads, each row of the * timestamp column contains the transaction commit time. *

*

* When useTaskStartTimeForFullLoadTimestamp is set to false, the full load timestamp in * the timestamp column increments with the time data arrives at the target. *

*/ private Boolean useTaskStartTimeForFullLoadTimestamp; /** *

* A value that enables DMS to specify a predefined (canned) access control list for objects created in an Amazon S3 * bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL in the * Amazon S3 Developer Guide. *

*

* The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL. *

*/ private String cannedAclForObjects; /** *

* An optional parameter that, when set to true or y, adds column name * information to the .csv output file. *

*

* The default value is false. Valid values are true, false, y, * and n. *

*/ private Boolean addColumnName; /** *

* Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. *

*

* When CdcMaxBatchInterval and CdcMinFileSize are both specified, the file write is * triggered by whichever parameter condition is met first within a DMS CloudFormation template. *

*

* The default value is 60 seconds. *

*/ private Integer cdcMaxBatchInterval; /** *

* Minimum file size, defined in kilobytes, to reach for a file output to Amazon S3. *

*

* When CdcMinFileSize and CdcMaxBatchInterval are both specified, the file write is * triggered by whichever parameter condition is met first within a DMS CloudFormation template. *

*

* The default value is 32 MB. *
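*
* A sketch combining the two file-write triggers; whichever condition is met first triggers the write
* (generated with* fluent setters assumed, documented defaults shown):
*
*     S3Settings settings = new S3Settings()
*             .withCdcMaxBatchInterval(60)    // seconds
*             .withCdcMinFileSize(32000);     // kilobytes (32 MB)
*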

*/ private Integer cdcMinFileSize; /** *

* An optional parameter that specifies how DMS treats null values. While handling the null value, you can use this * parameter to pass a user-defined string as null when writing to the target. For example, when target columns are * not nullable, you can use this option to differentiate between the empty string value and the null value. So, if * you set this parameter value to the empty string ("" or ''), DMS treats the empty string as the null value * instead of NULL. *

*

* The default value is NULL. Valid values include any valid string. *
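*
* For example (a sketch; the replacement token is a hypothetical placeholder):
*
*     S3Settings settings = new S3Settings()
*             .withCsvNullValue("\\N");   // hypothetical: write \N for NULL so "" can stay an empty string
*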

*/ private String csvNullValue; /** *

* When this value is set to 1, DMS ignores the first row (the header) in a .csv file. A value of 1 turns on the feature; * a value of 0 turns off the feature. *

*

* The default is 0. *

*/ private Integer ignoreHeaderRows; /** *

* A value that specifies the maximum size (in KB) of any .csv file to be created while migrating to an S3 target * during full load. *

*

* The default value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576. *

*/ private Integer maxFileSize; /** *

* For an S3 source, when this value is set to true or y, each leading double quotation * mark has to be followed by an ending double quotation mark. This formatting complies with RFC 4180. When this * value is set to false or n, string literals are copied to the target as is. In this * case, a delimiter (row or column) signals the end of the field. Thus, you can't use a delimiter as part of the * string, because it signals the end of the value. *

*

* For an S3 target, an optional parameter used to set behavior to comply with RFC 4180 for data migrated to Amazon * S3 using .csv file format only. When this value is set to true or y using Amazon S3 as * a target, if the data has quotation marks or newline characters in it, DMS encloses the entire column with an * additional pair of double quotation marks ("). Every quotation mark within the data is repeated twice. *

*

* The default value is true. Valid values include true, false, * y, and n. *

*/ private Boolean rfc4180; /** *

* When creating an S3 target endpoint, set DatePartitionTimezone to convert the current UTC time into * a specified time zone. The conversion occurs when a date partition folder is created and a CDC filename is * generated. The time zone format is Area/Location. Use this parameter when DatePartitionEnabled is * set to true, as shown in the following example. *

*

* s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence": "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH", "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}' *
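*
* The same settings expressed with the Java fluent setters (a sketch mirroring the CLI example above):
*
*     S3Settings settings = new S3Settings()
*             .withDatePartitionEnabled(true)
*             .withDatePartitionSequence("YYYYMMDDHH")
*             .withDatePartitionDelimiter("SLASH")
*             .withDatePartitionTimezone("Asia/Seoul")
*             .withBucketName("dms-nattarat-test");
*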

*/ private String datePartitionTimezone; /** *

* Use the S3 target endpoint setting AddTrailingPaddingCharacter to add padding on string data. The * default value is false. *

*/ private Boolean addTrailingPaddingCharacter; /** *

* To specify a bucket owner and prevent sniping, you can use the ExpectedBucketOwner endpoint setting. *

*

* Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}' *

*

* When you make a request to test a connection or perform a migration, S3 checks the account ID of the bucket owner * against the specified parameter. *
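*
* The Java equivalent of the CLI example (a sketch; as above, the account ID is a placeholder):
*
*     S3Settings settings = new S3Settings()
*             .withExpectedBucketOwner("AWS_Account_ID");   // replace with the 12-digit owner account ID
*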

*/ private String expectedBucketOwner; /** *

* When true, this parameter allows Glue to catalog your S3 bucket. Creating a Glue catalog lets you use Athena to query your * data. *

*/ private Boolean glueCatalogGeneration; /** *

* The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the * iam:PassRole action. It is a required parameter that enables DMS to write and read objects from an * S3 bucket. *

* * @param serviceAccessRoleArn * The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the * iam:PassRole action. It is a required parameter that enables DMS to write and read objects * from an S3 bucket. */ public void setServiceAccessRoleArn(String serviceAccessRoleArn) { this.serviceAccessRoleArn = serviceAccessRoleArn; } /** *

* The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the * iam:PassRole action. It is a required parameter that enables DMS to write and read objects from an * S3 bucket. *

* * @return The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the * iam:PassRole action. It is a required parameter that enables DMS to write and read objects * from an S3 bucket. */ public String getServiceAccessRoleArn() { return this.serviceAccessRoleArn; } /** *

* The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the * iam:PassRole action. It is a required parameter that enables DMS to write and read objects from an * S3 bucket. *

* * @param serviceAccessRoleArn * The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the * iam:PassRole action. It is a required parameter that enables DMS to write and read objects * from an S3 bucket. * @return Returns a reference to this object so that method calls can be chained together. */ public S3Settings withServiceAccessRoleArn(String serviceAccessRoleArn) { setServiceAccessRoleArn(serviceAccessRoleArn); return this; } /** *

* Specifies how tables are defined in the S3 source files only. *

* * @param externalTableDefinition * Specifies how tables are defined in the S3 source files only. */ public void setExternalTableDefinition(String externalTableDefinition) { this.externalTableDefinition = externalTableDefinition; } /** *

* Specifies how tables are defined in the S3 source files only. *

* * @return Specifies how tables are defined in the S3 source files only. */ public String getExternalTableDefinition() { return this.externalTableDefinition; } /** *

* Specifies how tables are defined in the S3 source files only. *

* * @param externalTableDefinition * Specifies how tables are defined in the S3 source files only. * @return Returns a reference to this object so that method calls can be chained together. */ public S3Settings withExternalTableDefinition(String externalTableDefinition) { setExternalTableDefinition(externalTableDefinition); return this; } /** *

* The delimiter used to separate rows in the .csv file for both source and target. The default is a newline * (\n). *

* * @param csvRowDelimiter * The delimiter used to separate rows in the .csv file for both source and target. The default is a newline * (\n). */ public void setCsvRowDelimiter(String csvRowDelimiter) { this.csvRowDelimiter = csvRowDelimiter; } /** *

* The delimiter used to separate rows in the .csv file for both source and target. The default is a newline * (\n). *

* * @return The delimiter used to separate rows in the .csv file for both source and target. The default is a * newline (\n). */ public String getCsvRowDelimiter() { return this.csvRowDelimiter; } /** *

* The delimiter used to separate rows in the .csv file for both source and target. The default is a newline * (\n). *

* * @param csvRowDelimiter * The delimiter used to separate rows in the .csv file for both source and target. The default is a newline * (\n). * @return Returns a reference to this object so that method calls can be chained together. */ public S3Settings withCsvRowDelimiter(String csvRowDelimiter) { setCsvRowDelimiter(csvRowDelimiter); return this; } /** *

* The delimiter used to separate columns in the .csv file for both source and target. The default is a comma. *

* * @param csvDelimiter * The delimiter used to separate columns in the .csv file for both source and target. The default is a * comma. */ public void setCsvDelimiter(String csvDelimiter) { this.csvDelimiter = csvDelimiter; } /** *

* The delimiter used to separate columns in the .csv file for both source and target. The default is a comma. *

* * @return The delimiter used to separate columns in the .csv file for both source and target. The default is a * comma. */ public String getCsvDelimiter() { return this.csvDelimiter; } /** *

* The delimiter used to separate columns in the .csv file for both source and target. The default is a comma. *

* * @param csvDelimiter * The delimiter used to separate columns in the .csv file for both source and target. The default is a * comma. * @return Returns a reference to this object so that method calls can be chained together. */ public S3Settings withCsvDelimiter(String csvDelimiter) { setCsvDelimiter(csvDelimiter); return this; } /** *

* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path * bucketFolder/schema_name/table_name/. If this parameter isn't specified, then * the path used is schema_name/table_name/. *

* * @param bucketFolder * An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path * bucketFolder/schema_name/table_name/. If this parameter isn't * specified, then the path used is schema_name/table_name/. */ public void setBucketFolder(String bucketFolder) { this.bucketFolder = bucketFolder; } /** *

* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path * bucketFolder/schema_name/table_name/. If this parameter isn't specified, then * the path used is schema_name/table_name/. *

* * @return An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path * bucketFolder/schema_name/table_name/. If this parameter isn't * specified, then the path used is schema_name/table_name/. */ public String getBucketFolder() { return this.bucketFolder; } /** *

* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path * bucketFolder/schema_name/table_name/. If this parameter isn't specified, then * the path used is schema_name/table_name/. *

* * @param bucketFolder * An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path * bucketFolder/schema_name/table_name/. If this parameter isn't * specified, then the path used is schema_name/table_name/. * @return Returns a reference to this object so that method calls can be chained together. */ public S3Settings withBucketFolder(String bucketFolder) { setBucketFolder(bucketFolder); return this; } /** *

* The name of the S3 bucket. *

* * @param bucketName * The name of the S3 bucket. */ public void setBucketName(String bucketName) { this.bucketName = bucketName; } /** *

* The name of the S3 bucket. *

* * @return The name of the S3 bucket. */ public String getBucketName() { return this.bucketName; } /** *

* The name of the S3 bucket. *

* * @param bucketName * The name of the S3 bucket. * @return Returns a reference to this object so that method calls can be chained together. */ public S3Settings withBucketName(String bucketName) { setBucketName(bucketName); return this; } /** *

* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either * set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies * to both .csv and .parquet file formats. *

* * @param compressionType * An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. * Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This * parameter applies to both .csv and .parquet file formats. * @see CompressionTypeValue */ public void setCompressionType(String compressionType) { this.compressionType = compressionType; } /** *

* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either * set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies * to both .csv and .parquet file formats. *

* * @return An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. * Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This * parameter applies to both .csv and .parquet file formats. * @see CompressionTypeValue */ public String getCompressionType() { return this.compressionType; } /** *

* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either * set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies * to both .csv and .parquet file formats. *

* * @param compressionType * An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. * Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This * parameter applies to both .csv and .parquet file formats. * @return Returns a reference to this object so that method calls can be chained together. * @see CompressionTypeValue */ public S3Settings withCompressionType(String compressionType) { setCompressionType(compressionType); return this; } /** *

* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either * set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies * to both .csv and .parquet file formats. *

* * @param compressionType * An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. * Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This * parameter applies to both .csv and .parquet file formats. * @see CompressionTypeValue */ public void setCompressionType(CompressionTypeValue compressionType) { withCompressionType(compressionType); } /** *

* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either * set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies * to both .csv and .parquet file formats. *

* * @param compressionType * An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. * Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This * parameter applies to both .csv and .parquet file formats. * @return Returns a reference to this object so that method calls can be chained together. * @see CompressionTypeValue */ public S3Settings withCompressionType(CompressionTypeValue compressionType) { this.compressionType = compressionType.toString(); return this; } /** *

* The type of server-side encryption that you want to use for your data. This encryption type is part of the * endpoint settings or the extra connection attributes for Amazon S3. You can choose either SSE_S3 * (the default) or SSE_KMS. *

* *

* For the ModifyEndpoint operation, you can change the existing value of the * EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t change the * existing value from SSE_S3 to SSE_KMS. *

*
*

* To use SSE_S3, you need an Identity and Access Management (IAM) role with permission to allow * "arn:aws:s3:::dms-*" to use the following actions: *

* * * @param encryptionMode * The type of server-side encryption that you want to use for your data. This encryption type is part of the * endpoint settings or the extra connection attributes for Amazon S3. You can choose either * SSE_S3 (the default) or SSE_KMS.

*

* For the ModifyEndpoint operation, you can change the existing value of the * EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t * change the existing value from SSE_S3 to SSE_KMS. *

*
*

* To use SSE_S3, you need an Identity and Access Management (IAM) role with permission to allow * "arn:aws:s3:::dms-*" to use the following actions: *

*