/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

/*
 * Do not modify this file. This file is generated from the dms-2016-01-01.normal.json service model.
 */
using System;
using System.Collections.Generic;
using System.Xml.Serialization;
using System.Text;
using System.IO;
using System.Net;

using Amazon.Runtime;
using Amazon.Runtime.Internal;

namespace Amazon.DatabaseMigrationService.Model
{
    /// <summary>
    /// Settings for exporting data to Amazon S3.
    /// </summary>
    public partial class S3Settings
    {
        private bool? _addColumnName;
        private bool? _addTrailingPaddingCharacter;
        private string _bucketFolder;
        private string _bucketName;
        private CannedAclForObjectsValue _cannedAclForObjects;
        private bool? _cdcInsertsAndUpdates;
        private bool? _cdcInsertsOnly;
        private int? _cdcMaxBatchInterval;
        private int? _cdcMinFileSize;
        private string _cdcPath;
        private CompressionTypeValue _compressionType;
        private string _csvDelimiter;
        private string _csvNoSupValue;
        private string _csvNullValue;
        private string _csvRowDelimiter;
        private DataFormatValue _dataFormat;
        private int? _dataPageSize;
        private DatePartitionDelimiterValue _datePartitionDelimiter;
        private bool? _datePartitionEnabled;
        private DatePartitionSequenceValue _datePartitionSequence;
        private string _datePartitionTimezone;
        private int? _dictPageSizeLimit;
        private bool? _enableStatistics;
        private EncodingTypeValue _encodingType;
        private EncryptionModeValue _encryptionMode;
        private string _expectedBucketOwner;
        private string _externalTableDefinition;
        private bool? _glueCatalogGeneration;
        private int? _ignoreHeaderRows;
        private bool? _includeOpForFullLoad;
        private int? _maxFileSize;
        private bool? _parquetTimestampInMillisecond;
        private ParquetVersionValue _parquetVersion;
        private bool? _preserveTransactions;
        private bool? _rfc4180;
        private int? _rowGroupLength;
        private string _serverSideEncryptionKmsKeyId;
        private string _serviceAccessRoleArn;
        private string _timestampColumnName;
        private bool? _useCsvNoSupValue;
        private bool? _useTaskStartTimeForFullLoadTimestamp;

        /// <summary>
        /// Gets and sets the property AddColumnName.
        /// <para>
        /// An optional parameter that, when set to true or y, adds column name information
        /// to the .csv output file.
        /// </para>
        /// <para>
        /// The default value is false. Valid values are true, false, y, and n.
        /// </para>
        /// </summary>
        public bool AddColumnName
        {
            get { return this._addColumnName.GetValueOrDefault(); }
            set { this._addColumnName = value; }
        }

        // Check to see if AddColumnName property is set
        internal bool IsSetAddColumnName()
        {
            return this._addColumnName.HasValue;
        }

        /// <summary>
        /// Gets and sets the property AddTrailingPaddingCharacter.
        /// <para>
        /// Use the S3 target endpoint setting AddTrailingPaddingCharacter to add padding
        /// on string data. The default value is false.
        /// </para>
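        /// <para>
        /// A minimal sketch of enabling these .csv extras together; everything else about
        /// the endpoint is assumed:
        /// </para>
        /// <code>
        /// var s3Settings = new S3Settings
        /// {
        ///     AddColumnName = true,               // include column name information in the .csv output
        ///     AddTrailingPaddingCharacter = true  // pad string data
        /// };
        /// </code>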
        /// </summary>
        public bool AddTrailingPaddingCharacter
        {
            get { return this._addTrailingPaddingCharacter.GetValueOrDefault(); }
            set { this._addTrailingPaddingCharacter = value; }
        }

        // Check to see if AddTrailingPaddingCharacter property is set
        internal bool IsSetAddTrailingPaddingCharacter()
        {
            return this._addTrailingPaddingCharacter.HasValue;
        }

        /// <summary>
        /// Gets and sets the property BucketFolder.
        /// <para>
        /// An optional parameter to set a folder name in the S3 bucket. If provided, tables
        /// are created in the path bucketFolder/schema_name/table_name/. If this parameter
        /// isn't specified, then the path used is schema_name/table_name/.
        /// </para>
        /// </summary>
        public string BucketFolder
        {
            get { return this._bucketFolder; }
            set { this._bucketFolder = value; }
        }

        // Check to see if BucketFolder property is set
        internal bool IsSetBucketFolder()
        {
            return this._bucketFolder != null;
        }

        /// <summary>
        /// Gets and sets the property BucketName.
        /// <para>
        /// The name of the S3 bucket.
        /// </para>
        /// </summary>
        public string BucketName
        {
            get { return this._bucketName; }
            set { this._bucketName = value; }
        }

        // Check to see if BucketName property is set
        internal bool IsSetBucketName()
        {
            return this._bucketName != null;
        }

        /// <summary>
        /// Gets and sets the property CannedAclForObjects.
        /// <para>
        /// A value that enables DMS to specify a predefined (canned) access control list for
        /// objects created in an Amazon S3 bucket as .csv or .parquet files. For more information
        /// about Amazon S3 canned ACLs, see Canned ACL in the Amazon S3 Developer Guide.
        /// </para>
        /// <para>
        /// The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
        /// AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
        /// </para>
        /// </summary>
        public CannedAclForObjectsValue CannedAclForObjects
        {
            get { return this._cannedAclForObjects; }
            set { this._cannedAclForObjects = value; }
        }

        // Check to see if CannedAclForObjects property is set
        internal bool IsSetCannedAclForObjects()
        {
            return this._cannedAclForObjects != null;
        }

        /// <summary>
        /// Gets and sets the property CdcInsertsAndUpdates.
        /// <para>
        /// A value that enables a change data capture (CDC) load to write INSERT and UPDATE
        /// operations to .csv or .parquet (columnar storage) output files. The default setting
        /// is false, but when CdcInsertsAndUpdates is set to true or y, only INSERTs and UPDATEs
        /// from the source database are migrated to the .csv or .parquet file.
        /// </para>
        /// <para>
        /// DMS supports the use of the .parquet files in versions 3.4.7 and later.
        /// </para>
        /// <para>
        /// How these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad
        /// parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC record
        /// is set to either I or U to indicate INSERT and UPDATE operations at the source. But
        /// if IncludeOpForFullLoad is set to false, CDC records are written without an indication
        /// of INSERT or UPDATE operations at the source. For more information about how these
        /// settings work together, see Indicating Source DB Operations in Migrated S3 Data in
        /// the Database Migration Service User Guide.
        /// </para>
        /// <para>
        /// DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and
        /// later.
        /// </para>
        /// <para>
        /// CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint.
        /// Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint,
        /// but not both.
        /// </para>
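        /// <para>
        /// A sketch of a CDC configuration that captures only INSERTs and UPDATEs, each
        /// annotated with the source operation; the rest of the endpoint is assumed:
        /// </para>
        /// <code>
        /// var s3Settings = new S3Settings
        /// {
        ///     CdcInsertsAndUpdates = true, // migrate only INSERTs and UPDATEs
        ///     IncludeOpForFullLoad = true  // record I or U in the first field
        /// };
        /// </code>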
        /// </summary>
        public bool CdcInsertsAndUpdates
        {
            get { return this._cdcInsertsAndUpdates.GetValueOrDefault(); }
            set { this._cdcInsertsAndUpdates = value; }
        }

        // Check to see if CdcInsertsAndUpdates property is set
        internal bool IsSetCdcInsertsAndUpdates()
        {
            return this._cdcInsertsAndUpdates.HasValue;
        }

        /// <summary>
        /// Gets and sets the property CdcInsertsOnly.
        /// <para>
        /// A value that enables a change data capture (CDC) load to write only INSERT operations
        /// to .csv or columnar storage (.parquet) output files. By default (the false setting),
        /// the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE),
        /// or D (DELETE). These values indicate whether the row was inserted, updated, or deleted
        /// at the source database for a CDC load to the target.
        /// </para>
        /// <para>
        /// If CdcInsertsOnly is set to true or y, only INSERTs from the source database are
        /// migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are
        /// recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is
        /// set to true, the first field of every CDC record is set to I to indicate the INSERT
        /// operation at the source. If IncludeOpForFullLoad is set to false, every CDC record
        /// is written without a first field to indicate the INSERT operation at the source.
        /// For more information about how these settings work together, see Indicating Source
        /// DB Operations in Migrated S3 Data in the Database Migration Service User Guide.
        /// </para>
        /// <para>
        /// DMS supports the interaction described above between the CdcInsertsOnly and
        /// IncludeOpForFullLoad parameters in versions 3.1.4 and later.
        /// </para>
        /// <para>
        /// CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint.
        /// Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint,
        /// but not both.
        /// </para>
        /// </summary>
        public bool CdcInsertsOnly
        {
            get { return this._cdcInsertsOnly.GetValueOrDefault(); }
            set { this._cdcInsertsOnly = value; }
        }

        // Check to see if CdcInsertsOnly property is set
        internal bool IsSetCdcInsertsOnly()
        {
            return this._cdcInsertsOnly.HasValue;
        }

        /// <summary>
        /// Gets and sets the property CdcMaxBatchInterval.
        /// <para>
        /// Maximum length of the interval, defined in seconds, after which to output a file
        /// to Amazon S3.
        /// </para>
        /// <para>
        /// When CdcMaxBatchInterval and CdcMinFileSize are both specified, the file write is
        /// triggered by whichever parameter condition is met first within a DMS CloudFormation
        /// template.
        /// </para>
        /// <para>
        /// The default value is 60 seconds.
        /// </para>
        /// </summary>
        public int CdcMaxBatchInterval
        {
            get { return this._cdcMaxBatchInterval.GetValueOrDefault(); }
            set { this._cdcMaxBatchInterval = value; }
        }

        // Check to see if CdcMaxBatchInterval property is set
        internal bool IsSetCdcMaxBatchInterval()
        {
            return this._cdcMaxBatchInterval.HasValue;
        }

        /// <summary>
        /// Gets and sets the property CdcMinFileSize.
        /// <para>
        /// Minimum file size, defined in kilobytes, to reach for a file output to Amazon S3.
        /// </para>
        /// <para>
        /// When CdcMinFileSize and CdcMaxBatchInterval are both specified, the file write is
        /// triggered by whichever parameter condition is met first within a DMS CloudFormation
        /// template.
        /// </para>
        /// <para>
        /// The default value is 32 MB.
        /// </para>
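        /// <para>
        /// A sketch of tuning both file-write triggers together; the values are illustrative
        /// only:
        /// </para>
        /// <code>
        /// var s3Settings = new S3Settings
        /// {
        ///     CdcMaxBatchInterval = 120, // flush a file at least every 120 seconds...
        ///     CdcMinFileSize = 64000     // ...or once 64,000 KB accumulates, whichever comes first
        /// };
        /// </code>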
        /// </summary>
        public int CdcMinFileSize
        {
            get { return this._cdcMinFileSize.GetValueOrDefault(); }
            set { this._cdcMinFileSize = value; }
        }

        // Check to see if CdcMinFileSize property is set
        internal bool IsSetCdcMinFileSize()
        {
            return this._cdcMinFileSize.HasValue;
        }

        /// <summary>
        /// Gets and sets the property CdcPath.
        /// <para>
        /// Specifies the folder path of CDC files. For an S3 source, this setting is required
        /// if a task captures change data; otherwise, it's optional. If CdcPath is set, DMS
        /// reads CDC files from this path and replicates the data changes to the target endpoint.
        /// For an S3 target, if you set PreserveTransactions to true, DMS verifies that you
        /// have set this parameter to a folder path on your S3 target where DMS can save the
        /// transaction order for the CDC load. DMS creates this CDC folder path in either your
        /// S3 target working directory or the S3 target location specified by BucketFolder
        /// and BucketName.
        /// </para>
        /// <para>
        /// For example, if you specify CdcPath as MyChangedData, and you specify BucketName
        /// as MyTargetBucket but do not specify BucketFolder, DMS creates the following CDC
        /// folder path: MyTargetBucket/MyChangedData.
        /// </para>
        /// <para>
        /// If you specify the same CdcPath, and you specify BucketName as MyTargetBucket and
        /// BucketFolder as MyTargetData, DMS creates the following CDC folder path:
        /// MyTargetBucket/MyTargetData/MyChangedData.
        /// </para>
        /// <para>
        /// For more information on CDC including transaction order on an S3 target, see Capturing
        /// data changes (CDC) including transaction order on the S3 target.
        /// </para>
        /// <para>
        /// This setting is supported in DMS versions 3.4.2 and later.
        /// </para>
        /// </summary>
        public string CdcPath
        {
            get { return this._cdcPath; }
            set { this._cdcPath = value; }
        }

        // Check to see if CdcPath property is set
        internal bool IsSetCdcPath()
        {
            return this._cdcPath != null;
        }

        /// <summary>
        /// Gets and sets the property CompressionType.
        /// <para>
        /// An optional parameter to use GZIP to compress the target files. Set to GZIP to compress
        /// the target files. Either set this parameter to NONE (the default) or don't use it
        /// to leave the files uncompressed. This parameter applies to both .csv and .parquet
        /// file formats.
        /// </para>
        /// </summary>
        public CompressionTypeValue CompressionType
        {
            get { return this._compressionType; }
            set { this._compressionType = value; }
        }

        // Check to see if CompressionType property is set
        internal bool IsSetCompressionType()
        {
            return this._compressionType != null;
        }

        /// <summary>
        /// Gets and sets the property CsvDelimiter.
        /// <para>
        /// The delimiter used to separate columns in the .csv file for both source and target.
        /// The default is a comma.
        /// </para>
        /// </summary>
        public string CsvDelimiter
        {
            get { return this._csvDelimiter; }
            set { this._csvDelimiter = value; }
        }

        // Check to see if CsvDelimiter property is set
        internal bool IsSetCsvDelimiter()
        {
            return this._csvDelimiter != null;
        }

        /// <summary>
        /// Gets and sets the property CsvNoSupValue.
        /// <para>
        /// This setting only applies if your Amazon S3 output files during a change data capture
        /// (CDC) load are written in .csv format. If UseCsvNoSupValue is set to true, specify
        /// a string value that you want DMS to use for all columns not included in the supplemental
        /// log. If you do not specify a string value, DMS uses the null value for these columns
        /// regardless of the UseCsvNoSupValue setting.
        /// </para>
        /// <para>
        /// This setting is supported in DMS versions 3.4.1 and later.
        /// </para>
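        /// <para>
        /// A sketch pairing this setting with UseCsvNoSupValue; the marker string is
        /// illustrative only:
        /// </para>
        /// <code>
        /// var s3Settings = new S3Settings
        /// {
        ///     UseCsvNoSupValue = true,  // substitute a marker for columns missing from the supplemental log
        ///     CsvNoSupValue = "NO_SUP"  // hypothetical marker value
        /// };
        /// </code>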
        /// </summary>
        public string CsvNoSupValue
        {
            get { return this._csvNoSupValue; }
            set { this._csvNoSupValue = value; }
        }

        // Check to see if CsvNoSupValue property is set
        internal bool IsSetCsvNoSupValue()
        {
            return this._csvNoSupValue != null;
        }

        /// <summary>
        /// Gets and sets the property CsvNullValue.
        /// <para>
        /// An optional parameter that specifies how DMS treats null values. While handling
        /// the null value, you can use this parameter to pass a user-defined string as null
        /// when writing to the target. For example, when target columns are not nullable, you
        /// can use this option to differentiate between the empty string value and the null
        /// value. So, if you set this parameter value to the empty string ("" or ''), DMS treats
        /// the empty string as the null value instead of NULL.
        /// </para>
        /// <para>
        /// The default value is NULL. Valid values include any valid string.
        /// </para>
        /// </summary>
        public string CsvNullValue
        {
            get { return this._csvNullValue; }
            set { this._csvNullValue = value; }
        }

        // Check to see if CsvNullValue property is set
        internal bool IsSetCsvNullValue()
        {
            return this._csvNullValue != null;
        }

        /// <summary>
        /// Gets and sets the property CsvRowDelimiter.
        /// <para>
        /// The delimiter used to separate rows in the .csv file for both source and target.
        /// The default is a newline (\n).
        /// </para>
        /// </summary>
        public string CsvRowDelimiter
        {
            get { return this._csvRowDelimiter; }
            set { this._csvRowDelimiter = value; }
        }

        // Check to see if CsvRowDelimiter property is set
        internal bool IsSetCsvRowDelimiter()
        {
            return this._csvRowDelimiter != null;
        }

        /// <summary>
        /// Gets and sets the property DataFormat.
        /// <para>
        /// The format of the data that you want to use for output. You can choose one of the
        /// following: csv, a row-based format with comma-separated values (the default), or
        /// parquet, the Apache Parquet columnar storage format.
        /// </para>
        /// </summary>
        public DataFormatValue DataFormat
        {
            get { return this._dataFormat; }
            set { this._dataFormat = value; }
        }

        // Check to see if DataFormat property is set
        internal bool IsSetDataFormat()
        {
            return this._dataFormat != null;
        }

        /// <summary>
        /// Gets and sets the property DataPageSize.
        /// <para>
        /// The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes
        /// (1 MiB). This number is used for .parquet file format only.
        /// </para>
        /// </summary>
        public int DataPageSize
        {
            get { return this._dataPageSize.GetValueOrDefault(); }
            set { this._dataPageSize = value; }
        }

        // Check to see if DataPageSize property is set
        internal bool IsSetDataPageSize()
        {
            return this._dataPageSize.HasValue;
        }

        /// <summary>
        /// Gets and sets the property DatePartitionDelimiter.
        /// <para>
        /// Specifies a date separating delimiter to use during folder partitioning. The default
        /// value is SLASH. Use this parameter when DatePartitionEnabled is set to true.
        /// </para>
        /// </summary>
        public DatePartitionDelimiterValue DatePartitionDelimiter
        {
            get { return this._datePartitionDelimiter; }
            set { this._datePartitionDelimiter = value; }
        }

        // Check to see if DatePartitionDelimiter property is set
        internal bool IsSetDatePartitionDelimiter()
        {
            return this._datePartitionDelimiter != null;
        }

        /// <summary>
        /// Gets and sets the property DatePartitionEnabled.
        /// <para>
        /// When set to true, this parameter partitions S3 bucket folders based on transaction
        /// commit dates. The default value is false. For more information about date-based
        /// folder partitioning, see Using date-based folder partitioning.
        /// </para>
        /// </summary>
        public bool DatePartitionEnabled
        {
            get { return this._datePartitionEnabled.GetValueOrDefault(); }
            set { this._datePartitionEnabled = value; }
        }

        // Check to see if DatePartitionEnabled property is set
        internal bool IsSetDatePartitionEnabled()
        {
            return this._datePartitionEnabled.HasValue;
        }

        /// <summary>
        /// Gets and sets the property DatePartitionSequence.
        /// <para>
        /// Identifies the sequence of the date format to use during folder partitioning. The
        /// default value is YYYYMMDD. Use this parameter when DatePartitionEnabled is set to
        /// true.
        /// </para>
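        /// <para>
        /// A sketch of date-based folder partitioning; the YYYYMMDD and SLASH field names
        /// on the value classes are assumed:
        /// </para>
        /// <code>
        /// var s3Settings = new S3Settings
        /// {
        ///     DatePartitionEnabled = true, // partition folders by commit date
        ///     DatePartitionSequence = DatePartitionSequenceValue.YYYYMMDD,
        ///     DatePartitionDelimiter = DatePartitionDelimiterValue.SLASH
        /// };
        /// </code>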
        /// </summary>
        public DatePartitionSequenceValue DatePartitionSequence
        {
            get { return this._datePartitionSequence; }
            set { this._datePartitionSequence = value; }
        }

        // Check to see if DatePartitionSequence property is set
        internal bool IsSetDatePartitionSequence()
        {
            return this._datePartitionSequence != null;
        }

        /// <summary>
        /// Gets and sets the property DatePartitionTimezone.
        /// <para>
        /// When creating an S3 target endpoint, set DatePartitionTimezone to convert the current
        /// UTC time into a specified time zone. The conversion occurs when a date partition
        /// folder is created and a CDC filename is generated. The time zone format is Area/Location.
        /// Use this parameter when DatePartitionEnabled is set to true, as shown in the following
        /// example.
        /// </para>
        /// <para>
        /// s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence": "YYYYMMDDHH",
        /// "DatePartitionDelimiter": "SLASH", "DatePartitionTimezone":"Asia/Seoul", "BucketName":
        /// "dms-nattarat-test"}'
        /// </para>
        /// </summary>
        public string DatePartitionTimezone
        {
            get { return this._datePartitionTimezone; }
            set { this._datePartitionTimezone = value; }
        }

        // Check to see if DatePartitionTimezone property is set
        internal bool IsSetDatePartitionTimezone()
        {
            return this._datePartitionTimezone != null;
        }

        /// <summary>
        /// Gets and sets the property DictPageSizeLimit.
        /// <para>
        /// The maximum size of an encoded dictionary page of a column. If the dictionary page
        /// exceeds this, this column is stored using an encoding type of PLAIN. This parameter
        /// defaults to 1024 * 1024 bytes (1 MiB), the maximum size of a dictionary page before
        /// it reverts to PLAIN encoding. This size is used for .parquet file format only.
        /// </para>
        /// </summary>
        public int DictPageSizeLimit
        {
            get { return this._dictPageSizeLimit.GetValueOrDefault(); }
            set { this._dictPageSizeLimit = value; }
        }

        // Check to see if DictPageSizeLimit property is set
        internal bool IsSetDictPageSizeLimit()
        {
            return this._dictPageSizeLimit.HasValue;
        }

        /// <summary>
        /// Gets and sets the property EnableStatistics.
        /// <para>
        /// A value that enables statistics for Parquet pages and row groups. Choose true to
        /// enable statistics, false to disable. Statistics include NULL, DISTINCT, MAX, and
        /// MIN values. This parameter defaults to true. This value is used for .parquet file
        /// format only.
        /// </para>
        /// </summary>
        public bool EnableStatistics
        {
            get { return this._enableStatistics.GetValueOrDefault(); }
            set { this._enableStatistics = value; }
        }

        // Check to see if EnableStatistics property is set
        internal bool IsSetEnableStatistics()
        {
            return this._enableStatistics.HasValue;
        }

        /// <summary>
        /// Gets and sets the property EncodingType.
        /// <para>
        /// The type of encoding you are using: rle-dictionary (the default), plain, or
        /// plain-dictionary.
        /// </para>
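        /// <para>
        /// A sketch of a Parquet-tuned target; the Parquet and RleDictionary field names on
        /// the value classes are assumed:
        /// </para>
        /// <code>
        /// var s3Settings = new S3Settings
        /// {
        ///     DataFormat = DataFormatValue.Parquet,
        ///     EncodingType = EncodingTypeValue.RleDictionary, // the default encoding
        ///     EnableStatistics = true                         // keep page and row-group statistics
        /// };
        /// </code>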
        /// </summary>
        public EncodingTypeValue EncodingType
        {
            get { return this._encodingType; }
            set { this._encodingType = value; }
        }

        // Check to see if EncodingType property is set
        internal bool IsSetEncodingType()
        {
            return this._encodingType != null;
        }

        /// <summary>
        /// Gets and sets the property EncryptionMode.
        /// <para>
        /// The type of server-side encryption that you want to use for your data. This encryption
        /// type is part of the endpoint settings or the extra connections attributes for Amazon
        /// S3. You can choose either SSE_S3 (the default) or SSE_KMS.
        /// </para>
        /// <para>
        /// For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode
        /// parameter from SSE_KMS to SSE_S3. But you can't change the existing value from SSE_S3
        /// to SSE_KMS.
        /// </para>
        /// <para>
        /// To use SSE_S3, you need an Identity and Access Management (IAM) role with permission
        /// to allow "arn:aws:s3:::dms-*" to use the following actions: s3:CreateBucket, s3:ListBucket,
        /// s3:DeleteBucket, s3:GetBucketLocation, s3:GetObject, s3:PutObject, s3:DeleteObject,
        /// s3:GetObjectVersion, s3:GetBucketPolicy, s3:PutBucketPolicy, and s3:DeleteBucketPolicy.
        /// </para>
        /// </summary>
        public EncryptionModeValue EncryptionMode
        {
            get { return this._encryptionMode; }
            set { this._encryptionMode = value; }
        }

        // Check to see if EncryptionMode property is set
        internal bool IsSetEncryptionMode()
        {
            return this._encryptionMode != null;
        }

        /// <summary>
        /// Gets and sets the property ExpectedBucketOwner.
        /// <para>
        /// To specify a bucket owner and prevent sniping, you can use the ExpectedBucketOwner
        /// endpoint setting.
        /// </para>
        /// <para>
        /// Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
        /// </para>
        /// <para>
        /// When you make a request to test a connection or perform a migration, S3 checks the
        /// account ID of the bucket owner against the specified parameter.
        /// </para>
        /// </summary>
        public string ExpectedBucketOwner
        {
            get { return this._expectedBucketOwner; }
            set { this._expectedBucketOwner = value; }
        }

        // Check to see if ExpectedBucketOwner property is set
        internal bool IsSetExpectedBucketOwner()
        {
            return this._expectedBucketOwner != null;
        }

        /// <summary>
        /// Gets and sets the property ExternalTableDefinition.
        /// <para>
        /// Specifies how tables are defined in the S3 source files only.
        /// </para>
        /// </summary>
        public string ExternalTableDefinition
        {
            get { return this._externalTableDefinition; }
            set { this._externalTableDefinition = value; }
        }

        // Check to see if ExternalTableDefinition property is set
        internal bool IsSetExternalTableDefinition()
        {
            return this._externalTableDefinition != null;
        }

        /// <summary>
        /// Gets and sets the property GlueCatalogGeneration.
        /// <para>
        /// When true, allows Glue to catalog your S3 bucket. Creating a Glue catalog lets you
        /// use Athena to query your data.
        /// </para>
        /// </summary>
        public bool GlueCatalogGeneration
        {
            get { return this._glueCatalogGeneration.GetValueOrDefault(); }
            set { this._glueCatalogGeneration = value; }
        }

        // Check to see if GlueCatalogGeneration property is set
        internal bool IsSetGlueCatalogGeneration()
        {
            return this._glueCatalogGeneration.HasValue;
        }

        /// <summary>
        /// Gets and sets the property IgnoreHeaderRows.
        /// <para>
        /// When this value is set to 1, DMS ignores the first row header in a .csv file. A
        /// value of 1 turns on the feature; a value of 0 turns off the feature.
        /// </para>
        /// <para>
        /// The default is 0.
        /// </para>
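        /// <para>
        /// A sketch for a .csv S3 source whose files begin with a header row; the delimiters
        /// shown are the defaults:
        /// </para>
        /// <code>
        /// var s3Settings = new S3Settings
        /// {
        ///     IgnoreHeaderRows = 1,  // skip the first (header) row of each file
        ///     CsvDelimiter = ",",
        ///     CsvRowDelimiter = "\n"
        /// };
        /// </code>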
        /// </summary>
        public int IgnoreHeaderRows
        {
            get { return this._ignoreHeaderRows.GetValueOrDefault(); }
            set { this._ignoreHeaderRows = value; }
        }

        // Check to see if IgnoreHeaderRows property is set
        internal bool IsSetIgnoreHeaderRows()
        {
            return this._ignoreHeaderRows.HasValue;
        }

        /// <summary>
        /// Gets and sets the property IncludeOpForFullLoad.
        /// <para>
        /// A value that enables a full load to write INSERT operations to the comma-separated
        /// value (.csv) or .parquet output files only to indicate how the rows were added to
        /// the source database.
        /// </para>
        /// <para>
        /// DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
        /// </para>
        /// <para>
        /// DMS supports the use of the .parquet files with the IncludeOpForFullLoad parameter
        /// in versions 3.4.7 and later.
        /// </para>
        /// <para>
        /// For full load, records can only be inserted. By default (the false setting), no
        /// information is recorded in these output files for a full load to indicate that the
        /// rows were inserted at the source database. If IncludeOpForFullLoad is set to true
        /// or y, the INSERT is recorded as an I annotation in the first field of the .csv file.
        /// This allows the format of your target records from a full load to be consistent
        /// with the target records from a CDC load.
        /// </para>
        /// <para>
        /// This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates
        /// parameters for output to .csv files only. For more information about how these settings
        /// work together, see Indicating Source DB Operations in Migrated S3 Data in the Database
        /// Migration Service User Guide.
        /// </para>
        /// </summary>
        public bool IncludeOpForFullLoad
        {
            get { return this._includeOpForFullLoad.GetValueOrDefault(); }
            set { this._includeOpForFullLoad = value; }
        }

        // Check to see if IncludeOpForFullLoad property is set
        internal bool IsSetIncludeOpForFullLoad()
        {
            return this._includeOpForFullLoad.HasValue;
        }

        /// <summary>
        /// Gets and sets the property MaxFileSize.
        /// <para>
        /// A value that specifies the maximum size (in KB) of any .csv file to be created while
        /// migrating to an S3 target during full load.
        /// </para>
        /// <para>
        /// The default value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.
        /// </para>
        /// </summary>
        public int MaxFileSize
        {
            get { return this._maxFileSize.GetValueOrDefault(); }
            set { this._maxFileSize = value; }
        }

        // Check to see if MaxFileSize property is set
        internal bool IsSetMaxFileSize()
        {
            return this._maxFileSize.HasValue;
        }

        /// <summary>
        /// Gets and sets the property ParquetTimestampInMillisecond.
        /// <para>
        /// A value that specifies the precision of any TIMESTAMP column values that are written
        /// to an Amazon S3 object file in .parquet format.
        /// </para>
        /// <para>
        /// DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
        /// </para>
        /// <para>
        /// When ParquetTimestampInMillisecond is set to true or y, DMS writes all TIMESTAMP
        /// columns in a .parquet formatted file with millisecond precision. Otherwise, DMS
        /// writes them with microsecond precision.
        /// </para>
        /// <para>
        /// Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP
        /// values. Set this parameter to true for S3 endpoint object files that are .parquet
        /// formatted only if you plan to query or process the data with Athena or Glue.
        /// </para>
        /// <para>
        /// DMS writes any TIMESTAMP column values written to an S3 file in .csv format with
        /// microsecond precision.
        /// </para>
        /// <para>
        /// Setting ParquetTimestampInMillisecond has no effect on the string format of the
        /// timestamp column value that is inserted by setting the TimestampColumnName parameter.
        /// </para>
        /// </summary>
        public bool ParquetTimestampInMillisecond
        {
            get { return this._parquetTimestampInMillisecond.GetValueOrDefault(); }
            set { this._parquetTimestampInMillisecond = value; }
        }

        // Check to see if ParquetTimestampInMillisecond property is set
        internal bool IsSetParquetTimestampInMillisecond()
        {
            return this._parquetTimestampInMillisecond.HasValue;
        }

        /// <summary>
        /// Gets and sets the property ParquetVersion.
        /// <para>
        /// The version of the Apache Parquet format that you want to use: parquet_1_0 (the
        /// default) or parquet_2_0.
        /// </para>
        /// </summary>
        public ParquetVersionValue ParquetVersion
        {
            get { return this._parquetVersion; }
            set { this._parquetVersion = value; }
        }

        // Check to see if ParquetVersion property is set
        internal bool IsSetParquetVersion()
        {
            return this._parquetVersion != null;
        }

        /// <summary>
        /// Gets and sets the property PreserveTransactions.
        /// <para>
        /// If set to true, DMS saves the transaction order for a change data capture (CDC)
        /// load on the Amazon S3 target specified by CdcPath. For more information, see Capturing
        /// data changes (CDC) including transaction order on the S3 target.
        /// </para>
        /// <para>
        /// This setting is supported in DMS versions 3.4.2 and later.
        /// </para>
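        /// <para>
        /// A sketch of preserving CDC transaction order on an S3 target; the folder name is
        /// illustrative:
        /// </para>
        /// <code>
        /// var s3Settings = new S3Settings
        /// {
        ///     PreserveTransactions = true,
        ///     CdcPath = "changedata" // a CDC folder path is required when preserving transactions
        /// };
        /// </code>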
        /// </summary>
        public bool PreserveTransactions
        {
            get { return this._preserveTransactions.GetValueOrDefault(); }
            set { this._preserveTransactions = value; }
        }

        // Check to see if PreserveTransactions property is set
        internal bool IsSetPreserveTransactions()
        {
            return this._preserveTransactions.HasValue;
        }

        /// <summary>
        /// Gets and sets the property Rfc4180.
        /// <para>
        /// For an S3 source, when this value is set to true or y, each leading double quotation
        /// mark has to be followed by an ending double quotation mark. This formatting complies
        /// with RFC 4180. When this value is set to false or n, string literals are copied
        /// to the target as is. In this case, a delimiter (row or column) signals the end of
        /// the field. Thus, you can't use a delimiter as part of the string, because it signals
        /// the end of the value.
        /// </para>
        /// <para>
        /// For an S3 target, an optional parameter used to set behavior to comply with RFC
        /// 4180 for data migrated to Amazon S3 using .csv file format only. When this value
        /// is set to true or y using Amazon S3 as a target, if the data has quotation marks
        /// or newline characters in it, DMS encloses the entire column with an additional pair
        /// of double quotation marks ("). Every quotation mark within the data is repeated
        /// twice.
        /// </para>
        /// <para>
        /// The default value is true. Valid values include true, false, y, and n.
        /// </para>
        /// </summary>
        public bool Rfc4180
        {
            get { return this._rfc4180.GetValueOrDefault(); }
            set { this._rfc4180 = value; }
        }

        // Check to see if Rfc4180 property is set
        internal bool IsSetRfc4180()
        {
            return this._rfc4180.HasValue;
        }

        /// <summary>
        /// Gets and sets the property RowGroupLength.
        /// <para>
        /// The number of rows in a row group. A smaller row group size provides faster reads.
        /// But the more row groups there are, the slower writes become. This parameter defaults
        /// to 10,000 rows. This number is used for .parquet file format only.
        /// </para>
        /// <para>
        /// If you choose a value larger than the maximum, RowGroupLength is set to the max
        /// row group length in bytes (64 * 1024 * 1024).
        /// </para>
        /// </summary>
        public int RowGroupLength
        {
            get { return this._rowGroupLength.GetValueOrDefault(); }
            set { this._rowGroupLength = value; }
        }

        // Check to see if RowGroupLength property is set
        internal bool IsSetRowGroupLength()
        {
            return this._rowGroupLength.HasValue;
        }

        /// <summary>
        /// Gets and sets the property ServerSideEncryptionKmsKeyId.
        /// <para>
        /// If you are using SSE_KMS for the EncryptionMode, provide the KMS key ID. The key
        /// that you use needs an attached policy that enables Identity and Access Management
        /// (IAM) user permissions and allows use of the key.
        /// </para>
        /// <para>
        /// Here is a CLI example: aws dms create-endpoint --endpoint-identifier value
        /// --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
        /// </para>
        /// </summary>
        public string ServerSideEncryptionKmsKeyId
        {
            get { return this._serverSideEncryptionKmsKeyId; }
            set { this._serverSideEncryptionKmsKeyId = value; }
        }

        // Check to see if ServerSideEncryptionKmsKeyId property is set
        internal bool IsSetServerSideEncryptionKmsKeyId()
        {
            return this._serverSideEncryptionKmsKeyId != null;
        }

        /// <summary>
        /// Gets and sets the property ServiceAccessRoleArn.
        /// <para>
        /// The Amazon Resource Name (ARN) used by the service to access the IAM role. The role
        /// must allow the iam:PassRole action. It is a required parameter that enables DMS
        /// to write objects to and read objects from an S3 bucket.
        /// </para>
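        /// <para>
        /// A minimal sketch of an S3 target configuration; the ARN and bucket names are
        /// placeholders, and the Gzip field name on the value class is assumed:
        /// </para>
        /// <code>
        /// var s3Settings = new S3Settings
        /// {
        ///     ServiceAccessRoleArn = "arn:aws:iam::123456789012:role/my-dms-s3-role",
        ///     BucketName = "my-target-bucket",
        ///     BucketFolder = "dms-output",
        ///     CompressionType = CompressionTypeValue.Gzip
        /// };
        /// </code>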
        /// </summary>
        public string ServiceAccessRoleArn
        {
            get { return this._serviceAccessRoleArn; }
            set { this._serviceAccessRoleArn = value; }
        }

        // Check to see if ServiceAccessRoleArn property is set
        internal bool IsSetServiceAccessRoleArn()
        {
            return this._serviceAccessRoleArn != null;
        }

        /// <summary>
        /// Gets and sets the property TimestampColumnName.
        /// <para>
        /// A value that, when nonblank, causes DMS to add a column with timestamp information
        /// to the endpoint data for an Amazon S3 target.
        /// </para>
        /// <para>
        /// DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
        /// </para>
        /// <para>
        /// DMS includes an additional STRING column in the .csv or .parquet object files of
        /// your migrated data when you set TimestampColumnName to a nonblank value.
        /// </para>
        /// <para>
        /// For a full load, each row of this timestamp column contains a timestamp for when
        /// the data was transferred from the source to the target by DMS.
        /// </para>
        /// <para>
        /// For a change data capture (CDC) load, each row of the timestamp column contains
        /// the timestamp for the commit of that row in the source database.
        /// </para>
        /// <para>
        /// The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS.
        /// By default, the precision of this value is in microseconds. For a CDC load, the
        /// rounding of the precision depends on the commit timestamp supported by DMS for the
        /// source database.
        /// </para>
        /// <para>
        /// When the AddColumnName parameter is set to true, DMS also includes a name for the
        /// timestamp column that you set with TimestampColumnName.
        /// </para>
        /// </summary>
        public string TimestampColumnName
        {
            get { return this._timestampColumnName; }
            set { this._timestampColumnName = value; }
        }

        // Check to see if TimestampColumnName property is set
        internal bool IsSetTimestampColumnName()
        {
            return this._timestampColumnName != null;
        }

        /// <summary>
        /// Gets and sets the property UseCsvNoSupValue.
        /// <para>
        /// This setting applies if the S3 output files during a change data capture (CDC)
        /// load are written in .csv format. If set to true for columns not included in the
        /// supplemental log, DMS uses the value specified by CsvNoSupValue. If not set or set
        /// to false, DMS uses the null value for these columns.
        /// </para>
        /// <para>
        /// This setting is supported in DMS versions 3.4.1 and later.
        /// </para>
        /// </summary>
        public bool UseCsvNoSupValue
        {
            get { return this._useCsvNoSupValue.GetValueOrDefault(); }
            set { this._useCsvNoSupValue = value; }
        }

        // Check to see if UseCsvNoSupValue property is set
        internal bool IsSetUseCsvNoSupValue()
        {
            return this._useCsvNoSupValue.HasValue;
        }

        /// <summary>
        /// Gets and sets the property UseTaskStartTimeForFullLoadTimestamp.
        /// <para>
        /// When set to true, this parameter uses the task start time as the timestamp column
        /// value instead of the time the data is written to the target. For full load, when
        /// useTaskStartTimeForFullLoadTimestamp is set to true, each row of the timestamp
        /// column contains the task start time. For CDC loads, each row of the timestamp column
        /// contains the transaction commit time.
        /// </para>
        /// <para>
        /// When useTaskStartTimeForFullLoadTimestamp is set to false, the full load timestamp
        /// in the timestamp column increments with the time data arrives at the target.
        /// </para>
        /// </summary>
        public bool UseTaskStartTimeForFullLoadTimestamp
        {
            get { return this._useTaskStartTimeForFullLoadTimestamp.GetValueOrDefault(); }
            set { this._useTaskStartTimeForFullLoadTimestamp = value; }
        }

        // Check to see if UseTaskStartTimeForFullLoadTimestamp property is set
        internal bool IsSetUseTaskStartTimeForFullLoadTimestamp()
        {
            return this._useTaskStartTimeForFullLoadTimestamp.HasValue;
        }
    }
}