/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/machinelearning/MachineLearning_EXPORTS.h>
#include <aws/machinelearning/model/RDSDatabase.h>
#include <aws/machinelearning/model/RDSDatabaseCredentials.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace MachineLearning
{
namespace Model
{

  /**
   * The data specification of an Amazon Relational Database Service (Amazon RDS)
   * DataSource.
   *
   * See Also: AWS API Reference
   */
  class RDSDataSpec
  {
  public:
    AWS_MACHINELEARNING_API RDSDataSpec();
    AWS_MACHINELEARNING_API RDSDataSpec(Aws::Utils::Json::JsonView jsonValue);
    AWS_MACHINELEARNING_API RDSDataSpec& operator=(Aws::Utils::Json::JsonView jsonValue);
    AWS_MACHINELEARNING_API Aws::Utils::Json::JsonValue Jsonize() const;
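
    // Illustrative sketch (not part of the generated SDK surface): constructing an
    // RDSDataSpec with the fluent With* setters declared below and serializing it.
    // The database identifiers, query, bucket, and role names are hypothetical
    // placeholders, and RDSDatabase is assumed to expose the usual With* setters
    // for its InstanceIdentifier and DatabaseName members.
    //
    //   RDSDataSpec spec;
    //   spec.WithDatabaseInformation(RDSDatabase()
    //           .WithInstanceIdentifier("my-db-instance")
    //           .WithDatabaseName("mydb"))
    //       .WithSelectSqlQuery("SELECT * FROM observations")
    //       .WithS3StagingLocation("s3://my-bucket/staging/")
    //       .WithResourceRole("DataPipelineDefaultResourceRole")
    //       .WithServiceRole("DataPipelineDefaultRole");
    //   Aws::Utils::Json::JsonValue payload = spec.Jsonize();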

    ///@{
    /**
     * Describes the DatabaseName and InstanceIdentifier of an Amazon RDS database.
     */
    inline const RDSDatabase& GetDatabaseInformation() const{ return m_databaseInformation; }
    inline bool DatabaseInformationHasBeenSet() const { return m_databaseInformationHasBeenSet; }
    inline void SetDatabaseInformation(const RDSDatabase& value) { m_databaseInformationHasBeenSet = true; m_databaseInformation = value; }
    inline void SetDatabaseInformation(RDSDatabase&& value) { m_databaseInformationHasBeenSet = true; m_databaseInformation = std::move(value); }
    inline RDSDataSpec& WithDatabaseInformation(const RDSDatabase& value) { SetDatabaseInformation(value); return *this;}
    inline RDSDataSpec& WithDatabaseInformation(RDSDatabase&& value) { SetDatabaseInformation(std::move(value)); return *this;}
    ///@}

    ///@{
    /**
     * The query that is used to retrieve the observation data for the DataSource.
     */
    inline const Aws::String& GetSelectSqlQuery() const{ return m_selectSqlQuery; }
    inline bool SelectSqlQueryHasBeenSet() const { return m_selectSqlQueryHasBeenSet; }
    inline void SetSelectSqlQuery(const Aws::String& value) { m_selectSqlQueryHasBeenSet = true; m_selectSqlQuery = value; }
    inline void SetSelectSqlQuery(Aws::String&& value) { m_selectSqlQueryHasBeenSet = true; m_selectSqlQuery = std::move(value); }
    inline void SetSelectSqlQuery(const char* value) { m_selectSqlQueryHasBeenSet = true; m_selectSqlQuery.assign(value); }
    inline RDSDataSpec& WithSelectSqlQuery(const Aws::String& value) { SetSelectSqlQuery(value); return *this;}
    inline RDSDataSpec& WithSelectSqlQuery(Aws::String&& value) { SetSelectSqlQuery(std::move(value)); return *this;}
    inline RDSDataSpec& WithSelectSqlQuery(const char* value) { SetSelectSqlQuery(value); return *this;}
    ///@}

    ///@{
    /**
     * The AWS Identity and Access Management (IAM) credentials that are used to
     * connect to the Amazon RDS database.
     */
    inline const RDSDatabaseCredentials& GetDatabaseCredentials() const{ return m_databaseCredentials; }
    inline bool DatabaseCredentialsHasBeenSet() const { return m_databaseCredentialsHasBeenSet; }
    inline void SetDatabaseCredentials(const RDSDatabaseCredentials& value) { m_databaseCredentialsHasBeenSet = true; m_databaseCredentials = value; }
    inline void SetDatabaseCredentials(RDSDatabaseCredentials&& value) { m_databaseCredentialsHasBeenSet = true; m_databaseCredentials = std::move(value); }
    inline RDSDataSpec& WithDatabaseCredentials(const RDSDatabaseCredentials& value) { SetDatabaseCredentials(value); return *this;}
    inline RDSDataSpec& WithDatabaseCredentials(RDSDatabaseCredentials&& value) { SetDatabaseCredentials(std::move(value)); return *this;}
    ///@}

    ///@{
    /**
     * The Amazon S3 location for staging Amazon RDS data. The data retrieved from
     * Amazon RDS using SelectSqlQuery is stored in this location.
     */
    inline const Aws::String& GetS3StagingLocation() const{ return m_s3StagingLocation; }
    inline bool S3StagingLocationHasBeenSet() const { return m_s3StagingLocationHasBeenSet; }
    inline void SetS3StagingLocation(const Aws::String& value) { m_s3StagingLocationHasBeenSet = true; m_s3StagingLocation = value; }
    inline void SetS3StagingLocation(Aws::String&& value) { m_s3StagingLocationHasBeenSet = true; m_s3StagingLocation = std::move(value); }
    inline void SetS3StagingLocation(const char* value) { m_s3StagingLocationHasBeenSet = true; m_s3StagingLocation.assign(value); }
    inline RDSDataSpec& WithS3StagingLocation(const Aws::String& value) { SetS3StagingLocation(value); return *this;}
    inline RDSDataSpec& WithS3StagingLocation(Aws::String&& value) { SetS3StagingLocation(std::move(value)); return *this;}
    inline RDSDataSpec& WithS3StagingLocation(const char* value) { SetS3StagingLocation(value); return *this;}
    ///@}

    ///@{
    /**
     * A JSON string that represents the splitting and rearrangement processing to
     * be applied to a DataSource. If the DataRearrangement parameter is not
     * provided, all of the input data is used to create the Datasource.
     *
     * There are multiple parameters that control what data is used to create a
     * datasource:
     *
     * - percentBegin
     *
     *   Use percentBegin to indicate the beginning of the range of the data used
     *   to create the Datasource. If you do not include percentBegin and
     *   percentEnd, Amazon ML includes all of the data when creating the
     *   datasource.
     *
     * - percentEnd
     *
     *   Use percentEnd to indicate the end of the range of the data used to create
     *   the Datasource. If you do not include percentBegin and percentEnd, Amazon
     *   ML includes all of the data when creating the datasource.
     *
     * - complement
     *
     *   The complement parameter instructs Amazon ML to use the data that is not
     *   included in the range of percentBegin to percentEnd to create a
     *   datasource. The complement parameter is useful if you need to create
     *   complementary datasources for training and evaluation. To create a
     *   complementary datasource, use the same values for percentBegin and
     *   percentEnd, along with the complement parameter.
     *
     *   For example, the following two datasources do not share any data, and can
     *   be used to train and evaluate a model. The first datasource has 25 percent
     *   of the data, and the second one has 75 percent of the data.
     *
     *   Datasource for evaluation:
     *   {"splitting":{"percentBegin":0, "percentEnd":25}}
     *
     *   Datasource for training:
     *   {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
     *
     * - strategy
     *
     *   To change how Amazon ML splits the data for a datasource, use the strategy
     *   parameter.
     *
     *   The default value for the strategy parameter is sequential, meaning that
     *   Amazon ML takes all of the data records between the percentBegin and
     *   percentEnd parameters for the datasource, in the order that the records
     *   appear in the input data.
     *
     *   The following two DataRearrangement lines are examples of sequentially
     *   ordered training and evaluation datasources:
     *
     *   Datasource for evaluation:
     *   {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}
     *
     *   Datasource for training:
     *   {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential",
     *   "complement":"true"}}
     *
     *   To randomly split the input data into the proportions indicated by the
     *   percentBegin and percentEnd parameters, set the strategy parameter to
     *   random and provide a string that is used as the seed value for the random
     *   data splitting (for example, you can use the S3 path to your data as the
     *   random seed string). If you choose the random split strategy, Amazon ML
     *   assigns each row of data a pseudo-random number between 0 and 100, and
     *   then selects the rows that have an assigned number between percentBegin
     *   and percentEnd. Pseudo-random numbers are assigned using both the input
     *   seed string value and the byte offset as a seed, so changing the data
     *   results in a different split. Any existing ordering is preserved. The
     *   random splitting strategy ensures that variables in the training and
     *   evaluation data are distributed similarly. It is useful in cases where the
     *   input data may have an implicit sort order, which would otherwise result
     *   in training and evaluation datasources containing non-similar data
     *   records.
     *
     *   The following two DataRearrangement lines are examples of non-sequentially
     *   ordered training and evaluation datasources:
     *
     *   Datasource for evaluation:
     *   {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random",
     *   "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
     *
     *   Datasource for training:
     *   {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random",
     *   "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
     */
    inline const Aws::String& GetDataRearrangement() const{ return m_dataRearrangement; }
    inline bool DataRearrangementHasBeenSet() const { return m_dataRearrangementHasBeenSet; }
    inline void SetDataRearrangement(const Aws::String& value) { m_dataRearrangementHasBeenSet = true; m_dataRearrangement = value; }
    inline void SetDataRearrangement(Aws::String&& value) { m_dataRearrangementHasBeenSet = true; m_dataRearrangement = std::move(value); }
    inline void SetDataRearrangement(const char* value) { m_dataRearrangementHasBeenSet = true; m_dataRearrangement.assign(value); }
    inline RDSDataSpec& WithDataRearrangement(const Aws::String& value) { SetDataRearrangement(value); return *this;}
    inline RDSDataSpec& WithDataRearrangement(Aws::String&& value) { SetDataRearrangement(std::move(value)); return *this;}
    inline RDSDataSpec& WithDataRearrangement(const char* value) { SetDataRearrangement(value); return *this;}
    ///@}
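
    // Illustrative sketch (not generated SDK code): a DataRearrangement value that
    // requests a random 70 percent split, following the format documented above.
    // The S3 seed path is a hypothetical placeholder.
    //
    //   spec.WithDataRearrangement(
    //       R"({"splitting":{"percentBegin":0,"percentEnd":70,)"
    //       R"("strategy":"random","randomSeed":"s3://examplebucket/input.csv"}})");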

    ///@{
    /**
     * A JSON string that represents the schema for an Amazon RDS DataSource. The
     * DataSchema defines the structure of the observation data in the data file(s)
     * referenced in the DataSource.
     *
     * A DataSchema is not required if you specify a DataSchemaUri.
     *
     * Define your DataSchema as a series of key-value pairs. attributes and
     * excludedVariableNames have an array of key-value pairs for their value. Use
     * the following format to define your DataSchema.
     *
     * {
     *   "version": "1.0",
     *   "recordAnnotationFieldName": "F1",
     *   "recordWeightFieldName": "F2",
     *   "targetFieldName": "F3",
     *   "dataFormat": "CSV",
     *   "dataFileContainsHeader": true,
     *   "attributes": [
     *     { "fieldName": "F1", "fieldType": "TEXT" },
     *     { "fieldName": "F2", "fieldType": "NUMERIC" },
     *     { "fieldName": "F3", "fieldType": "CATEGORICAL" },
     *     { "fieldName": "F4", "fieldType": "NUMERIC" },
     *     { "fieldName": "F5", "fieldType": "CATEGORICAL" },
     *     { "fieldName": "F6", "fieldType": "TEXT" },
     *     { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" },
     *     { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" }
     *   ],
     *   "excludedVariableNames": [ "F6" ]
     * }
     */
    inline const Aws::String& GetDataSchema() const{ return m_dataSchema; }
    inline bool DataSchemaHasBeenSet() const { return m_dataSchemaHasBeenSet; }
    inline void SetDataSchema(const Aws::String& value) { m_dataSchemaHasBeenSet = true; m_dataSchema = value; }
    inline void SetDataSchema(Aws::String&& value) { m_dataSchemaHasBeenSet = true; m_dataSchema = std::move(value); }
    inline void SetDataSchema(const char* value) { m_dataSchemaHasBeenSet = true; m_dataSchema.assign(value); }
    inline RDSDataSpec& WithDataSchema(const Aws::String& value) { SetDataSchema(value); return *this;}
    inline RDSDataSpec& WithDataSchema(Aws::String&& value) { SetDataSchema(std::move(value)); return *this;}
    inline RDSDataSpec& WithDataSchema(const char* value) { SetDataSchema(value); return *this;}
    ///@}
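
    // Illustrative sketch (not generated SDK code): a minimal DataSchema string in
    // the format documented above; field names F1-F3 are hypothetical placeholders.
    //
    //   spec.WithDataSchema(
    //       R"({"version":"1.0","targetFieldName":"F3","dataFormat":"CSV",)"
    //       R"("dataFileContainsHeader":true,"attributes":[)"
    //       R"({"fieldName":"F1","fieldType":"TEXT"},)"
    //       R"({"fieldName":"F2","fieldType":"NUMERIC"},)"
    //       R"({"fieldName":"F3","fieldType":"CATEGORICAL"}]})");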

    ///@{
    /**
     * The Amazon S3 location of the DataSchema.
     */
    inline const Aws::String& GetDataSchemaUri() const{ return m_dataSchemaUri; }
    inline bool DataSchemaUriHasBeenSet() const { return m_dataSchemaUriHasBeenSet; }
    inline void SetDataSchemaUri(const Aws::String& value) { m_dataSchemaUriHasBeenSet = true; m_dataSchemaUri = value; }
    inline void SetDataSchemaUri(Aws::String&& value) { m_dataSchemaUriHasBeenSet = true; m_dataSchemaUri = std::move(value); }
    inline void SetDataSchemaUri(const char* value) { m_dataSchemaUriHasBeenSet = true; m_dataSchemaUri.assign(value); }
    inline RDSDataSpec& WithDataSchemaUri(const Aws::String& value) { SetDataSchemaUri(value); return *this;}
    inline RDSDataSpec& WithDataSchemaUri(Aws::String&& value) { SetDataSchemaUri(std::move(value)); return *this;}
    inline RDSDataSpec& WithDataSchemaUri(const char* value) { SetDataSchemaUri(value); return *this;}
    ///@}

    ///@{
    /**
     * The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic
     * Compute Cloud (Amazon EC2) instance to carry out the copy operation from
     * Amazon RDS to Amazon S3. For more information, see Role templates for data
     * pipelines.
     */
    inline const Aws::String& GetResourceRole() const{ return m_resourceRole; }
    inline bool ResourceRoleHasBeenSet() const { return m_resourceRoleHasBeenSet; }
    inline void SetResourceRole(const Aws::String& value) { m_resourceRoleHasBeenSet = true; m_resourceRole = value; }
    inline void SetResourceRole(Aws::String&& value) { m_resourceRoleHasBeenSet = true; m_resourceRole = std::move(value); }
    inline void SetResourceRole(const char* value) { m_resourceRoleHasBeenSet = true; m_resourceRole.assign(value); }
    inline RDSDataSpec& WithResourceRole(const Aws::String& value) { SetResourceRole(value); return *this;}
    inline RDSDataSpec& WithResourceRole(Aws::String&& value) { SetResourceRole(std::move(value)); return *this;}
    inline RDSDataSpec& WithResourceRole(const char* value) { SetResourceRole(value); return *this;}
    ///@}

    ///@{
    /**
     * The role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service
     * to monitor the progress of the copy task from Amazon RDS to Amazon S3. For
     * more information, see Role templates for data pipelines.
     */
    inline const Aws::String& GetServiceRole() const{ return m_serviceRole; }
    inline bool ServiceRoleHasBeenSet() const { return m_serviceRoleHasBeenSet; }
    inline void SetServiceRole(const Aws::String& value) { m_serviceRoleHasBeenSet = true; m_serviceRole = value; }
    inline void SetServiceRole(Aws::String&& value) { m_serviceRoleHasBeenSet = true; m_serviceRole = std::move(value); }
    inline void SetServiceRole(const char* value) { m_serviceRoleHasBeenSet = true; m_serviceRole.assign(value); }
    inline RDSDataSpec& WithServiceRole(const Aws::String& value) { SetServiceRole(value); return *this;}
    inline RDSDataSpec& WithServiceRole(Aws::String&& value) { SetServiceRole(std::move(value)); return *this;}
    inline RDSDataSpec& WithServiceRole(const char* value) { SetServiceRole(value); return *this;}
    ///@}

    ///@{
    /**
     * The subnet ID to be used to access a VPC-based RDS DB instance. This
     * attribute is used by Data Pipeline to carry out the copy task from Amazon
     * RDS to Amazon S3.
     */
    inline const Aws::String& GetSubnetId() const{ return m_subnetId; }
    inline bool SubnetIdHasBeenSet() const { return m_subnetIdHasBeenSet; }
    inline void SetSubnetId(const Aws::String& value) { m_subnetIdHasBeenSet = true; m_subnetId = value; }
    inline void SetSubnetId(Aws::String&& value) { m_subnetIdHasBeenSet = true; m_subnetId = std::move(value); }
    inline void SetSubnetId(const char* value) { m_subnetIdHasBeenSet = true; m_subnetId.assign(value); }
    inline RDSDataSpec& WithSubnetId(const Aws::String& value) { SetSubnetId(value); return *this;}
    inline RDSDataSpec& WithSubnetId(Aws::String&& value) { SetSubnetId(std::move(value)); return *this;}
    inline RDSDataSpec& WithSubnetId(const char* value) { SetSubnetId(value); return *this;}
    ///@}
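
    // Illustrative sketch (not generated SDK code): pointing the copy task at a
    // VPC-based DB instance. The subnet ID and security group ID literals are
    // hypothetical placeholders.
    //
    //   spec.WithSubnetId("subnet-0abc1234")
    //       .AddSecurityGroupIds("sg-0def5678");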

    ///@{
    /**
     * The security group IDs to be used to access a VPC-based RDS DB instance.
     * Ensure that there are appropriate ingress rules set up to allow access to
     * the RDS DB instance. This attribute is used by Data Pipeline to carry out
     * the copy operation from Amazon RDS to Amazon S3.
     */
    inline const Aws::Vector<Aws::String>& GetSecurityGroupIds() const{ return m_securityGroupIds; }
    inline bool SecurityGroupIdsHasBeenSet() const { return m_securityGroupIdsHasBeenSet; }
    inline void SetSecurityGroupIds(const Aws::Vector<Aws::String>& value) { m_securityGroupIdsHasBeenSet = true; m_securityGroupIds = value; }
    inline void SetSecurityGroupIds(Aws::Vector<Aws::String>&& value) { m_securityGroupIdsHasBeenSet = true; m_securityGroupIds = std::move(value); }
    inline RDSDataSpec& WithSecurityGroupIds(const Aws::Vector<Aws::String>& value) { SetSecurityGroupIds(value); return *this;}
    inline RDSDataSpec& WithSecurityGroupIds(Aws::Vector<Aws::String>&& value) { SetSecurityGroupIds(std::move(value)); return *this;}
    inline RDSDataSpec& AddSecurityGroupIds(const Aws::String& value) { m_securityGroupIdsHasBeenSet = true; m_securityGroupIds.push_back(value); return *this; }
    inline RDSDataSpec& AddSecurityGroupIds(Aws::String&& value) { m_securityGroupIdsHasBeenSet = true; m_securityGroupIds.push_back(std::move(value)); return *this; }
    inline RDSDataSpec& AddSecurityGroupIds(const char* value) { m_securityGroupIdsHasBeenSet = true; m_securityGroupIds.push_back(value); return *this; }
    ///@}

  private:

    RDSDatabase m_databaseInformation;
    bool m_databaseInformationHasBeenSet = false;

    Aws::String m_selectSqlQuery;
    bool m_selectSqlQueryHasBeenSet = false;

    RDSDatabaseCredentials m_databaseCredentials;
    bool m_databaseCredentialsHasBeenSet = false;

    Aws::String m_s3StagingLocation;
    bool m_s3StagingLocationHasBeenSet = false;

    Aws::String m_dataRearrangement;
    bool m_dataRearrangementHasBeenSet = false;

    Aws::String m_dataSchema;
    bool m_dataSchemaHasBeenSet = false;

    Aws::String m_dataSchemaUri;
    bool m_dataSchemaUriHasBeenSet = false;

    Aws::String m_resourceRole;
    bool m_resourceRoleHasBeenSet = false;

    Aws::String m_serviceRole;
    bool m_serviceRoleHasBeenSet = false;

    Aws::String m_subnetId;
    bool m_subnetIdHasBeenSet = false;

    Aws::Vector<Aws::String> m_securityGroupIds;
    bool m_securityGroupIdsHasBeenSet = false;
  };

} // namespace Model
} // namespace MachineLearning
} // namespace Aws