/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/sagemaker/model/DataSource.h>
#include <aws/sagemaker/model/CompressionType.h>
#include <aws/sagemaker/model/RecordWrapper.h>
#include <aws/sagemaker/model/TrainingInputMode.h>
#include <aws/sagemaker/model/ShuffleConfig.h>
#include <utility>

namespace Aws
{
namespace SageMaker
{
namespace Model
{

  /**
   * A channel is a named input source that training algorithms can consume.
   *
   * See Also: AWS API Reference
   */
  class AWS_SAGEMAKER_API Channel
  {
  public:
    /**
     * The name of the channel.
     */
    inline const Aws::String& GetChannelName() const{ return m_channelName; }

    /**
     * The name of the channel.
     */
    inline bool ChannelNameHasBeenSet() const { return m_channelNameHasBeenSet; }

    /**
     * The name of the channel.
     */
    inline void SetChannelName(const Aws::String& value) { m_channelNameHasBeenSet = true; m_channelName = value; }

    /**
     * The name of the channel.
     */
    inline void SetChannelName(Aws::String&& value) { m_channelNameHasBeenSet = true; m_channelName = std::move(value); }

    /**
     * The name of the channel.
     */
    inline void SetChannelName(const char* value) { m_channelNameHasBeenSet = true; m_channelName.assign(value); }

    /**
     * The name of the channel.
     */
    inline Channel& WithChannelName(const Aws::String& value) { SetChannelName(value); return *this;}

    /**
     * The name of the channel.
     */
    inline Channel& WithChannelName(Aws::String&& value) { SetChannelName(std::move(value)); return *this;}

    /**
     * The name of the channel.
     */
    inline Channel& WithChannelName(const char* value) { SetChannelName(value); return *this;}
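    /**
     * Illustrative usage sketch (not part of the generated API surface): the
     * With* setters return *this, so channel fields can be chained fluently.
     * The channel name "train" below is an arbitrary example value.
     * @code
     * Aws::SageMaker::Model::Channel channel;
     * channel.WithChannelName("train");
     * @endcode
     */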
    /**
     * The location of the channel data.
     */
    inline const DataSource& GetDataSource() const{ return m_dataSource; }

    /**
     * The location of the channel data.
     */
    inline bool DataSourceHasBeenSet() const { return m_dataSourceHasBeenSet; }

    /**
     * The location of the channel data.
     */
    inline void SetDataSource(const DataSource& value) { m_dataSourceHasBeenSet = true; m_dataSource = value; }

    /**
     * The location of the channel data.
     */
    inline void SetDataSource(DataSource&& value) { m_dataSourceHasBeenSet = true; m_dataSource = std::move(value); }

    /**
     * The location of the channel data.
     */
    inline Channel& WithDataSource(const DataSource& value) { SetDataSource(value); return *this;}

    /**
     * The location of the channel data.
     */
    inline Channel& WithDataSource(DataSource&& value) { SetDataSource(std::move(value)); return *this;}
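    /**
     * Illustrative sketch of wiring up the channel's data source. Assumes the
     * companion DataSource/S3DataSource/S3DataType model classes from this
     * SDK; the S3 URI is a made-up example value, not a real location.
     * @code
     * using namespace Aws::SageMaker::Model;
     * S3DataSource s3Source;
     * s3Source.WithS3DataType(S3DataType::S3Prefix)
     *         .WithS3Uri("s3://example-bucket/train/");
     * channel.WithDataSource(DataSource().WithS3DataSource(s3Source));
     * @endcode
     */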
    /**
     * The MIME type of the data.
     */
    inline const Aws::String& GetContentType() const{ return m_contentType; }

    /**
     * The MIME type of the data.
     */
    inline bool ContentTypeHasBeenSet() const { return m_contentTypeHasBeenSet; }

    /**
     * The MIME type of the data.
     */
    inline void SetContentType(const Aws::String& value) { m_contentTypeHasBeenSet = true; m_contentType = value; }

    /**
     * The MIME type of the data.
     */
    inline void SetContentType(Aws::String&& value) { m_contentTypeHasBeenSet = true; m_contentType = std::move(value); }

    /**
     * The MIME type of the data.
     */
    inline void SetContentType(const char* value) { m_contentTypeHasBeenSet = true; m_contentType.assign(value); }

    /**
     * The MIME type of the data.
     */
    inline Channel& WithContentType(const Aws::String& value) { SetContentType(value); return *this;}

    /**
     * The MIME type of the data.
     */
    inline Channel& WithContentType(Aws::String&& value) { SetContentType(std::move(value)); return *this;}

    /**
     * The MIME type of the data.
     */
    inline Channel& WithContentType(const char* value) { SetContentType(value); return *this;}
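    /**
     * Illustrative sketch ("text/csv" is an example MIME type, not a required
     * value):
     * @code
     * channel.WithContentType("text/csv");
     * @endcode
     */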
    /**
     * If training data is compressed, the compression type. The default value
     * is None. CompressionType is used only in Pipe input mode. In File mode,
     * leave this field unset or set it to None.
     */
    inline const CompressionType& GetCompressionType() const{ return m_compressionType; }

    /**
     * If training data is compressed, the compression type. The default value
     * is None. CompressionType is used only in Pipe input mode. In File mode,
     * leave this field unset or set it to None.
     */
    inline bool CompressionTypeHasBeenSet() const { return m_compressionTypeHasBeenSet; }

    /**
     * If training data is compressed, the compression type. The default value
     * is None. CompressionType is used only in Pipe input mode. In File mode,
     * leave this field unset or set it to None.
     */
    inline void SetCompressionType(const CompressionType& value) { m_compressionTypeHasBeenSet = true; m_compressionType = value; }

    /**
     * If training data is compressed, the compression type. The default value
     * is None. CompressionType is used only in Pipe input mode. In File mode,
     * leave this field unset or set it to None.
     */
    inline void SetCompressionType(CompressionType&& value) { m_compressionTypeHasBeenSet = true; m_compressionType = std::move(value); }

    /**
     * If training data is compressed, the compression type. The default value
     * is None. CompressionType is used only in Pipe input mode. In File mode,
     * leave this field unset or set it to None.
     */
    inline Channel& WithCompressionType(const CompressionType& value) { SetCompressionType(value); return *this;}

    /**
     * If training data is compressed, the compression type. The default value
     * is None. CompressionType is used only in Pipe input mode. In File mode,
     * leave this field unset or set it to None.
     */
    inline Channel& WithCompressionType(CompressionType&& value) { SetCompressionType(std::move(value)); return *this;}
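    /**
     * Illustrative sketch (assumes the companion CompressionType enum exposes
     * a Gzip value; only meaningful for Pipe input mode):
     * @code
     * channel.WithCompressionType(CompressionType::Gzip);
     * @endcode
     */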
    /**
     * Specify RecordIO as the value when input data is in raw format but the
     * training algorithm requires the RecordIO format. In this case, SageMaker
     * wraps each individual S3 object in a RecordIO record. If the input data
     * is already in RecordIO format, you don't need to set this attribute. For
     * more information, see Create a Dataset Using RecordIO.
     *
     * In File mode, leave this field unset or set it to None.
     */
    inline const RecordWrapper& GetRecordWrapperType() const{ return m_recordWrapperType; }

    /**
     * Specify RecordIO as the value when input data is in raw format but the
     * training algorithm requires the RecordIO format. In this case, SageMaker
     * wraps each individual S3 object in a RecordIO record. If the input data
     * is already in RecordIO format, you don't need to set this attribute. For
     * more information, see Create a Dataset Using RecordIO.
     *
     * In File mode, leave this field unset or set it to None.
     */
    inline bool RecordWrapperTypeHasBeenSet() const { return m_recordWrapperTypeHasBeenSet; }

    /**
     * Specify RecordIO as the value when input data is in raw format but the
     * training algorithm requires the RecordIO format. In this case, SageMaker
     * wraps each individual S3 object in a RecordIO record. If the input data
     * is already in RecordIO format, you don't need to set this attribute. For
     * more information, see Create a Dataset Using RecordIO.
     *
     * In File mode, leave this field unset or set it to None.
     */
    inline void SetRecordWrapperType(const RecordWrapper& value) { m_recordWrapperTypeHasBeenSet = true; m_recordWrapperType = value; }

    /**
     * Specify RecordIO as the value when input data is in raw format but the
     * training algorithm requires the RecordIO format. In this case, SageMaker
     * wraps each individual S3 object in a RecordIO record. If the input data
     * is already in RecordIO format, you don't need to set this attribute. For
     * more information, see Create a Dataset Using RecordIO.
     *
     * In File mode, leave this field unset or set it to None.
     */
    inline void SetRecordWrapperType(RecordWrapper&& value) { m_recordWrapperTypeHasBeenSet = true; m_recordWrapperType = std::move(value); }

    /**
     * Specify RecordIO as the value when input data is in raw format but the
     * training algorithm requires the RecordIO format. In this case, SageMaker
     * wraps each individual S3 object in a RecordIO record. If the input data
     * is already in RecordIO format, you don't need to set this attribute. For
     * more information, see Create a Dataset Using RecordIO.
     *
     * In File mode, leave this field unset or set it to None.
     */
    inline Channel& WithRecordWrapperType(const RecordWrapper& value) { SetRecordWrapperType(value); return *this;}

    /**
     * Specify RecordIO as the value when input data is in raw format but the
     * training algorithm requires the RecordIO format. In this case, SageMaker
     * wraps each individual S3 object in a RecordIO record. If the input data
     * is already in RecordIO format, you don't need to set this attribute. For
     * more information, see Create a Dataset Using RecordIO.
     *
     * In File mode, leave this field unset or set it to None.
     */
    inline Channel& WithRecordWrapperType(RecordWrapper&& value) { SetRecordWrapperType(std::move(value)); return *this;}
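    /**
     * Illustrative sketch (assumes the companion RecordWrapper enum exposes a
     * RecordIO value; use it when raw S3 objects must be wrapped as RecordIO):
     * @code
     * channel.WithRecordWrapperType(RecordWrapper::RecordIO);
     * @endcode
     */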
    /**
     * (Optional) The input mode to use for the data channel in a training job.
     * If you don't set a value for InputMode, SageMaker uses the value set for
     * TrainingInputMode. Use this parameter to override the TrainingInputMode
     * setting in an AlgorithmSpecification request when you have a channel
     * that needs a different input mode from the training job's general
     * setting. To download the data from Amazon Simple Storage Service (Amazon
     * S3) to the provisioned ML storage volume, and mount the directory to a
     * Docker volume, use File input mode. To stream data directly from Amazon
     * S3 to the container, choose Pipe input mode.
     *
     * To use a model for incremental training, choose File input mode.
     */
    inline const TrainingInputMode& GetInputMode() const{ return m_inputMode; }

    /**
     * (Optional) The input mode to use for the data channel in a training job.
     * If you don't set a value for InputMode, SageMaker uses the value set for
     * TrainingInputMode. Use this parameter to override the TrainingInputMode
     * setting in an AlgorithmSpecification request when you have a channel
     * that needs a different input mode from the training job's general
     * setting. To download the data from Amazon Simple Storage Service (Amazon
     * S3) to the provisioned ML storage volume, and mount the directory to a
     * Docker volume, use File input mode. To stream data directly from Amazon
     * S3 to the container, choose Pipe input mode.
     *
     * To use a model for incremental training, choose File input mode.
     */
    inline bool InputModeHasBeenSet() const { return m_inputModeHasBeenSet; }

    /**
     * (Optional) The input mode to use for the data channel in a training job.
     * If you don't set a value for InputMode, SageMaker uses the value set for
     * TrainingInputMode. Use this parameter to override the TrainingInputMode
     * setting in an AlgorithmSpecification request when you have a channel
     * that needs a different input mode from the training job's general
     * setting. To download the data from Amazon Simple Storage Service (Amazon
     * S3) to the provisioned ML storage volume, and mount the directory to a
     * Docker volume, use File input mode. To stream data directly from Amazon
     * S3 to the container, choose Pipe input mode.
     *
     * To use a model for incremental training, choose File input mode.
     */
    inline void SetInputMode(const TrainingInputMode& value) { m_inputModeHasBeenSet = true; m_inputMode = value; }

    /**
     * (Optional) The input mode to use for the data channel in a training job.
     * If you don't set a value for InputMode, SageMaker uses the value set for
     * TrainingInputMode. Use this parameter to override the TrainingInputMode
     * setting in an AlgorithmSpecification request when you have a channel
     * that needs a different input mode from the training job's general
     * setting. To download the data from Amazon Simple Storage Service (Amazon
     * S3) to the provisioned ML storage volume, and mount the directory to a
     * Docker volume, use File input mode. To stream data directly from Amazon
     * S3 to the container, choose Pipe input mode.
     *
     * To use a model for incremental training, choose File input mode.
     */
    inline void SetInputMode(TrainingInputMode&& value) { m_inputModeHasBeenSet = true; m_inputMode = std::move(value); }

    /**
     * (Optional) The input mode to use for the data channel in a training job.
     * If you don't set a value for InputMode, SageMaker uses the value set for
     * TrainingInputMode. Use this parameter to override the TrainingInputMode
     * setting in an AlgorithmSpecification request when you have a channel
     * that needs a different input mode from the training job's general
     * setting. To download the data from Amazon Simple Storage Service (Amazon
     * S3) to the provisioned ML storage volume, and mount the directory to a
     * Docker volume, use File input mode. To stream data directly from Amazon
     * S3 to the container, choose Pipe input mode.
     *
     * To use a model for incremental training, choose File input mode.
     */
    inline Channel& WithInputMode(const TrainingInputMode& value) { SetInputMode(value); return *this;}

    /**
     * (Optional) The input mode to use for the data channel in a training job.
     * If you don't set a value for InputMode, SageMaker uses the value set for
     * TrainingInputMode. Use this parameter to override the TrainingInputMode
     * setting in an AlgorithmSpecification request when you have a channel
     * that needs a different input mode from the training job's general
     * setting. To download the data from Amazon Simple Storage Service (Amazon
     * S3) to the provisioned ML storage volume, and mount the directory to a
     * Docker volume, use File input mode. To stream data directly from Amazon
     * S3 to the container, choose Pipe input mode.
     *
     * To use a model for incremental training, choose File input mode.
     */
    inline Channel& WithInputMode(TrainingInputMode&& value) { SetInputMode(std::move(value)); return *this;}
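    /**
     * Illustrative sketch (assumes the companion TrainingInputMode enum
     * exposes a File value; this overrides the job-wide TrainingInputMode for
     * this channel only):
     * @code
     * channel.WithInputMode(TrainingInputMode::File);
     * @endcode
     */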
    /**
     * A configuration for a shuffle option for input data in a channel. If you
     * use S3Prefix for S3DataType, this shuffles the results of the S3 key
     * prefix matches. If you use ManifestFile, the order of the S3 object
     * references in the ManifestFile is shuffled. If you use
     * AugmentedManifestFile, the order of the JSON lines in the
     * AugmentedManifestFile is shuffled. The shuffling order is determined
     * using the Seed value.
     *
     * For Pipe input mode, shuffling is done at the start of every epoch. With
     * large datasets, this ensures that the order of the training data is
     * different for each epoch, which helps reduce bias and possible
     * overfitting. In a multi-node training job, when ShuffleConfig is
     * combined with an S3DataDistributionType of ShardedByS3Key, the data is
     * shuffled across nodes so that the content sent to a particular node on
     * the first epoch might be sent to a different node on the second epoch.
     */
    inline const ShuffleConfig& GetShuffleConfig() const{ return m_shuffleConfig; }

    /**
     * A configuration for a shuffle option for input data in a channel. If you
     * use S3Prefix for S3DataType, this shuffles the results of the S3 key
     * prefix matches. If you use ManifestFile, the order of the S3 object
     * references in the ManifestFile is shuffled. If you use
     * AugmentedManifestFile, the order of the JSON lines in the
     * AugmentedManifestFile is shuffled. The shuffling order is determined
     * using the Seed value.
     *
     * For Pipe input mode, shuffling is done at the start of every epoch. With
     * large datasets, this ensures that the order of the training data is
     * different for each epoch, which helps reduce bias and possible
     * overfitting. In a multi-node training job, when ShuffleConfig is
     * combined with an S3DataDistributionType of ShardedByS3Key, the data is
     * shuffled across nodes so that the content sent to a particular node on
     * the first epoch might be sent to a different node on the second epoch.
     */
    inline bool ShuffleConfigHasBeenSet() const { return m_shuffleConfigHasBeenSet; }

    /**
     * A configuration for a shuffle option for input data in a channel. If you
     * use S3Prefix for S3DataType, this shuffles the results of the S3 key
     * prefix matches. If you use ManifestFile, the order of the S3 object
     * references in the ManifestFile is shuffled. If you use
     * AugmentedManifestFile, the order of the JSON lines in the
     * AugmentedManifestFile is shuffled. The shuffling order is determined
     * using the Seed value.
     *
     * For Pipe input mode, shuffling is done at the start of every epoch. With
     * large datasets, this ensures that the order of the training data is
     * different for each epoch, which helps reduce bias and possible
     * overfitting. In a multi-node training job, when ShuffleConfig is
     * combined with an S3DataDistributionType of ShardedByS3Key, the data is
     * shuffled across nodes so that the content sent to a particular node on
     * the first epoch might be sent to a different node on the second epoch.
     */
    inline void SetShuffleConfig(const ShuffleConfig& value) { m_shuffleConfigHasBeenSet = true; m_shuffleConfig = value; }

    /**
     * A configuration for a shuffle option for input data in a channel. If you
     * use S3Prefix for S3DataType, this shuffles the results of the S3 key
     * prefix matches. If you use ManifestFile, the order of the S3 object
     * references in the ManifestFile is shuffled. If you use
     * AugmentedManifestFile, the order of the JSON lines in the
     * AugmentedManifestFile is shuffled. The shuffling order is determined
     * using the Seed value.
     *
     * For Pipe input mode, shuffling is done at the start of every epoch. With
     * large datasets, this ensures that the order of the training data is
     * different for each epoch, which helps reduce bias and possible
     * overfitting. In a multi-node training job, when ShuffleConfig is
     * combined with an S3DataDistributionType of ShardedByS3Key, the data is
     * shuffled across nodes so that the content sent to a particular node on
     * the first epoch might be sent to a different node on the second epoch.
     */
    inline void SetShuffleConfig(ShuffleConfig&& value) { m_shuffleConfigHasBeenSet = true; m_shuffleConfig = std::move(value); }

    /**
     * A configuration for a shuffle option for input data in a channel. If you
     * use S3Prefix for S3DataType, this shuffles the results of the S3 key
     * prefix matches. If you use ManifestFile, the order of the S3 object
     * references in the ManifestFile is shuffled. If you use
     * AugmentedManifestFile, the order of the JSON lines in the
     * AugmentedManifestFile is shuffled. The shuffling order is determined
     * using the Seed value.
     *
     * For Pipe input mode, shuffling is done at the start of every epoch. With
     * large datasets, this ensures that the order of the training data is
     * different for each epoch, which helps reduce bias and possible
     * overfitting. In a multi-node training job, when ShuffleConfig is
     * combined with an S3DataDistributionType of ShardedByS3Key, the data is
     * shuffled across nodes so that the content sent to a particular node on
     * the first epoch might be sent to a different node on the second epoch.
     */
    inline Channel& WithShuffleConfig(const ShuffleConfig& value) { SetShuffleConfig(value); return *this;}
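    /**
     * Illustrative sketch (assumes the companion ShuffleConfig model class
     * exposes a WithSeed setter; the seed value is arbitrary):
     * @code
     * channel.WithShuffleConfig(ShuffleConfig().WithSeed(1234));
     * @endcode
     */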
    /**
     * A configuration for a shuffle option for input data in a channel. If you
     * use S3Prefix for S3DataType, this shuffles the results of the S3 key
     * prefix matches. If you use ManifestFile, the order of the S3 object
     * references in the ManifestFile is shuffled. If you use
     * AugmentedManifestFile, the order of the JSON lines in the
     * AugmentedManifestFile is shuffled. The shuffling order is determined
     * using the Seed value.
     *
     * For Pipe input mode, shuffling is done at the start of every epoch. With
     * large datasets, this ensures that the order of the training data is
     * different for each epoch, which helps reduce bias and possible
     * overfitting. In a multi-node training job, when ShuffleConfig is
     * combined with an S3DataDistributionType of ShardedByS3Key, the data is
     * shuffled across nodes so that the content sent to a particular node on
     * the first epoch might be sent to a different node on the second epoch.
     */
    inline Channel& WithShuffleConfig(ShuffleConfig&& value) { SetShuffleConfig(std::move(value)); return *this;}

  private:

    Aws::String m_channelName;
    bool m_channelNameHasBeenSet;

    DataSource m_dataSource;
    bool m_dataSourceHasBeenSet;

    Aws::String m_contentType;
    bool m_contentTypeHasBeenSet;

    CompressionType m_compressionType;
    bool m_compressionTypeHasBeenSet;

    RecordWrapper m_recordWrapperType;
    bool m_recordWrapperTypeHasBeenSet;

    TrainingInputMode m_inputMode;
    bool m_inputModeHasBeenSet;

    ShuffleConfig m_shuffleConfig;
    bool m_shuffleConfigHasBeenSet;
  };

} // namespace Model
} // namespace SageMaker
} // namespace Aws