/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/sagemaker/model/HyperParameterTuningJobObjective.h>
#include <aws/sagemaker/model/ParameterRanges.h>
#include <aws/sagemaker/model/HyperParameterAlgorithmSpecification.h>
#include <aws/sagemaker/model/Channel.h>
#include <aws/sagemaker/model/VpcConfig.h>
#include <aws/sagemaker/model/OutputDataConfig.h>
#include <aws/sagemaker/model/ResourceConfig.h>
#include <aws/sagemaker/model/StoppingCondition.h>
#include <aws/sagemaker/model/RetryStrategy.h>
#include <aws/sagemaker/model/HyperParameterTuningResourceConfig.h>
#include <utility>

// NOTE(review): the original `#include <...>` lines and the namespace/class
// opening were destroyed by angle-bracket stripping; the list above is
// reconstructed from the types used in the surviving accessors — verify
// against the upstream generated header.
namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace SageMaker
{
namespace Model
{

  /**
   * <p>Defines the training jobs launched by a hyperparameter tuning
   * job.</p><p><h3>See Also:</h3>   <a
   * href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/HyperParameterTrainingJobDefinition">AWS
   * API Reference</a></p>
   */
  class AWS_SAGEMAKER_API HyperParameterTrainingJobDefinition
  {
  public:
    HyperParameterTrainingJobDefinition();
    HyperParameterTrainingJobDefinition(Aws::Utils::Json::JsonView jsonValue);
    HyperParameterTrainingJobDefinition& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;


    /**
     * <p>The job definition name.</p>
*/
    inline const Aws::String& GetDefinitionName() const{ return m_definitionName; }

    /**
     * <p>The job definition name.</p>
     */
    inline bool DefinitionNameHasBeenSet() const { return m_definitionNameHasBeenSet; }

    /**
     * <p>The job definition name.</p>
     */
    inline void SetDefinitionName(const Aws::String& value) { m_definitionNameHasBeenSet = true; m_definitionName = value; }

    /**
     * <p>The job definition name.</p>
     */
    inline void SetDefinitionName(Aws::String&& value) { m_definitionNameHasBeenSet = true; m_definitionName = std::move(value); }

    /**
     * <p>The job definition name.</p>
     */
    inline void SetDefinitionName(const char* value) { m_definitionNameHasBeenSet = true; m_definitionName.assign(value); }

    /**
     * <p>The job definition name.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithDefinitionName(const Aws::String& value) { SetDefinitionName(value); return *this;}

    /**
     * <p>The job definition name.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithDefinitionName(Aws::String&& value) { SetDefinitionName(std::move(value)); return *this;}

    /**
     * <p>The job definition name.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithDefinitionName(const char* value) { SetDefinitionName(value); return *this;}

    /**
     * Accessors for the <code>TuningObjective</code> member.
     */
    inline const HyperParameterTuningJobObjective& GetTuningObjective() const{ return m_tuningObjective; }
    inline bool TuningObjectiveHasBeenSet() const { return m_tuningObjectiveHasBeenSet; }
    inline void SetTuningObjective(const HyperParameterTuningJobObjective& value) { m_tuningObjectiveHasBeenSet = true; m_tuningObjective = value; }
    inline void SetTuningObjective(HyperParameterTuningJobObjective&& value) { m_tuningObjectiveHasBeenSet = true; m_tuningObjective = std::move(value); }
    inline HyperParameterTrainingJobDefinition& WithTuningObjective(const HyperParameterTuningJobObjective& value) { SetTuningObjective(value); return *this;}
    inline HyperParameterTrainingJobDefinition& WithTuningObjective(HyperParameterTuningJobObjective&& value) { SetTuningObjective(std::move(value)); return *this;}

    /**
     * Accessors for the <code>HyperParameterRanges</code> member.
     */
    inline const ParameterRanges& GetHyperParameterRanges() const{ return m_hyperParameterRanges; }
    inline bool HyperParameterRangesHasBeenSet() const { return m_hyperParameterRangesHasBeenSet; }
    inline void SetHyperParameterRanges(const ParameterRanges& value) { m_hyperParameterRangesHasBeenSet = true; m_hyperParameterRanges = value; }
    inline void SetHyperParameterRanges(ParameterRanges&& value) { m_hyperParameterRangesHasBeenSet = true; m_hyperParameterRanges = std::move(value); }
    inline HyperParameterTrainingJobDefinition& WithHyperParameterRanges(const ParameterRanges& value) { SetHyperParameterRanges(value); return *this;}
    inline HyperParameterTrainingJobDefinition& WithHyperParameterRanges(ParameterRanges&& value) { SetHyperParameterRanges(std::move(value)); return *this;}

    /**
     * <p>Specifies the values of hyperparameters that do not change for the tuning
     * job.</p>
*/ inline const Aws::MapSpecifies the values of hyperparameters that do not change for the tuning * job.
*/ inline bool StaticHyperParametersHasBeenSet() const { return m_staticHyperParametersHasBeenSet; } /** *Specifies the values of hyperparameters that do not change for the tuning * job.
*/ inline void SetStaticHyperParameters(const Aws::MapSpecifies the values of hyperparameters that do not change for the tuning * job.
*/ inline void SetStaticHyperParameters(Aws::MapSpecifies the values of hyperparameters that do not change for the tuning * job.
*/ inline HyperParameterTrainingJobDefinition& WithStaticHyperParameters(const Aws::MapSpecifies the values of hyperparameters that do not change for the tuning * job.
*/ inline HyperParameterTrainingJobDefinition& WithStaticHyperParameters(Aws::MapSpecifies the values of hyperparameters that do not change for the tuning * job.
*/
    inline HyperParameterTrainingJobDefinition& AddStaticHyperParameters(const Aws::String& key, const Aws::String& value) { m_staticHyperParametersHasBeenSet = true; m_staticHyperParameters.emplace(key, value); return *this; }

    /**
     * <p>Specifies the values of hyperparameters that do not change for the tuning
     * job.</p>
     */
    inline HyperParameterTrainingJobDefinition& AddStaticHyperParameters(Aws::String&& key, const Aws::String& value) { m_staticHyperParametersHasBeenSet = true; m_staticHyperParameters.emplace(std::move(key), value); return *this; }

    /**
     * <p>Specifies the values of hyperparameters that do not change for the tuning
     * job.</p>
     */
    inline HyperParameterTrainingJobDefinition& AddStaticHyperParameters(const Aws::String& key, Aws::String&& value) { m_staticHyperParametersHasBeenSet = true; m_staticHyperParameters.emplace(key, std::move(value)); return *this; }

    /**
     * <p>Specifies the values of hyperparameters that do not change for the tuning
     * job.</p>
     */
    inline HyperParameterTrainingJobDefinition& AddStaticHyperParameters(Aws::String&& key, Aws::String&& value) { m_staticHyperParametersHasBeenSet = true; m_staticHyperParameters.emplace(std::move(key), std::move(value)); return *this; }

    /**
     * <p>Specifies the values of hyperparameters that do not change for the tuning
     * job.</p>
     */
    inline HyperParameterTrainingJobDefinition& AddStaticHyperParameters(const char* key, Aws::String&& value) { m_staticHyperParametersHasBeenSet = true; m_staticHyperParameters.emplace(key, std::move(value)); return *this; }

    /**
     * <p>Specifies the values of hyperparameters that do not change for the tuning
     * job.</p>
     */
    inline HyperParameterTrainingJobDefinition& AddStaticHyperParameters(Aws::String&& key, const char* value) { m_staticHyperParametersHasBeenSet = true; m_staticHyperParameters.emplace(std::move(key), value); return *this; }

    /**
     * <p>Specifies the values of hyperparameters that do not change for the tuning
     * job.</p>
     */
    inline HyperParameterTrainingJobDefinition& AddStaticHyperParameters(const char* key, const char* value) { m_staticHyperParametersHasBeenSet = true; m_staticHyperParameters.emplace(key, value); return *this; }

    /**
     * <p>The <a>HyperParameterAlgorithmSpecification</a> object that specifies the
     * resource algorithm to use for the training jobs that the tuning job
     * launches.</p>
*/
    inline const HyperParameterAlgorithmSpecification& GetAlgorithmSpecification() const{ return m_algorithmSpecification; }

    /**
     * <p>The <a>HyperParameterAlgorithmSpecification</a> object that specifies the
     * resource algorithm to use for the training jobs that the tuning job
     * launches.</p>
     */
    inline bool AlgorithmSpecificationHasBeenSet() const { return m_algorithmSpecificationHasBeenSet; }

    /**
     * <p>The <a>HyperParameterAlgorithmSpecification</a> object that specifies the
     * resource algorithm to use for the training jobs that the tuning job
     * launches.</p>
     */
    inline void SetAlgorithmSpecification(const HyperParameterAlgorithmSpecification& value) { m_algorithmSpecificationHasBeenSet = true; m_algorithmSpecification = value; }

    /**
     * <p>The <a>HyperParameterAlgorithmSpecification</a> object that specifies the
     * resource algorithm to use for the training jobs that the tuning job
     * launches.</p>
     */
    inline void SetAlgorithmSpecification(HyperParameterAlgorithmSpecification&& value) { m_algorithmSpecificationHasBeenSet = true; m_algorithmSpecification = std::move(value); }

    /**
     * <p>The <a>HyperParameterAlgorithmSpecification</a> object that specifies the
     * resource algorithm to use for the training jobs that the tuning job
     * launches.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithAlgorithmSpecification(const HyperParameterAlgorithmSpecification& value) { SetAlgorithmSpecification(value); return *this;}

    /**
     * <p>The <a>HyperParameterAlgorithmSpecification</a> object that specifies the
     * resource algorithm to use for the training jobs that the tuning job
     * launches.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithAlgorithmSpecification(HyperParameterAlgorithmSpecification&& value) { SetAlgorithmSpecification(std::move(value)); return *this;}

    /**
     * <p>The Amazon Resource Name (ARN) of the IAM role associated with the training
     * jobs that the tuning job launches.</p>
     */
    inline const Aws::String& GetRoleArn() const{ return m_roleArn; }

    /**
     * <p>The Amazon Resource Name (ARN) of the IAM role associated with the training
     * jobs that the tuning job launches.</p>
     */
    inline bool RoleArnHasBeenSet() const { return m_roleArnHasBeenSet; }

    /**
     * <p>The Amazon Resource Name (ARN) of the IAM role associated with the training
     * jobs that the tuning job launches.</p>
     */
    inline void SetRoleArn(const Aws::String& value) { m_roleArnHasBeenSet = true; m_roleArn = value; }

    /**
     * <p>The Amazon Resource Name (ARN) of the IAM role associated with the training
     * jobs that the tuning job launches.</p>
     */
    inline void SetRoleArn(Aws::String&& value) { m_roleArnHasBeenSet = true; m_roleArn = std::move(value); }

    /**
     * <p>The Amazon Resource Name (ARN) of the IAM role associated with the training
     * jobs that the tuning job launches.</p>
     */
    inline void SetRoleArn(const char* value) { m_roleArnHasBeenSet = true; m_roleArn.assign(value); }

    /**
     * <p>The Amazon Resource Name (ARN) of the IAM role associated with the training
     * jobs that the tuning job launches.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithRoleArn(const Aws::String& value) { SetRoleArn(value); return *this;}

    /**
     * <p>The Amazon Resource Name (ARN) of the IAM role associated with the training
     * jobs that the tuning job launches.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithRoleArn(Aws::String&& value) { SetRoleArn(std::move(value)); return *this;}

    /**
     * <p>The Amazon Resource Name (ARN) of the IAM role associated with the training
     * jobs that the tuning job launches.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithRoleArn(const char* value) { SetRoleArn(value); return *this;}

    /**
     * <p>An array of <a>Channel</a> objects that specify the input for the training
     * jobs that the tuning job launches.</p>
*/ inline const Aws::VectorAn array of Channel * objects that specify the input for the training jobs that the tuning job * launches.
*/ inline bool InputDataConfigHasBeenSet() const { return m_inputDataConfigHasBeenSet; } /** *An array of Channel * objects that specify the input for the training jobs that the tuning job * launches.
*/ inline void SetInputDataConfig(const Aws::VectorAn array of Channel * objects that specify the input for the training jobs that the tuning job * launches.
*/ inline void SetInputDataConfig(Aws::VectorAn array of Channel * objects that specify the input for the training jobs that the tuning job * launches.
*/ inline HyperParameterTrainingJobDefinition& WithInputDataConfig(const Aws::VectorAn array of Channel * objects that specify the input for the training jobs that the tuning job * launches.
*/ inline HyperParameterTrainingJobDefinition& WithInputDataConfig(Aws::VectorAn array of Channel * objects that specify the input for the training jobs that the tuning job * launches.
*/ inline HyperParameterTrainingJobDefinition& AddInputDataConfig(const Channel& value) { m_inputDataConfigHasBeenSet = true; m_inputDataConfig.push_back(value); return *this; } /** *An array of Channel * objects that specify the input for the training jobs that the tuning job * launches.
*/ inline HyperParameterTrainingJobDefinition& AddInputDataConfig(Channel&& value) { m_inputDataConfigHasBeenSet = true; m_inputDataConfig.push_back(std::move(value)); return *this; } /** *The VpcConfig * object that specifies the VPC that you want the training jobs that this * hyperparameter tuning job launches to connect to. Control access to and from * your training container by configuring the VPC. For more information, see Protect * Training Jobs by Using an Amazon Virtual Private Cloud.
*/
    inline const VpcConfig& GetVpcConfig() const{ return m_vpcConfig; }

    /**
     * <p>The <a>VpcConfig</a> object that specifies the VPC that you want the
     * training jobs that this hyperparameter tuning job launches to connect to.</p>
     */
    inline bool VpcConfigHasBeenSet() const { return m_vpcConfigHasBeenSet; }

    /**
     * <p>The <a>VpcConfig</a> object that specifies the VPC that you want the
     * training jobs that this hyperparameter tuning job launches to connect to.</p>
     */
    inline void SetVpcConfig(const VpcConfig& value) { m_vpcConfigHasBeenSet = true; m_vpcConfig = value; }

    /**
     * <p>The <a>VpcConfig</a> object that specifies the VPC that you want the
     * training jobs that this hyperparameter tuning job launches to connect to.</p>
     */
    inline void SetVpcConfig(VpcConfig&& value) { m_vpcConfigHasBeenSet = true; m_vpcConfig = std::move(value); }

    /**
     * <p>The <a>VpcConfig</a> object that specifies the VPC that you want the
     * training jobs that this hyperparameter tuning job launches to connect to.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithVpcConfig(const VpcConfig& value) { SetVpcConfig(value); return *this;}

    /**
     * <p>The <a>VpcConfig</a> object that specifies the VPC that you want the
     * training jobs that this hyperparameter tuning job launches to connect to.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithVpcConfig(VpcConfig&& value) { SetVpcConfig(std::move(value)); return *this;}

    /**
     * <p>Specifies the path to the Amazon S3 bucket where you store model artifacts
     * from the training jobs that the tuning job launches.</p>
     */
    inline const OutputDataConfig& GetOutputDataConfig() const{ return m_outputDataConfig; }

    /**
     * <p>Specifies the path to the Amazon S3 bucket where you store model artifacts
     * from the training jobs that the tuning job launches.</p>
     */
    inline bool OutputDataConfigHasBeenSet() const { return m_outputDataConfigHasBeenSet; }

    /**
     * <p>Specifies the path to the Amazon S3 bucket where you store model artifacts
     * from the training jobs that the tuning job launches.</p>
     */
    inline void SetOutputDataConfig(const OutputDataConfig& value) { m_outputDataConfigHasBeenSet = true; m_outputDataConfig = value; }

    /**
     * <p>Specifies the path to the Amazon S3 bucket where you store model artifacts
     * from the training jobs that the tuning job launches.</p>
     */
    inline void SetOutputDataConfig(OutputDataConfig&& value) { m_outputDataConfigHasBeenSet = true; m_outputDataConfig = std::move(value); }

    /**
     * <p>Specifies the path to the Amazon S3 bucket where you store model artifacts
     * from the training jobs that the tuning job launches.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithOutputDataConfig(const OutputDataConfig& value) { SetOutputDataConfig(value); return *this;}

    /**
     * <p>Specifies the path to the Amazon S3 bucket where you store model artifacts
     * from the training jobs that the tuning job launches.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithOutputDataConfig(OutputDataConfig&& value) { SetOutputDataConfig(std::move(value)); return *this;}

    /**
     * <p>The resources, including the compute instances and storage volumes, to use
     * for the training jobs that the tuning job launches.</p>
Storage volumes store
* model artifacts and incremental states. Training algorithms might also use
* storage volumes for scratch space. If you want SageMaker to use the storage
* volume to store the training data, choose File
as the
* TrainingInputMode
in the algorithm specification. For distributed
* training algorithms, specify an instance count greater than 1.
If
* you want to use hyperparameter optimization with instance type flexibility, use
* HyperParameterTuningResourceConfig
instead.
The resources, including the compute instances and storage volumes, to use * for the training jobs that the tuning job launches.
Storage volumes store
* model artifacts and incremental states. Training algorithms might also use
* storage volumes for scratch space. If you want SageMaker to use the storage
* volume to store the training data, choose File
as the
* TrainingInputMode
in the algorithm specification. For distributed
* training algorithms, specify an instance count greater than 1.
If
* you want to use hyperparameter optimization with instance type flexibility, use
* HyperParameterTuningResourceConfig
instead.
The resources, including the compute instances and storage volumes, to use * for the training jobs that the tuning job launches.
Storage volumes store
* model artifacts and incremental states. Training algorithms might also use
* storage volumes for scratch space. If you want SageMaker to use the storage
* volume to store the training data, choose File
as the
* TrainingInputMode
in the algorithm specification. For distributed
* training algorithms, specify an instance count greater than 1.
If
* you want to use hyperparameter optimization with instance type flexibility, use
* HyperParameterTuningResourceConfig
instead.
The resources, including the compute instances and storage volumes, to use * for the training jobs that the tuning job launches.
Storage volumes store
* model artifacts and incremental states. Training algorithms might also use
* storage volumes for scratch space. If you want SageMaker to use the storage
* volume to store the training data, choose File
as the
* TrainingInputMode
in the algorithm specification. For distributed
* training algorithms, specify an instance count greater than 1.
If
* you want to use hyperparameter optimization with instance type flexibility, use
* HyperParameterTuningResourceConfig
instead.
The resources, including the compute instances and storage volumes, to use * for the training jobs that the tuning job launches.
Storage volumes store
* model artifacts and incremental states. Training algorithms might also use
* storage volumes for scratch space. If you want SageMaker to use the storage
* volume to store the training data, choose File
as the
* TrainingInputMode
in the algorithm specification. For distributed
* training algorithms, specify an instance count greater than 1.
If
* you want to use hyperparameter optimization with instance type flexibility, use
* HyperParameterTuningResourceConfig
instead.
The resources, including the compute instances and storage volumes, to use * for the training jobs that the tuning job launches.
Storage volumes store
* model artifacts and incremental states. Training algorithms might also use
* storage volumes for scratch space. If you want SageMaker to use the storage
* volume to store the training data, choose File
as the
* TrainingInputMode
in the algorithm specification. For distributed
* training algorithms, specify an instance count greater than 1.
If
* you want to use hyperparameter optimization with instance type flexibility, use
* HyperParameterTuningResourceConfig
instead.
Specifies a limit to how long a model hyperparameter training job can run. It * also specifies how long a managed spot training job has to complete. When the * job reaches the time limit, SageMaker ends the training job. Use this API to cap * model training costs.
*/
    inline const StoppingCondition& GetStoppingCondition() const{ return m_stoppingCondition; }

    /**
     * <p>Specifies a limit to how long a model hyperparameter training job can run.
     * It also specifies how long a managed spot training job has to complete. When
     * the job reaches the time limit, SageMaker ends the training job.</p>
     */
    inline bool StoppingConditionHasBeenSet() const { return m_stoppingConditionHasBeenSet; }

    /**
     * <p>Specifies a limit to how long a model hyperparameter training job can run.
     * It also specifies how long a managed spot training job has to complete. When
     * the job reaches the time limit, SageMaker ends the training job.</p>
     */
    inline void SetStoppingCondition(const StoppingCondition& value) { m_stoppingConditionHasBeenSet = true; m_stoppingCondition = value; }

    /**
     * <p>Specifies a limit to how long a model hyperparameter training job can run.
     * It also specifies how long a managed spot training job has to complete. When
     * the job reaches the time limit, SageMaker ends the training job.</p>
     */
    inline void SetStoppingCondition(StoppingCondition&& value) { m_stoppingConditionHasBeenSet = true; m_stoppingCondition = std::move(value); }

    /**
     * <p>Specifies a limit to how long a model hyperparameter training job can run.
     * It also specifies how long a managed spot training job has to complete. When
     * the job reaches the time limit, SageMaker ends the training job.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithStoppingCondition(const StoppingCondition& value) { SetStoppingCondition(value); return *this;}

    /**
     * <p>Specifies a limit to how long a model hyperparameter training job can run.
     * It also specifies how long a managed spot training job has to complete. When
     * the job reaches the time limit, SageMaker ends the training job.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithStoppingCondition(StoppingCondition&& value) { SetStoppingCondition(std::move(value)); return *this;}

    /**
     * <p>Isolates the training container. No inbound or outbound network calls can
     * be made, except for calls between peers within a training cluster for
     * distributed training. If network isolation is used for training jobs that are
     * configured to use a VPC, SageMaker downloads and uploads customer data and
     * model artifacts through the specified VPC, but the training container does not
     * have network access.</p>
     */
    inline bool GetEnableNetworkIsolation() const{ return m_enableNetworkIsolation; }

    /**
     * <p>Isolates the training container. No inbound or outbound network calls can
     * be made, except for calls between peers within a training cluster for
     * distributed training.</p>
     */
    inline bool EnableNetworkIsolationHasBeenSet() const { return m_enableNetworkIsolationHasBeenSet; }

    /**
     * <p>Isolates the training container. No inbound or outbound network calls can
     * be made, except for calls between peers within a training cluster for
     * distributed training.</p>
     */
    inline void SetEnableNetworkIsolation(bool value) { m_enableNetworkIsolationHasBeenSet = true; m_enableNetworkIsolation = value; }

    /**
     * <p>Isolates the training container. No inbound or outbound network calls can
     * be made, except for calls between peers within a training cluster for
     * distributed training.</p>
     */
    inline HyperParameterTrainingJobDefinition& WithEnableNetworkIsolation(bool value) { SetEnableNetworkIsolation(value); return *this;}

    /**
     * <p>To encrypt all communications between ML compute instances in distributed
* training, choose True
. Encryption provides greater security for
* distributed training, but training might take longer. How long it takes depends
* on the amount of communication between compute instances, especially if you use
* a deep learning algorithm in distributed training.
To encrypt all communications between ML compute instances in distributed
* training, choose True
. Encryption provides greater security for
* distributed training, but training might take longer. How long it takes depends
* on the amount of communication between compute instances, especially if you use
* a deep learning algorithm in distributed training.
To encrypt all communications between ML compute instances in distributed
* training, choose True
. Encryption provides greater security for
* distributed training, but training might take longer. How long it takes depends
* on the amount of communication between compute instances, especially if you use
* a deep learning algorithm in distributed training.
To encrypt all communications between ML compute instances in distributed
* training, choose True
. Encryption provides greater security for
* distributed training, but training might take longer. How long it takes depends
* on the amount of communication between compute instances, especially if you use
* a deep learning algorithm in distributed training.
A Boolean indicating whether managed spot training is enabled
* (True
) or not (False
).
A Boolean indicating whether managed spot training is enabled
* (True
) or not (False
).
A Boolean indicating whether managed spot training is enabled
* (True
) or not (False
).
A Boolean indicating whether managed spot training is enabled
* (True
) or not (False
).
The number of times to retry the job when the job fails due to an
* InternalServerError
.
The number of times to retry the job when the job fails due to an
* InternalServerError
.
The number of times to retry the job when the job fails due to an
* InternalServerError
.
The number of times to retry the job when the job fails due to an
* InternalServerError
.
The number of times to retry the job when the job fails due to an
* InternalServerError
.
The number of times to retry the job when the job fails due to an
* InternalServerError
.
The configuration for the hyperparameter tuning resources, including the
* compute instances and storage volumes, used for training jobs launched by the
* tuning job. By default, storage volumes hold model artifacts and incremental
* states. Choose File
for TrainingInputMode
in the
* AlgorithmSpecification
parameter to additionally store training
* data in the storage volume (optional).
The configuration for the hyperparameter tuning resources, including the
* compute instances and storage volumes, used for training jobs launched by the
* tuning job. By default, storage volumes hold model artifacts and incremental
* states. Choose File
for TrainingInputMode
in the
* AlgorithmSpecification
parameter to additionally store training
* data in the storage volume (optional).
The configuration for the hyperparameter tuning resources, including the
* compute instances and storage volumes, used for training jobs launched by the
* tuning job. By default, storage volumes hold model artifacts and incremental
* states. Choose File
for TrainingInputMode
in the
* AlgorithmSpecification
parameter to additionally store training
* data in the storage volume (optional).
The configuration for the hyperparameter tuning resources, including the
* compute instances and storage volumes, used for training jobs launched by the
* tuning job. By default, storage volumes hold model artifacts and incremental
* states. Choose File
for TrainingInputMode
in the
* AlgorithmSpecification
parameter to additionally store training
* data in the storage volume (optional).
The configuration for the hyperparameter tuning resources, including the
* compute instances and storage volumes, used for training jobs launched by the
* tuning job. By default, storage volumes hold model artifacts and incremental
* states. Choose File
for TrainingInputMode
in the
* AlgorithmSpecification
parameter to additionally store training
* data in the storage volume (optional).
The configuration for the hyperparameter tuning resources, including the
* compute instances and storage volumes, used for training jobs launched by the
* tuning job. By default, storage volumes hold model artifacts and incremental
* states. Choose File
for TrainingInputMode
in the
* AlgorithmSpecification
parameter to additionally store training
* data in the storage volume (optional).
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.
An environment variable that you can pass into the SageMaker CreateTrainingJob * API. You can use an existing environment * variable from the training container or use your own. See Define * metrics and variables for more information.
The maximum number
* of items specified for Map Entries
refers to the maximum number of
* environment variables for each TrainingJobDefinition
and also the
* maximum for the hyperparameter tuning job itself. That is, the sum of the number
* of environment variables for all the training job definitions can't exceed the
* maximum number specified.