/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#include <aws/sagemaker/model/TrainingInstanceType.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace SageMaker
{
namespace Model
{

  /**
   * <p>The configuration for hyperparameter tuning resources for use in training
   * jobs launched by the tuning job. These resources include compute instances and
   * storage volumes. Specify one or more compute instance configurations and
   * allocation strategies to select resources (optional).</p><p><h3>See Also:</h3>
   * <a
   * href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/HyperParameterTuningInstanceConfig">AWS
   * API Reference</a></p>
   */
  class AWS_SAGEMAKER_API HyperParameterTuningInstanceConfig
  {
  public:
    HyperParameterTuningInstanceConfig();
    HyperParameterTuningInstanceConfig(Aws::Utils::Json::JsonView jsonValue);
    HyperParameterTuningInstanceConfig& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;


    /**
     * <p>The instance type used for processing of hyperparameter optimization jobs.
     * Choose from general purpose (no GPUs) instance types: ml.m5.xlarge,
     * ml.m5.2xlarge, and ml.m5.4xlarge or compute optimized (no GPUs) instance types:
     * ml.c5.xlarge and ml.c5.2xlarge. For more information about instance types, see
     * instance type descriptions.
*/ inline const TrainingInstanceType& GetInstanceType() const{ return m_instanceType; } /** *The instance type used for processing of hyperparameter optimization jobs. * Choose from general purpose (no GPUs) instance types: ml.m5.xlarge, * ml.m5.2xlarge, and ml.m5.4xlarge or compute optimized (no GPUs) instance types: * ml.c5.xlarge and ml.c5.2xlarge. For more information about instance types, see * instance * type descriptions.
*/ inline bool InstanceTypeHasBeenSet() const { return m_instanceTypeHasBeenSet; } /** *The instance type used for processing of hyperparameter optimization jobs. * Choose from general purpose (no GPUs) instance types: ml.m5.xlarge, * ml.m5.2xlarge, and ml.m5.4xlarge or compute optimized (no GPUs) instance types: * ml.c5.xlarge and ml.c5.2xlarge. For more information about instance types, see * instance * type descriptions.
*/ inline void SetInstanceType(const TrainingInstanceType& value) { m_instanceTypeHasBeenSet = true; m_instanceType = value; } /** *The instance type used for processing of hyperparameter optimization jobs. * Choose from general purpose (no GPUs) instance types: ml.m5.xlarge, * ml.m5.2xlarge, and ml.m5.4xlarge or compute optimized (no GPUs) instance types: * ml.c5.xlarge and ml.c5.2xlarge. For more information about instance types, see * instance * type descriptions.
*/ inline void SetInstanceType(TrainingInstanceType&& value) { m_instanceTypeHasBeenSet = true; m_instanceType = std::move(value); } /** *The instance type used for processing of hyperparameter optimization jobs. * Choose from general purpose (no GPUs) instance types: ml.m5.xlarge, * ml.m5.2xlarge, and ml.m5.4xlarge or compute optimized (no GPUs) instance types: * ml.c5.xlarge and ml.c5.2xlarge. For more information about instance types, see * instance * type descriptions.
*/ inline HyperParameterTuningInstanceConfig& WithInstanceType(const TrainingInstanceType& value) { SetInstanceType(value); return *this;} /** *The instance type used for processing of hyperparameter optimization jobs. * Choose from general purpose (no GPUs) instance types: ml.m5.xlarge, * ml.m5.2xlarge, and ml.m5.4xlarge or compute optimized (no GPUs) instance types: * ml.c5.xlarge and ml.c5.2xlarge. For more information about instance types, see * instance * type descriptions.
*/ inline HyperParameterTuningInstanceConfig& WithInstanceType(TrainingInstanceType&& value) { SetInstanceType(std::move(value)); return *this;} /** *The number of instances of the type specified by InstanceType
.
* Choose an instance count larger than 1 for distributed training algorithms. See
* Step
* 2: Launch a SageMaker Distributed Training Job Using the SageMaker Python
* SDK for more information.
The number of instances of the type specified by InstanceType
.
* Choose an instance count larger than 1 for distributed training algorithms. See
* Step
* 2: Launch a SageMaker Distributed Training Job Using the SageMaker Python
* SDK for more information.
The number of instances of the type specified by InstanceType
.
* Choose an instance count larger than 1 for distributed training algorithms. See
* Step
* 2: Launch a SageMaker Distributed Training Job Using the SageMaker Python
* SDK for more information.
The number of instances of the type specified by InstanceType
.
* Choose an instance count larger than 1 for distributed training algorithms. See
* Step
* 2: Launch a SageMaker Distributed Training Job Using the SageMaker Python
* SDK for more information.
The volume size in GB of the data to be processed for hyperparameter * optimization (optional).
*/
    inline int GetVolumeSizeInGB() const{ return m_volumeSizeInGB; }

    /**
     * <p>The volume size in GB of the data to be processed for hyperparameter
     * optimization (optional).</p>
     */
    inline bool VolumeSizeInGBHasBeenSet() const { return m_volumeSizeInGBHasBeenSet; }

    /**
     * <p>The volume size in GB of the data to be processed for hyperparameter
     * optimization (optional).</p>
     */
    inline void SetVolumeSizeInGB(int value) { m_volumeSizeInGBHasBeenSet = true; m_volumeSizeInGB = value; }

    /**
     * <p>The volume size in GB of the data to be processed for hyperparameter
     * optimization (optional).</p>
     */
    inline HyperParameterTuningInstanceConfig& WithVolumeSizeInGB(int value) { SetVolumeSizeInGB(value); return *this;}

  private:

    TrainingInstanceType m_instanceType;
    bool m_instanceTypeHasBeenSet = false;

    // Brace-initialized: the original left these ints uninitialized, so the
    // getters returned indeterminate values on a default-constructed object.
    // Zero-init matches the "not set" semantics of the HasBeenSet flags.
    int m_instanceCount{0};
    bool m_instanceCountHasBeenSet = false;

    int m_volumeSizeInGB{0};
    bool m_volumeSizeInGBHasBeenSet = false;
  };

} // namespace Model
} // namespace SageMaker
} // namespace Aws