/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#include <aws/sagemaker/model/ProcessingClusterConfig.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace SageMaker
{
namespace Model
{

  /**
   * <p>Identifies the resources, ML compute instances, and ML storage volumes to
   * deploy for a processing job. In distributed training, you specify more than one
   * instance.</p><p><h3>See Also:</h3>   <a
   * href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ProcessingResources">AWS
   * API Reference</a></p>
   */
  class ProcessingResources
  {
  public:
    AWS_SAGEMAKER_API ProcessingResources();
    AWS_SAGEMAKER_API ProcessingResources(Aws::Utils::Json::JsonView jsonValue);
    AWS_SAGEMAKER_API ProcessingResources& operator=(Aws::Utils::Json::JsonView jsonValue);
    AWS_SAGEMAKER_API Aws::Utils::Json::JsonValue Jsonize() const;


    /**
     * <p>The configuration for the resources in a cluster used to run the processing
     * job.</p>
     */
    inline const ProcessingClusterConfig& GetClusterConfig() const{ return m_clusterConfig; }

    /**
     * <p>The configuration for the resources in a cluster used to run the processing
     * job.</p>
     */
    inline bool ClusterConfigHasBeenSet() const { return m_clusterConfigHasBeenSet; }

    /**
     * <p>The configuration for the resources in a cluster used to run the processing
     * job.</p>
     */
    inline void SetClusterConfig(const ProcessingClusterConfig& value) { m_clusterConfigHasBeenSet = true; m_clusterConfig = value; }

    /**
     * <p>The configuration for the resources in a cluster used to run the processing
     * job.</p>
     */
    inline void SetClusterConfig(ProcessingClusterConfig&& value) { m_clusterConfigHasBeenSet = true; m_clusterConfig = std::move(value); }

    /**
     * <p>The configuration for the resources in a cluster used to run the processing
     * job.</p>
     */
    inline ProcessingResources& WithClusterConfig(const ProcessingClusterConfig& value) { SetClusterConfig(value); return *this;}

    /**
     * <p>The configuration for the resources in a cluster used to run the processing
     * job.</p>
     */
    inline ProcessingResources& WithClusterConfig(ProcessingClusterConfig&& value) { SetClusterConfig(std::move(value)); return *this;}

  private:

    ProcessingClusterConfig m_clusterConfig;
    bool m_clusterConfigHasBeenSet = false;
  };

} // namespace Model
} // namespace SageMaker
} // namespace Aws
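
/*
 * Usage sketch (not part of the generated header): a minimal, hedged example of
 * how this shape is typically populated before attaching it to a
 * CreateProcessingJob request. The ProcessingClusterConfig setters
 * (SetInstanceCount, SetInstanceType, SetVolumeSizeInGB) and the
 * ProcessingInstanceType enum value are assumed from the corresponding
 * SageMaker model headers and may differ by SDK version.
 *
 *   #include <aws/sagemaker/model/ProcessingResources.h>
 *   #include <aws/sagemaker/model/ProcessingClusterConfig.h>
 *
 *   using namespace Aws::SageMaker::Model;
 *
 *   // Describe the cluster that will run the processing job.
 *   ProcessingClusterConfig cluster;
 *   cluster.SetInstanceCount(1);
 *   cluster.SetInstanceType(ProcessingInstanceType::ml_m5_xlarge);
 *   cluster.SetVolumeSizeInGB(30);
 *
 *   // Wrap it in ProcessingResources; With* setters return *this for chaining.
 *   ProcessingResources resources;
 *   resources.WithClusterConfig(std::move(cluster));
 */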