/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#include <aws/sagemaker/model/InstanceType.h>
#include <utility>

namespace Aws
{
namespace SageMaker
{
namespace Model
{

  /**
   * The infrastructure configuration for deploying the model to a real-time
   * inference endpoint.
   *
   * See Also: AWS API Reference
   */
  class AWS_SAGEMAKER_API RealTimeInferenceConfig
  {
  public:

    /**
     * The instance type the model is deployed to.
     */
    inline const InstanceType& GetInstanceType() const { return m_instanceType; }
    inline bool InstanceTypeHasBeenSet() const { return m_instanceTypeHasBeenSet; }
    inline void SetInstanceType(const InstanceType& value) { m_instanceTypeHasBeenSet = true; m_instanceType = value; }
    inline void SetInstanceType(InstanceType&& value) { m_instanceTypeHasBeenSet = true; m_instanceType = std::move(value); }
    inline RealTimeInferenceConfig& WithInstanceType(const InstanceType& value) { SetInstanceType(value); return *this; }
    inline RealTimeInferenceConfig& WithInstanceType(InstanceType&& value) { SetInstanceType(std::move(value)); return *this; }

    /**
     * The number of instances of the type specified by InstanceType.
     */
    inline int GetInstanceCount() const { return m_instanceCount; }
    inline bool InstanceCountHasBeenSet() const { return m_instanceCountHasBeenSet; }
    inline void SetInstanceCount(int value) { m_instanceCountHasBeenSet = true; m_instanceCount = value; }
    inline RealTimeInferenceConfig& WithInstanceCount(int value) { SetInstanceCount(value); return *this; }
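
    // Usage sketch (illustrative only): the fluent With* setters are
    // typically chained to populate the config before it is attached to a
    // request. InstanceType::ml_t2_medium is assumed to be a valid
    // enumerator of the InstanceType enum included above; substitute any
    // supported instance type.
    //
    //   RealTimeInferenceConfig config = RealTimeInferenceConfig()
    //       .WithInstanceType(InstanceType::ml_t2_medium)
    //       .WithInstanceCount(1);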