/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/sagemaker/model/ModelPackageContainerDefinition.h>
#include <aws/sagemaker/model/TransformInstanceType.h>
#include <aws/sagemaker/model/ProductionVariantInstanceType.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace SageMaker
{
namespace Model
{

  /**
   * <p>Defines how to perform inference generation after a training job is
   * run.</p><p><h3>See Also:</h3>   <a
   * href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/InferenceSpecification">AWS
   * API Reference</a></p>
   */
  // A short usage sketch for this class follows the namespace closings at the end of this header.
  class AWS_SAGEMAKER_API InferenceSpecification
  {
  public:
    InferenceSpecification();
    InferenceSpecification(Aws::Utils::Json::JsonView jsonValue);
    InferenceSpecification& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;

    ///@{
    /**
     * <p>The Amazon ECR registry path of the Docker image that contains the inference
     * code.</p>
     */
    inline const Aws::Vector<ModelPackageContainerDefinition>& GetContainers() const{ return m_containers; }
    inline bool ContainersHasBeenSet() const { return m_containersHasBeenSet; }
    inline void SetContainers(const Aws::Vector<ModelPackageContainerDefinition>& value) { m_containersHasBeenSet = true; m_containers = value; }
    inline void SetContainers(Aws::Vector<ModelPackageContainerDefinition>&& value) { m_containersHasBeenSet = true; m_containers = std::move(value); }
    inline InferenceSpecification& WithContainers(const Aws::Vector<ModelPackageContainerDefinition>& value) { SetContainers(value); return *this; }
    inline InferenceSpecification& WithContainers(Aws::Vector<ModelPackageContainerDefinition>&& value) { SetContainers(std::move(value)); return *this; }
    inline InferenceSpecification& AddContainers(const ModelPackageContainerDefinition& value) { m_containersHasBeenSet = true; m_containers.push_back(value); return *this; }
    inline InferenceSpecification& AddContainers(ModelPackageContainerDefinition&& value) { m_containersHasBeenSet = true; m_containers.push_back(std::move(value)); return *this; }
    ///@}

    ///@{
    /**
     * <p>A list of the instance types on which a transformation job can be run or on
     * which an endpoint can be deployed.</p> <p>This parameter is required for
     * unversioned models, and optional for versioned models.</p>
     */
    inline const Aws::Vector<TransformInstanceType>& GetSupportedTransformInstanceTypes() const{ return m_supportedTransformInstanceTypes; }
    inline bool SupportedTransformInstanceTypesHasBeenSet() const { return m_supportedTransformInstanceTypesHasBeenSet; }
    inline void SetSupportedTransformInstanceTypes(const Aws::Vector<TransformInstanceType>& value) { m_supportedTransformInstanceTypesHasBeenSet = true; m_supportedTransformInstanceTypes = value; }
    inline void SetSupportedTransformInstanceTypes(Aws::Vector<TransformInstanceType>&& value) { m_supportedTransformInstanceTypesHasBeenSet = true; m_supportedTransformInstanceTypes = std::move(value); }
    inline InferenceSpecification& WithSupportedTransformInstanceTypes(const Aws::Vector<TransformInstanceType>& value) { SetSupportedTransformInstanceTypes(value); return *this; }
    inline InferenceSpecification& WithSupportedTransformInstanceTypes(Aws::Vector<TransformInstanceType>&& value) { SetSupportedTransformInstanceTypes(std::move(value)); return *this; }
    inline InferenceSpecification& AddSupportedTransformInstanceTypes(const TransformInstanceType& value) { m_supportedTransformInstanceTypesHasBeenSet = true; m_supportedTransformInstanceTypes.push_back(value); return *this; }
    inline InferenceSpecification& AddSupportedTransformInstanceTypes(TransformInstanceType&& value) { m_supportedTransformInstanceTypesHasBeenSet = true; m_supportedTransformInstanceTypes.push_back(std::move(value)); return *this; }
    ///@}

    ///@{
    /**
     * <p>A list of the instance types that are used to generate inferences in
     * real-time.</p> <p>This parameter is required for unversioned models, and
     * optional for versioned models.</p>
     */
    inline const Aws::Vector<ProductionVariantInstanceType>& GetSupportedRealtimeInferenceInstanceTypes() const{ return m_supportedRealtimeInferenceInstanceTypes; }
    inline bool SupportedRealtimeInferenceInstanceTypesHasBeenSet() const { return m_supportedRealtimeInferenceInstanceTypesHasBeenSet; }
    inline void SetSupportedRealtimeInferenceInstanceTypes(const Aws::Vector<ProductionVariantInstanceType>& value) { m_supportedRealtimeInferenceInstanceTypesHasBeenSet = true; m_supportedRealtimeInferenceInstanceTypes = value; }
    inline void SetSupportedRealtimeInferenceInstanceTypes(Aws::Vector<ProductionVariantInstanceType>&& value) { m_supportedRealtimeInferenceInstanceTypesHasBeenSet = true; m_supportedRealtimeInferenceInstanceTypes = std::move(value); }
    inline InferenceSpecification& WithSupportedRealtimeInferenceInstanceTypes(const Aws::Vector<ProductionVariantInstanceType>& value) { SetSupportedRealtimeInferenceInstanceTypes(value); return *this; }
    inline InferenceSpecification& WithSupportedRealtimeInferenceInstanceTypes(Aws::Vector<ProductionVariantInstanceType>&& value) { SetSupportedRealtimeInferenceInstanceTypes(std::move(value)); return *this; }
    inline InferenceSpecification& AddSupportedRealtimeInferenceInstanceTypes(const ProductionVariantInstanceType& value) { m_supportedRealtimeInferenceInstanceTypesHasBeenSet = true; m_supportedRealtimeInferenceInstanceTypes.push_back(value); return *this; }
    inline InferenceSpecification& AddSupportedRealtimeInferenceInstanceTypes(ProductionVariantInstanceType&& value) { m_supportedRealtimeInferenceInstanceTypesHasBeenSet = true; m_supportedRealtimeInferenceInstanceTypes.push_back(std::move(value)); return *this; }
    ///@}

    ///@{
    /**
     * <p>The supported MIME types for the input data.</p>
     */
    inline const Aws::Vector<Aws::String>& GetSupportedContentTypes() const{ return m_supportedContentTypes; }
    inline bool SupportedContentTypesHasBeenSet() const { return m_supportedContentTypesHasBeenSet; }
    inline void SetSupportedContentTypes(const Aws::Vector<Aws::String>& value) { m_supportedContentTypesHasBeenSet = true; m_supportedContentTypes = value; }
    inline void SetSupportedContentTypes(Aws::Vector<Aws::String>&& value) { m_supportedContentTypesHasBeenSet = true; m_supportedContentTypes = std::move(value); }
    inline InferenceSpecification& WithSupportedContentTypes(const Aws::Vector<Aws::String>& value) { SetSupportedContentTypes(value); return *this; }
    inline InferenceSpecification& WithSupportedContentTypes(Aws::Vector<Aws::String>&& value) { SetSupportedContentTypes(std::move(value)); return *this; }
    inline InferenceSpecification& AddSupportedContentTypes(const Aws::String& value) { m_supportedContentTypesHasBeenSet = true; m_supportedContentTypes.push_back(value); return *this; }
    inline InferenceSpecification& AddSupportedContentTypes(Aws::String&& value) { m_supportedContentTypesHasBeenSet = true; m_supportedContentTypes.push_back(std::move(value)); return *this; }
    inline InferenceSpecification& AddSupportedContentTypes(const char* value) { m_supportedContentTypesHasBeenSet = true; m_supportedContentTypes.push_back(value); return *this; }
    ///@}

    ///@{
    /**
     * <p>The supported MIME types for the output data.</p>
     */
    inline const Aws::Vector<Aws::String>& GetSupportedResponseMIMETypes() const{ return m_supportedResponseMIMETypes; }
    inline bool SupportedResponseMIMETypesHasBeenSet() const { return m_supportedResponseMIMETypesHasBeenSet; }
    inline void SetSupportedResponseMIMETypes(const Aws::Vector<Aws::String>& value) { m_supportedResponseMIMETypesHasBeenSet = true; m_supportedResponseMIMETypes = value; }
    inline void SetSupportedResponseMIMETypes(Aws::Vector<Aws::String>&& value) { m_supportedResponseMIMETypesHasBeenSet = true; m_supportedResponseMIMETypes = std::move(value); }
    inline InferenceSpecification& WithSupportedResponseMIMETypes(const Aws::Vector<Aws::String>& value) { SetSupportedResponseMIMETypes(value); return *this; }
    inline InferenceSpecification& WithSupportedResponseMIMETypes(Aws::Vector<Aws::String>&& value) { SetSupportedResponseMIMETypes(std::move(value)); return *this; }
    inline InferenceSpecification& AddSupportedResponseMIMETypes(const Aws::String& value) { m_supportedResponseMIMETypesHasBeenSet = true; m_supportedResponseMIMETypes.push_back(value); return *this; }
    inline InferenceSpecification& AddSupportedResponseMIMETypes(Aws::String&& value) { m_supportedResponseMIMETypesHasBeenSet = true; m_supportedResponseMIMETypes.push_back(std::move(value)); return *this; }
    inline InferenceSpecification& AddSupportedResponseMIMETypes(const char* value) { m_supportedResponseMIMETypesHasBeenSet = true; m_supportedResponseMIMETypes.push_back(value); return *this; }
    ///@}

  private:

    Aws::Vector<ModelPackageContainerDefinition> m_containers;
    bool m_containersHasBeenSet = false;

    Aws::Vector<TransformInstanceType> m_supportedTransformInstanceTypes;
    bool m_supportedTransformInstanceTypesHasBeenSet = false;

    Aws::Vector<ProductionVariantInstanceType> m_supportedRealtimeInferenceInstanceTypes;
    bool m_supportedRealtimeInferenceInstanceTypesHasBeenSet = false;

    Aws::Vector<Aws::String> m_supportedContentTypes;
    bool m_supportedContentTypesHasBeenSet = false;

    Aws::Vector<Aws::String> m_supportedResponseMIMETypes;
    bool m_supportedResponseMIMETypesHasBeenSet = false;
  };

} // namespace Model
} // namespace SageMaker
} // namespace Aws
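
/*
 * Usage sketch (illustrative only, not part of the generated header): one way to
 * populate an InferenceSpecification with the fluent With/Add accessors declared
 * above. The image URI and MIME types are placeholders, and the
 * ModelPackageContainerDefinition::WithImage call and the ml_m5_xlarge enum
 * values are assumed to be available in the companion generated headers.
 *
 *   #include <aws/sagemaker/model/InferenceSpecification.h>
 *   #include <aws/sagemaker/model/ModelPackageContainerDefinition.h>
 *
 *   using namespace Aws::SageMaker::Model;
 *
 *   InferenceSpecification BuildExampleInferenceSpecification()
 *   {
 *     // Container holding the inference code image (placeholder ECR path).
 *     ModelPackageContainerDefinition container;
 *     container.WithImage("<account-id>.dkr.ecr.<region>.amazonaws.com/my-inference-image:latest");
 *
 *     // Each Add* call sets the corresponding *HasBeenSet flag and appends one element.
 *     InferenceSpecification spec;
 *     spec.AddContainers(container)
 *         .AddSupportedTransformInstanceTypes(TransformInstanceType::ml_m5_xlarge)
 *         .AddSupportedRealtimeInferenceInstanceTypes(ProductionVariantInstanceType::ml_m5_xlarge)
 *         .AddSupportedContentTypes("text/csv")          // input MIME types
 *         .AddSupportedResponseMIMETypes("text/csv");    // output MIME types
 *     return spec;
 *   }
 */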