/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#pragma once
// NOTE(review): the original include file names were lost (angle-bracket
// contents stripped by an HTML-unescaping pass). The eight includes below are
// reconstructed from the types this header uses and from the generated-SDK
// pattern — confirm against the upstream aws-sdk-cpp source.
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/sagemaker/model/ImageConfig.h>
#include <aws/sagemaker/model/ContainerMode.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <aws/sagemaker/model/ModelDataSource.h>
#include <aws/sagemaker/model/MultiModelConfig.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace SageMaker
{
namespace Model
{

  /**
   * Describes the container, as part of model definition.
   *
   * See Also: AWS API Reference
*/ class ContainerDefinition { public: AWS_SAGEMAKER_API ContainerDefinition(); AWS_SAGEMAKER_API ContainerDefinition(Aws::Utils::Json::JsonView jsonValue); AWS_SAGEMAKER_API ContainerDefinition& operator=(Aws::Utils::Json::JsonView jsonValue); AWS_SAGEMAKER_API Aws::Utils::Json::JsonValue Jsonize() const; /** *

This parameter is ignored for models that contain only a * PrimaryContainer.

When a ContainerDefinition is * part of an inference pipeline, the value of the parameter uniquely identifies * the container for the purposes of logging and metrics. For information, see Use * Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a * value for this parameter for a ContainerDefinition that is part of * an inference pipeline, a unique name is automatically assigned based on the * position of the ContainerDefinition in the pipeline. If you specify * a value for the ContainerHostName for any * ContainerDefinition that is part of an inference pipeline, you must * specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline.

*/ inline const Aws::String& GetContainerHostname() const{ return m_containerHostname; } /** *

This parameter is ignored for models that contain only a * PrimaryContainer.

When a ContainerDefinition is * part of an inference pipeline, the value of the parameter uniquely identifies * the container for the purposes of logging and metrics. For information, see Use * Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a * value for this parameter for a ContainerDefinition that is part of * an inference pipeline, a unique name is automatically assigned based on the * position of the ContainerDefinition in the pipeline. If you specify * a value for the ContainerHostName for any * ContainerDefinition that is part of an inference pipeline, you must * specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline.

*/ inline bool ContainerHostnameHasBeenSet() const { return m_containerHostnameHasBeenSet; } /** *

This parameter is ignored for models that contain only a * PrimaryContainer.

When a ContainerDefinition is * part of an inference pipeline, the value of the parameter uniquely identifies * the container for the purposes of logging and metrics. For information, see Use * Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a * value for this parameter for a ContainerDefinition that is part of * an inference pipeline, a unique name is automatically assigned based on the * position of the ContainerDefinition in the pipeline. If you specify * a value for the ContainerHostName for any * ContainerDefinition that is part of an inference pipeline, you must * specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline.

*/ inline void SetContainerHostname(const Aws::String& value) { m_containerHostnameHasBeenSet = true; m_containerHostname = value; } /** *

This parameter is ignored for models that contain only a * PrimaryContainer.

When a ContainerDefinition is * part of an inference pipeline, the value of the parameter uniquely identifies * the container for the purposes of logging and metrics. For information, see Use * Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a * value for this parameter for a ContainerDefinition that is part of * an inference pipeline, a unique name is automatically assigned based on the * position of the ContainerDefinition in the pipeline. If you specify * a value for the ContainerHostName for any * ContainerDefinition that is part of an inference pipeline, you must * specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline.

*/ inline void SetContainerHostname(Aws::String&& value) { m_containerHostnameHasBeenSet = true; m_containerHostname = std::move(value); } /** *

This parameter is ignored for models that contain only a * PrimaryContainer.

When a ContainerDefinition is * part of an inference pipeline, the value of the parameter uniquely identifies * the container for the purposes of logging and metrics. For information, see Use * Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a * value for this parameter for a ContainerDefinition that is part of * an inference pipeline, a unique name is automatically assigned based on the * position of the ContainerDefinition in the pipeline. If you specify * a value for the ContainerHostName for any * ContainerDefinition that is part of an inference pipeline, you must * specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline.

*/ inline void SetContainerHostname(const char* value) { m_containerHostnameHasBeenSet = true; m_containerHostname.assign(value); } /** *

This parameter is ignored for models that contain only a * PrimaryContainer.

When a ContainerDefinition is * part of an inference pipeline, the value of the parameter uniquely identifies * the container for the purposes of logging and metrics. For information, see Use * Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a * value for this parameter for a ContainerDefinition that is part of * an inference pipeline, a unique name is automatically assigned based on the * position of the ContainerDefinition in the pipeline. If you specify * a value for the ContainerHostName for any * ContainerDefinition that is part of an inference pipeline, you must * specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline.

*/ inline ContainerDefinition& WithContainerHostname(const Aws::String& value) { SetContainerHostname(value); return *this;} /** *

This parameter is ignored for models that contain only a * PrimaryContainer.

When a ContainerDefinition is * part of an inference pipeline, the value of the parameter uniquely identifies * the container for the purposes of logging and metrics. For information, see Use * Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a * value for this parameter for a ContainerDefinition that is part of * an inference pipeline, a unique name is automatically assigned based on the * position of the ContainerDefinition in the pipeline. If you specify * a value for the ContainerHostName for any * ContainerDefinition that is part of an inference pipeline, you must * specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline.

*/ inline ContainerDefinition& WithContainerHostname(Aws::String&& value) { SetContainerHostname(std::move(value)); return *this;} /** *

This parameter is ignored for models that contain only a * PrimaryContainer.

When a ContainerDefinition is * part of an inference pipeline, the value of the parameter uniquely identifies * the container for the purposes of logging and metrics. For information, see Use * Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a * value for this parameter for a ContainerDefinition that is part of * an inference pipeline, a unique name is automatically assigned based on the * position of the ContainerDefinition in the pipeline. If you specify * a value for the ContainerHostName for any * ContainerDefinition that is part of an inference pipeline, you must * specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline.

*/ inline ContainerDefinition& WithContainerHostname(const char* value) { SetContainerHostname(value); return *this;} /** *

The path where inference code is stored. This can be either in Amazon EC2 * Container Registry or in a Docker registry that is accessible from the same VPC * that you configure for your endpoint. If you are using your own custom algorithm * instead of an algorithm provided by SageMaker, the inference code must meet * SageMaker requirements. SageMaker supports both * registry/repository[:tag] and * registry/repository[@digest] image path formats. For more * information, see Using * Your Own Algorithms with Amazon SageMaker.

The model * artifacts in an Amazon S3 bucket and the Docker image for inference container in * Amazon EC2 Container Registry must be in the same region as the model or * endpoint you are creating.

*/ inline const Aws::String& GetImage() const{ return m_image; } /** *

The path where inference code is stored. This can be either in Amazon EC2 * Container Registry or in a Docker registry that is accessible from the same VPC * that you configure for your endpoint. If you are using your own custom algorithm * instead of an algorithm provided by SageMaker, the inference code must meet * SageMaker requirements. SageMaker supports both * registry/repository[:tag] and * registry/repository[@digest] image path formats. For more * information, see Using * Your Own Algorithms with Amazon SageMaker.

The model * artifacts in an Amazon S3 bucket and the Docker image for inference container in * Amazon EC2 Container Registry must be in the same region as the model or * endpoint you are creating.

*/ inline bool ImageHasBeenSet() const { return m_imageHasBeenSet; } /** *

The path where inference code is stored. This can be either in Amazon EC2 * Container Registry or in a Docker registry that is accessible from the same VPC * that you configure for your endpoint. If you are using your own custom algorithm * instead of an algorithm provided by SageMaker, the inference code must meet * SageMaker requirements. SageMaker supports both * registry/repository[:tag] and * registry/repository[@digest] image path formats. For more * information, see Using * Your Own Algorithms with Amazon SageMaker.

The model * artifacts in an Amazon S3 bucket and the Docker image for inference container in * Amazon EC2 Container Registry must be in the same region as the model or * endpoint you are creating.

*/ inline void SetImage(const Aws::String& value) { m_imageHasBeenSet = true; m_image = value; } /** *

The path where inference code is stored. This can be either in Amazon EC2 * Container Registry or in a Docker registry that is accessible from the same VPC * that you configure for your endpoint. If you are using your own custom algorithm * instead of an algorithm provided by SageMaker, the inference code must meet * SageMaker requirements. SageMaker supports both * registry/repository[:tag] and * registry/repository[@digest] image path formats. For more * information, see Using * Your Own Algorithms with Amazon SageMaker.

The model * artifacts in an Amazon S3 bucket and the Docker image for inference container in * Amazon EC2 Container Registry must be in the same region as the model or * endpoint you are creating.

*/ inline void SetImage(Aws::String&& value) { m_imageHasBeenSet = true; m_image = std::move(value); } /** *

The path where inference code is stored. This can be either in Amazon EC2 * Container Registry or in a Docker registry that is accessible from the same VPC * that you configure for your endpoint. If you are using your own custom algorithm * instead of an algorithm provided by SageMaker, the inference code must meet * SageMaker requirements. SageMaker supports both * registry/repository[:tag] and * registry/repository[@digest] image path formats. For more * information, see Using * Your Own Algorithms with Amazon SageMaker.

The model * artifacts in an Amazon S3 bucket and the Docker image for inference container in * Amazon EC2 Container Registry must be in the same region as the model or * endpoint you are creating.

*/ inline void SetImage(const char* value) { m_imageHasBeenSet = true; m_image.assign(value); } /** *

The path where inference code is stored. This can be either in Amazon EC2 * Container Registry or in a Docker registry that is accessible from the same VPC * that you configure for your endpoint. If you are using your own custom algorithm * instead of an algorithm provided by SageMaker, the inference code must meet * SageMaker requirements. SageMaker supports both * registry/repository[:tag] and * registry/repository[@digest] image path formats. For more * information, see Using * Your Own Algorithms with Amazon SageMaker.

The model * artifacts in an Amazon S3 bucket and the Docker image for inference container in * Amazon EC2 Container Registry must be in the same region as the model or * endpoint you are creating.

*/ inline ContainerDefinition& WithImage(const Aws::String& value) { SetImage(value); return *this;} /** *

The path where inference code is stored. This can be either in Amazon EC2 * Container Registry or in a Docker registry that is accessible from the same VPC * that you configure for your endpoint. If you are using your own custom algorithm * instead of an algorithm provided by SageMaker, the inference code must meet * SageMaker requirements. SageMaker supports both * registry/repository[:tag] and * registry/repository[@digest] image path formats. For more * information, see Using * Your Own Algorithms with Amazon SageMaker.

The model * artifacts in an Amazon S3 bucket and the Docker image for inference container in * Amazon EC2 Container Registry must be in the same region as the model or * endpoint you are creating.

*/ inline ContainerDefinition& WithImage(Aws::String&& value) { SetImage(std::move(value)); return *this;} /** *

The path where inference code is stored. This can be either in Amazon EC2 * Container Registry or in a Docker registry that is accessible from the same VPC * that you configure for your endpoint. If you are using your own custom algorithm * instead of an algorithm provided by SageMaker, the inference code must meet * SageMaker requirements. SageMaker supports both * registry/repository[:tag] and * registry/repository[@digest] image path formats. For more * information, see Using * Your Own Algorithms with Amazon SageMaker.

The model * artifacts in an Amazon S3 bucket and the Docker image for inference container in * Amazon EC2 Container Registry must be in the same region as the model or * endpoint you are creating.

*/ inline ContainerDefinition& WithImage(const char* value) { SetImage(value); return *this;} /** *

Specifies whether the model container is in Amazon ECR or a private Docker * registry accessible from your Amazon Virtual Private Cloud (VPC). For * information about storing containers in a private Docker registry, see Use * a Private Docker Registry for Real-Time Inference Containers.

*

The model artifacts in an Amazon S3 bucket and the Docker image for inference * container in Amazon EC2 Container Registry must be in the same region as the * model or endpoint you are creating.

*/ inline const ImageConfig& GetImageConfig() const{ return m_imageConfig; } /** *

Specifies whether the model container is in Amazon ECR or a private Docker * registry accessible from your Amazon Virtual Private Cloud (VPC). For * information about storing containers in a private Docker registry, see Use * a Private Docker Registry for Real-Time Inference Containers.

*

The model artifacts in an Amazon S3 bucket and the Docker image for inference * container in Amazon EC2 Container Registry must be in the same region as the * model or endpoint you are creating.

*/ inline bool ImageConfigHasBeenSet() const { return m_imageConfigHasBeenSet; } /** *

Specifies whether the model container is in Amazon ECR or a private Docker * registry accessible from your Amazon Virtual Private Cloud (VPC). For * information about storing containers in a private Docker registry, see Use * a Private Docker Registry for Real-Time Inference Containers.

*

The model artifacts in an Amazon S3 bucket and the Docker image for inference * container in Amazon EC2 Container Registry must be in the same region as the * model or endpoint you are creating.

*/ inline void SetImageConfig(const ImageConfig& value) { m_imageConfigHasBeenSet = true; m_imageConfig = value; } /** *

Specifies whether the model container is in Amazon ECR or a private Docker * registry accessible from your Amazon Virtual Private Cloud (VPC). For * information about storing containers in a private Docker registry, see Use * a Private Docker Registry for Real-Time Inference Containers.

*

The model artifacts in an Amazon S3 bucket and the Docker image for inference * container in Amazon EC2 Container Registry must be in the same region as the * model or endpoint you are creating.

*/ inline void SetImageConfig(ImageConfig&& value) { m_imageConfigHasBeenSet = true; m_imageConfig = std::move(value); } /** *

Specifies whether the model container is in Amazon ECR or a private Docker * registry accessible from your Amazon Virtual Private Cloud (VPC). For * information about storing containers in a private Docker registry, see Use * a Private Docker Registry for Real-Time Inference Containers.

*

The model artifacts in an Amazon S3 bucket and the Docker image for inference * container in Amazon EC2 Container Registry must be in the same region as the * model or endpoint you are creating.

*/ inline ContainerDefinition& WithImageConfig(const ImageConfig& value) { SetImageConfig(value); return *this;} /** *

Specifies whether the model container is in Amazon ECR or a private Docker * registry accessible from your Amazon Virtual Private Cloud (VPC). For * information about storing containers in a private Docker registry, see Use * a Private Docker Registry for Real-Time Inference Containers.

*

The model artifacts in an Amazon S3 bucket and the Docker image for inference * container in Amazon EC2 Container Registry must be in the same region as the * model or endpoint you are creating.

*/ inline ContainerDefinition& WithImageConfig(ImageConfig&& value) { SetImageConfig(std::move(value)); return *this;} /** *

Whether the container hosts a single model or multiple models.

*/ inline const ContainerMode& GetMode() const{ return m_mode; } /** *

Whether the container hosts a single model or multiple models.

*/ inline bool ModeHasBeenSet() const { return m_modeHasBeenSet; } /** *

Whether the container hosts a single model or multiple models.

*/ inline void SetMode(const ContainerMode& value) { m_modeHasBeenSet = true; m_mode = value; } /** *

Whether the container hosts a single model or multiple models.

*/ inline void SetMode(ContainerMode&& value) { m_modeHasBeenSet = true; m_mode = std::move(value); } /** *

Whether the container hosts a single model or multiple models.

*/ inline ContainerDefinition& WithMode(const ContainerMode& value) { SetMode(value); return *this;} /** *

Whether the container hosts a single model or multiple models.

*/ inline ContainerDefinition& WithMode(ContainerMode&& value) { SetMode(std::move(value)); return *this;} /** *

The S3 path where the model artifacts, which result from model training, are * stored. This path must point to a single gzip compressed tar archive (.tar.gz * suffix). The S3 path is required for SageMaker built-in algorithms, but not if * you use your own algorithms. For more information on built-in algorithms, see Common * Parameters.

The model artifacts must be in an S3 bucket that * is in the same region as the model or endpoint you are creating.

*

If you provide a value for this parameter, SageMaker uses Amazon Web Services * Security Token Service to download model artifacts from the S3 path you provide. * Amazon Web Services STS is activated in your Amazon Web Services account by * default. If you previously deactivated Amazon Web Services STS for a region, you * need to reactivate Amazon Web Services STS for that region. For more * information, see Activating * and Deactivating Amazon Web Services STS in an Amazon Web Services Region in * the Amazon Web Services Identity and Access Management User Guide.

*

If you use a built-in algorithm to create a model, SageMaker * requires that you provide a S3 path to the model artifacts in * ModelDataUrl.

*/ inline const Aws::String& GetModelDataUrl() const{ return m_modelDataUrl; } /** *

The S3 path where the model artifacts, which result from model training, are * stored. This path must point to a single gzip compressed tar archive (.tar.gz * suffix). The S3 path is required for SageMaker built-in algorithms, but not if * you use your own algorithms. For more information on built-in algorithms, see Common * Parameters.

The model artifacts must be in an S3 bucket that * is in the same region as the model or endpoint you are creating.

*

If you provide a value for this parameter, SageMaker uses Amazon Web Services * Security Token Service to download model artifacts from the S3 path you provide. * Amazon Web Services STS is activated in your Amazon Web Services account by * default. If you previously deactivated Amazon Web Services STS for a region, you * need to reactivate Amazon Web Services STS for that region. For more * information, see Activating * and Deactivating Amazon Web Services STS in an Amazon Web Services Region in * the Amazon Web Services Identity and Access Management User Guide.

*

If you use a built-in algorithm to create a model, SageMaker * requires that you provide a S3 path to the model artifacts in * ModelDataUrl.

*/ inline bool ModelDataUrlHasBeenSet() const { return m_modelDataUrlHasBeenSet; } /** *

The S3 path where the model artifacts, which result from model training, are * stored. This path must point to a single gzip compressed tar archive (.tar.gz * suffix). The S3 path is required for SageMaker built-in algorithms, but not if * you use your own algorithms. For more information on built-in algorithms, see Common * Parameters.

The model artifacts must be in an S3 bucket that * is in the same region as the model or endpoint you are creating.

*

If you provide a value for this parameter, SageMaker uses Amazon Web Services * Security Token Service to download model artifacts from the S3 path you provide. * Amazon Web Services STS is activated in your Amazon Web Services account by * default. If you previously deactivated Amazon Web Services STS for a region, you * need to reactivate Amazon Web Services STS for that region. For more * information, see Activating * and Deactivating Amazon Web Services STS in an Amazon Web Services Region in * the Amazon Web Services Identity and Access Management User Guide.

*

If you use a built-in algorithm to create a model, SageMaker * requires that you provide a S3 path to the model artifacts in * ModelDataUrl.

*/ inline void SetModelDataUrl(const Aws::String& value) { m_modelDataUrlHasBeenSet = true; m_modelDataUrl = value; } /** *

The S3 path where the model artifacts, which result from model training, are * stored. This path must point to a single gzip compressed tar archive (.tar.gz * suffix). The S3 path is required for SageMaker built-in algorithms, but not if * you use your own algorithms. For more information on built-in algorithms, see Common * Parameters.

The model artifacts must be in an S3 bucket that * is in the same region as the model or endpoint you are creating.

*

If you provide a value for this parameter, SageMaker uses Amazon Web Services * Security Token Service to download model artifacts from the S3 path you provide. * Amazon Web Services STS is activated in your Amazon Web Services account by * default. If you previously deactivated Amazon Web Services STS for a region, you * need to reactivate Amazon Web Services STS for that region. For more * information, see Activating * and Deactivating Amazon Web Services STS in an Amazon Web Services Region in * the Amazon Web Services Identity and Access Management User Guide.

*

If you use a built-in algorithm to create a model, SageMaker * requires that you provide a S3 path to the model artifacts in * ModelDataUrl.

*/ inline void SetModelDataUrl(Aws::String&& value) { m_modelDataUrlHasBeenSet = true; m_modelDataUrl = std::move(value); } /** *

The S3 path where the model artifacts, which result from model training, are * stored. This path must point to a single gzip compressed tar archive (.tar.gz * suffix). The S3 path is required for SageMaker built-in algorithms, but not if * you use your own algorithms. For more information on built-in algorithms, see Common * Parameters.

The model artifacts must be in an S3 bucket that * is in the same region as the model or endpoint you are creating.

*

If you provide a value for this parameter, SageMaker uses Amazon Web Services * Security Token Service to download model artifacts from the S3 path you provide. * Amazon Web Services STS is activated in your Amazon Web Services account by * default. If you previously deactivated Amazon Web Services STS for a region, you * need to reactivate Amazon Web Services STS for that region. For more * information, see Activating * and Deactivating Amazon Web Services STS in an Amazon Web Services Region in * the Amazon Web Services Identity and Access Management User Guide.

*

If you use a built-in algorithm to create a model, SageMaker * requires that you provide a S3 path to the model artifacts in * ModelDataUrl.

*/ inline void SetModelDataUrl(const char* value) { m_modelDataUrlHasBeenSet = true; m_modelDataUrl.assign(value); } /** *

The S3 path where the model artifacts, which result from model training, are * stored. This path must point to a single gzip compressed tar archive (.tar.gz * suffix). The S3 path is required for SageMaker built-in algorithms, but not if * you use your own algorithms. For more information on built-in algorithms, see Common * Parameters.

The model artifacts must be in an S3 bucket that * is in the same region as the model or endpoint you are creating.

*

If you provide a value for this parameter, SageMaker uses Amazon Web Services * Security Token Service to download model artifacts from the S3 path you provide. * Amazon Web Services STS is activated in your Amazon Web Services account by * default. If you previously deactivated Amazon Web Services STS for a region, you * need to reactivate Amazon Web Services STS for that region. For more * information, see Activating * and Deactivating Amazon Web Services STS in an Amazon Web Services Region in * the Amazon Web Services Identity and Access Management User Guide.

*

If you use a built-in algorithm to create a model, SageMaker * requires that you provide a S3 path to the model artifacts in * ModelDataUrl.

*/ inline ContainerDefinition& WithModelDataUrl(const Aws::String& value) { SetModelDataUrl(value); return *this;} /** *

The S3 path where the model artifacts, which result from model training, are * stored. This path must point to a single gzip compressed tar archive (.tar.gz * suffix). The S3 path is required for SageMaker built-in algorithms, but not if * you use your own algorithms. For more information on built-in algorithms, see Common * Parameters.

The model artifacts must be in an S3 bucket that * is in the same region as the model or endpoint you are creating.

*

If you provide a value for this parameter, SageMaker uses Amazon Web Services * Security Token Service to download model artifacts from the S3 path you provide. * Amazon Web Services STS is activated in your Amazon Web Services account by * default. If you previously deactivated Amazon Web Services STS for a region, you * need to reactivate Amazon Web Services STS for that region. For more * information, see Activating * and Deactivating Amazon Web Services STS in an Amazon Web Services Region in * the Amazon Web Services Identity and Access Management User Guide.

*

If you use a built-in algorithm to create a model, SageMaker * requires that you provide a S3 path to the model artifacts in * ModelDataUrl.

*/ inline ContainerDefinition& WithModelDataUrl(Aws::String&& value) { SetModelDataUrl(std::move(value)); return *this;} /** *

The S3 path where the model artifacts, which result from model training, are * stored. This path must point to a single gzip compressed tar archive (.tar.gz * suffix). The S3 path is required for SageMaker built-in algorithms, but not if * you use your own algorithms. For more information on built-in algorithms, see Common * Parameters.

The model artifacts must be in an S3 bucket that * is in the same region as the model or endpoint you are creating.

*

If you provide a value for this parameter, SageMaker uses Amazon Web Services * Security Token Service to download model artifacts from the S3 path you provide. * Amazon Web Services STS is activated in your Amazon Web Services account by * default. If you previously deactivated Amazon Web Services STS for a region, you * need to reactivate Amazon Web Services STS for that region. For more * information, see Activating * and Deactivating Amazon Web Services STS in an Amazon Web Services Region in * the Amazon Web Services Identity and Access Management User Guide.

*

If you use a built-in algorithm to create a model, SageMaker * requires that you provide a S3 path to the model artifacts in * ModelDataUrl.

*/ inline ContainerDefinition& WithModelDataUrl(const char* value) { SetModelDataUrl(value); return *this;} /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline const Aws::Map& GetEnvironment() const{ return m_environment; } /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline bool EnvironmentHasBeenSet() const { return m_environmentHasBeenSet; } /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline void SetEnvironment(const Aws::Map& value) { m_environmentHasBeenSet = true; m_environment = value; } /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline void SetEnvironment(Aws::Map&& value) { m_environmentHasBeenSet = true; m_environment = std::move(value); } /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline ContainerDefinition& WithEnvironment(const Aws::Map& value) { SetEnvironment(value); return *this;} /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline ContainerDefinition& WithEnvironment(Aws::Map&& value) { SetEnvironment(std::move(value)); return *this;} /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline ContainerDefinition& AddEnvironment(const Aws::String& key, const Aws::String& value) { m_environmentHasBeenSet = true; m_environment.emplace(key, value); return *this; } /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline ContainerDefinition& AddEnvironment(Aws::String&& key, const Aws::String& value) { m_environmentHasBeenSet = true; m_environment.emplace(std::move(key), value); return *this; } /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline ContainerDefinition& AddEnvironment(const Aws::String& key, Aws::String&& value) { m_environmentHasBeenSet = true; m_environment.emplace(key, std::move(value)); return *this; } /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline ContainerDefinition& AddEnvironment(Aws::String&& key, Aws::String&& value) { m_environmentHasBeenSet = true; m_environment.emplace(std::move(key), std::move(value)); return *this; } /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline ContainerDefinition& AddEnvironment(const char* key, Aws::String&& value) { m_environmentHasBeenSet = true; m_environment.emplace(key, std::move(value)); return *this; } /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline ContainerDefinition& AddEnvironment(Aws::String&& key, const char* value) { m_environmentHasBeenSet = true; m_environment.emplace(std::move(key), value); return *this; } /** *

The environment variables to set in the Docker container. Each key and value * in the Environment string to string map can have length of up to * 1024. We support up to 16 entries in the map.

*/ inline ContainerDefinition& AddEnvironment(const char* key, const char* value) { m_environmentHasBeenSet = true; m_environment.emplace(key, value); return *this; } /** *

/**
 * The name or Amazon Resource Name (ARN) of the model package used to create
 * the model.
 */
inline const Aws::String& GetModelPackageName() const{ return m_modelPackageName; }

/** True once a model package name has been explicitly assigned. */
inline bool ModelPackageNameHasBeenSet() const { return m_modelPackageNameHasBeenSet; }

/** Assigns the model package name; copy, move, and C-string overloads. */
inline void SetModelPackageName(const Aws::String& value) { m_modelPackageName = value; m_modelPackageNameHasBeenSet = true; }
inline void SetModelPackageName(Aws::String&& value) { m_modelPackageName = std::move(value); m_modelPackageNameHasBeenSet = true; }
inline void SetModelPackageName(const char* value) { m_modelPackageName.assign(value); m_modelPackageNameHasBeenSet = true; }

/** Fluent setters: assign the model package name and return *this for chaining. */
inline ContainerDefinition& WithModelPackageName(const Aws::String& value) { SetModelPackageName(value); return *this; }
inline ContainerDefinition& WithModelPackageName(Aws::String&& value) { SetModelPackageName(std::move(value)); return *this; }
inline ContainerDefinition& WithModelPackageName(const char* value) { SetModelPackageName(value); return *this; }

/**
 * The inference specification name in the model package version.
 */
inline const Aws::String& GetInferenceSpecificationName() const{ return m_inferenceSpecificationName; }

/** True once an inference specification name has been explicitly assigned. */
inline bool InferenceSpecificationNameHasBeenSet() const { return m_inferenceSpecificationNameHasBeenSet; }

/** Assigns the inference specification name; copy, move, and C-string overloads. */
inline void SetInferenceSpecificationName(const Aws::String& value) { m_inferenceSpecificationName = value; m_inferenceSpecificationNameHasBeenSet = true; }
inline void SetInferenceSpecificationName(Aws::String&& value) { m_inferenceSpecificationName = std::move(value); m_inferenceSpecificationNameHasBeenSet = true; }
inline void SetInferenceSpecificationName(const char* value) { m_inferenceSpecificationName.assign(value); m_inferenceSpecificationNameHasBeenSet = true; }

/** Fluent setters: assign the name and return *this for chaining. */
inline ContainerDefinition& WithInferenceSpecificationName(const Aws::String& value) { SetInferenceSpecificationName(value); return *this; }
inline ContainerDefinition& WithInferenceSpecificationName(Aws::String&& value) { SetInferenceSpecificationName(std::move(value)); return *this; }
inline ContainerDefinition& WithInferenceSpecificationName(const char* value) { SetInferenceSpecificationName(value); return *this; }

/**
 * Specifies additional configuration for multi-model endpoints.
 */
inline const MultiModelConfig& GetMultiModelConfig() const{ return m_multiModelConfig; }

/** True once a multi-model configuration has been explicitly assigned. */
inline bool MultiModelConfigHasBeenSet() const { return m_multiModelConfigHasBeenSet; }

/** Assigns the multi-model configuration; copy and move overloads. */
inline void SetMultiModelConfig(const MultiModelConfig& value) { m_multiModelConfig = value; m_multiModelConfigHasBeenSet = true; }
inline void SetMultiModelConfig(MultiModelConfig&& value) { m_multiModelConfig = std::move(value); m_multiModelConfigHasBeenSet = true; }

/** Fluent setters: assign the configuration and return *this for chaining. */
inline ContainerDefinition& WithMultiModelConfig(const MultiModelConfig& value) { SetMultiModelConfig(value); return *this; }
inline ContainerDefinition& WithMultiModelConfig(MultiModelConfig&& value) { SetMultiModelConfig(std::move(value)); return *this; }

Specifies the location of ML model data to deploy.

Currently * you cannot use ModelDataSource in conjunction with SageMaker batch * transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and * SageMaker Marketplace.

*/ inline const ModelDataSource& GetModelDataSource() const{ return m_modelDataSource; } /** *

Specifies the location of ML model data to deploy.

Currently * you cannot use ModelDataSource in conjunction with SageMaker batch * transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and * SageMaker Marketplace.

*/ inline bool ModelDataSourceHasBeenSet() const { return m_modelDataSourceHasBeenSet; } /** *

Specifies the location of ML model data to deploy.

Currently * you cannot use ModelDataSource in conjunction with SageMaker batch * transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and * SageMaker Marketplace.

*/ inline void SetModelDataSource(const ModelDataSource& value) { m_modelDataSourceHasBeenSet = true; m_modelDataSource = value; } /** *

Specifies the location of ML model data to deploy.

Currently * you cannot use ModelDataSource in conjunction with SageMaker batch * transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and * SageMaker Marketplace.

*/ inline void SetModelDataSource(ModelDataSource&& value) { m_modelDataSourceHasBeenSet = true; m_modelDataSource = std::move(value); } /** *

Specifies the location of ML model data to deploy.

Currently * you cannot use ModelDataSource in conjunction with SageMaker batch * transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and * SageMaker Marketplace.

*/ inline ContainerDefinition& WithModelDataSource(const ModelDataSource& value) { SetModelDataSource(value); return *this;} /** *

Specifies the location of ML model data to deploy.

Currently * you cannot use ModelDataSource in conjunction with SageMaker batch * transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and * SageMaker Marketplace.

*/ inline ContainerDefinition& WithModelDataSource(ModelDataSource&& value) { SetModelDataSource(std::move(value)); return *this;} private: Aws::String m_containerHostname; bool m_containerHostnameHasBeenSet = false; Aws::String m_image; bool m_imageHasBeenSet = false; ImageConfig m_imageConfig; bool m_imageConfigHasBeenSet = false; ContainerMode m_mode; bool m_modeHasBeenSet = false; Aws::String m_modelDataUrl; bool m_modelDataUrlHasBeenSet = false; Aws::Map m_environment; bool m_environmentHasBeenSet = false; Aws::String m_modelPackageName; bool m_modelPackageNameHasBeenSet = false; Aws::String m_inferenceSpecificationName; bool m_inferenceSpecificationNameHasBeenSet = false; MultiModelConfig m_multiModelConfig; bool m_multiModelConfigHasBeenSet = false; ModelDataSource m_modelDataSource; bool m_modelDataSourceHasBeenSet = false; }; } // namespace Model } // namespace SageMaker } // namespace Aws