/* * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.sagemaker.model; import java.io.Serializable; import javax.annotation.Generated; import com.amazonaws.protocol.StructuredPojo; import com.amazonaws.protocol.ProtocolMarshaller; /** *

* Describes the container, as part of the model definition. *
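 * <p>
 * A minimal construction sketch; the image URI, S3 path, and environment entry below are illustrative placeholders,
 * not defaults:
 * </p>
 * <pre>{@code
 * ContainerDefinition container = new ContainerDefinition()
 *         // placeholder ECR image in registry/repository[:tag] form
 *         .withImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-inference-image:latest")
 *         // placeholder S3 path to a gzip-compressed tar archive of model artifacts
 *         .withModelDataUrl("s3://my-bucket/output/model.tar.gz")
 *         .withMode(ContainerMode.SingleModel)
 *         .addEnvironmentEntry("SAGEMAKER_PROGRAM", "inference.py");
 * }</pre>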

* * @see AWS API Documentation */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class ContainerDefinition implements Serializable, Cloneable, StructuredPojo { /** *

* This parameter is ignored for models that contain only a PrimaryContainer. *

*

* When a ContainerDefinition is part of an inference pipeline, the value of the parameter uniquely * identifies the container for the purposes of logging and metrics. For information, see Use Logs and Metrics * to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a * ContainerDefinition that is part of an inference pipeline, a unique name is automatically assigned * based on the position of the ContainerDefinition in the pipeline. If you specify a value for the * ContainerHostName for any ContainerDefinition that is part of an inference pipeline, * you must specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline. *
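 * <p>
 * For example, if you name any container in an inference pipeline, name them all (the hostnames here are
 * illustrative):
 * </p>
 * <pre>{@code
 * ContainerDefinition preprocess = new ContainerDefinition().withContainerHostname("preprocess");
 * ContainerDefinition predict = new ContainerDefinition().withContainerHostname("predict");
 * }</pre>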

*/ private String containerHostname; /** *

* The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a Docker * registry that is accessible from the same VPC that you configure for your endpoint. If you are using your own * custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker * requirements. SageMaker supports both registry/repository[:tag] and * registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon * SageMaker. *

* *

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container * Registry must be in the same region as the model or endpoint you are creating. *

*
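 * <p>
 * Both supported image path forms, with placeholder account, region, repository, and digest values:
 * </p>
 * <pre>{@code
 * container.withImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:1.0");                  // registry/repository[:tag]
 * container.withImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo@sha256:EXAMPLEDIGEST"); // registry/repository[@digest]
 * }</pre>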
*/ private String image; /** *

* Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon * Virtual Private Cloud (VPC). For information about storing containers in a private Docker registry, see Use a * Private Docker Registry for Real-Time Inference Containers. *

* *

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container * Registry must be in the same region as the model or endpoint you are creating. *

*
*/ private ImageConfig imageConfig; /** *

* Whether the container hosts a single model or multiple models. *
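 * <p>
 * Valid values are the {@link ContainerMode} constants; the enum overload of the wither stores the enum's string
 * value:
 * </p>
 * <pre>{@code
 * container.withMode(ContainerMode.MultiModel);
 * }</pre>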

*/ private String mode; /** *

* The S3 path where the model artifacts, which result from model training, are stored. This path must point to a * single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for SageMaker built-in algorithms, * but not if you use your own algorithms. For more information on built-in algorithms, see Common * Parameters. *

* *

* The model artifacts must be in an S3 bucket that is in the same region as the model or endpoint you are creating. *

*
*

* If you provide a value for this parameter, SageMaker uses Amazon Web Services Security Token Service to download * model artifacts from the S3 path you provide. Amazon Web Services STS is activated in your Amazon Web Services * account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate * Amazon Web Services STS for that region. For more information, see Activating and * Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity * and Access Management User Guide. *

* *

* If you use a built-in algorithm to create a model, SageMaker requires that you provide an S3 path to the model * artifacts in ModelDataUrl. *

*
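 * <p>
 * A sketch with a placeholder bucket; the path must point to a single gzip-compressed tar archive:
 * </p>
 * <pre>{@code
 * container.withModelDataUrl("s3://my-bucket/training-output/model.tar.gz");
 * }</pre>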
*/ private String modelDataUrl; /** *

* The environment variables to set in the Docker container. Each key and value in the Environment * string to string map can have a length of up to 1024 characters. We support up to 16 entries in the map. *
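 * <p>
 * For example (keys and values are placeholders; each may be at most 1024 characters, with at most 16 entries):
 * </p>
 * <pre>{@code
 * container.addEnvironmentEntry("LOG_LEVEL", "info")
 *          .addEnvironmentEntry("MODEL_SERVER_WORKERS", "2");
 * }</pre>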

*/ private java.util.Map<String, String> environment; /** *

* The name or Amazon Resource Name (ARN) of the model package to use to create the model. *

*/ private String modelPackageName; /** *

* The inference specification name in the model package version. *

*/ private String inferenceSpecificationName; /** *

* Specifies additional configuration for multi-model endpoints. *

*/ private MultiModelConfig multiModelConfig; /** *

* Specifies the location of ML model data to deploy. *

* *

* Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, SageMaker * serverless endpoints, SageMaker multi-model endpoints, or SageMaker Marketplace. *

*
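 * <p>
 * A sketch using the S3 variant; this assumes the companion {@code S3ModelDataSource} type from this package, and
 * all values are placeholders:
 * </p>
 * <pre>{@code
 * container.withModelDataSource(new ModelDataSource()
 *         .withS3DataSource(new S3ModelDataSource()
 *                 .withS3Uri("s3://my-bucket/model/")
 *                 .withS3DataType("S3Prefix")
 *                 .withCompressionType("None")));
 * }</pre>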
*/ private ModelDataSource modelDataSource; /** *

* This parameter is ignored for models that contain only a PrimaryContainer. *

*

* When a ContainerDefinition is part of an inference pipeline, the value of the parameter uniquely * identifies the container for the purposes of logging and metrics. For information, see Use Logs and Metrics * to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a * ContainerDefinition that is part of an inference pipeline, a unique name is automatically assigned * based on the position of the ContainerDefinition in the pipeline. If you specify a value for the * ContainerHostName for any ContainerDefinition that is part of an inference pipeline, * you must specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline. *

* * @param containerHostname * This parameter is ignored for models that contain only a PrimaryContainer.

*

* When a ContainerDefinition is part of an inference pipeline, the value of the parameter * uniquely identifies the container for the purposes of logging and metrics. For information, see Use Logs and * Metrics to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a * ContainerDefinition that is part of an inference pipeline, a unique name is automatically * assigned based on the position of the ContainerDefinition in the pipeline. If you specify a * value for the ContainerHostName for any ContainerDefinition that is part of an * inference pipeline, you must specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline. */ public void setContainerHostname(String containerHostname) { this.containerHostname = containerHostname; } /** *

* This parameter is ignored for models that contain only a PrimaryContainer. *

*

* When a ContainerDefinition is part of an inference pipeline, the value of the parameter uniquely * identifies the container for the purposes of logging and metrics. For information, see Use Logs and Metrics * to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a * ContainerDefinition that is part of an inference pipeline, a unique name is automatically assigned * based on the position of the ContainerDefinition in the pipeline. If you specify a value for the * ContainerHostName for any ContainerDefinition that is part of an inference pipeline, * you must specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline. *

* * @return This parameter is ignored for models that contain only a PrimaryContainer.

*

* When a ContainerDefinition is part of an inference pipeline, the value of the parameter * uniquely identifies the container for the purposes of logging and metrics. For information, see Use Logs and * Metrics to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a * ContainerDefinition that is part of an inference pipeline, a unique name is automatically * assigned based on the position of the ContainerDefinition in the pipeline. If you specify a * value for the ContainerHostName for any ContainerDefinition that is part of an * inference pipeline, you must specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline. */ public String getContainerHostname() { return this.containerHostname; } /** *

* This parameter is ignored for models that contain only a PrimaryContainer. *

*

* When a ContainerDefinition is part of an inference pipeline, the value of the parameter uniquely * identifies the container for the purposes of logging and metrics. For information, see Use Logs and Metrics * to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a * ContainerDefinition that is part of an inference pipeline, a unique name is automatically assigned * based on the position of the ContainerDefinition in the pipeline. If you specify a value for the * ContainerHostName for any ContainerDefinition that is part of an inference pipeline, * you must specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline. *

* * @param containerHostname * This parameter is ignored for models that contain only a PrimaryContainer.

*

* When a ContainerDefinition is part of an inference pipeline, the value of the parameter * uniquely identifies the container for the purposes of logging and metrics. For information, see Use Logs and * Metrics to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a * ContainerDefinition that is part of an inference pipeline, a unique name is automatically * assigned based on the position of the ContainerDefinition in the pipeline. If you specify a * value for the ContainerHostName for any ContainerDefinition that is part of an * inference pipeline, you must specify a value for the ContainerHostName parameter of every * ContainerDefinition in that pipeline. * @return Returns a reference to this object so that method calls can be chained together. */ public ContainerDefinition withContainerHostname(String containerHostname) { setContainerHostname(containerHostname); return this; } /** *

* The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a Docker * registry that is accessible from the same VPC that you configure for your endpoint. If you are using your own * custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker * requirements. SageMaker supports both registry/repository[:tag] and * registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon * SageMaker. *

* *

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container * Registry must be in the same region as the model or endpoint you are creating. *

*
* * @param image * The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a * Docker registry that is accessible from the same VPC that you configure for your endpoint. If you are * using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must * meet SageMaker requirements. SageMaker supports both registry/repository[:tag] and * registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with * Amazon SageMaker.

*

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 * Container Registry must be in the same region as the model or endpoint you are creating. *

*/ public void setImage(String image) { this.image = image; } /** *

* The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a Docker * registry that is accessible from the same VPC that you configure for your endpoint. If you are using your own * custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker * requirements. SageMaker supports both registry/repository[:tag] and * registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon * SageMaker. *

* *

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container * Registry must be in the same region as the model or endpoint you are creating. *

*
* * @return The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a * Docker registry that is accessible from the same VPC that you configure for your endpoint. If you are * using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must * meet SageMaker requirements. SageMaker supports both registry/repository[:tag] and * registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms * with Amazon SageMaker.

*

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 * Container Registry must be in the same region as the model or endpoint you are creating. *

*/ public String getImage() { return this.image; } /** *

* The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a Docker * registry that is accessible from the same VPC that you configure for your endpoint. If you are using your own * custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker * requirements. SageMaker supports both registry/repository[:tag] and * registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon * SageMaker. *

* *

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container * Registry must be in the same region as the model or endpoint you are creating. *

*
* * @param image * The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a * Docker registry that is accessible from the same VPC that you configure for your endpoint. If you are * using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must * meet SageMaker requirements. SageMaker supports both registry/repository[:tag] and * registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with * Amazon SageMaker.

*

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 * Container Registry must be in the same region as the model or endpoint you are creating. *

* @return Returns a reference to this object so that method calls can be chained together. */ public ContainerDefinition withImage(String image) { setImage(image); return this; } /** *

* Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon * Virtual Private Cloud (VPC). For information about storing containers in a private Docker registry, see Use a * Private Docker Registry for Real-Time Inference Containers. *

* *

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container * Registry must be in the same region as the model or endpoint you are creating. *

*
* * @param imageConfig * Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your * Amazon Virtual Private Cloud (VPC). For information about storing containers in a private Docker registry, * see Use a Private Docker Registry for Real-Time Inference Containers.

*

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 * Container Registry must be in the same region as the model or endpoint you are creating. *

*/ public void setImageConfig(ImageConfig imageConfig) { this.imageConfig = imageConfig; } /** *

* Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon * Virtual Private Cloud (VPC). For information about storing containers in a private Docker registry, see Use a * Private Docker Registry for Real-Time Inference Containers. *

* *

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container * Registry must be in the same region as the model or endpoint you are creating. *

*
* * @return Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your * Amazon Virtual Private Cloud (VPC). For information about storing containers in a private Docker * registry, see Use a * Private Docker Registry for Real-Time Inference Containers.

*

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 * Container Registry must be in the same region as the model or endpoint you are creating. *

*/ public ImageConfig getImageConfig() { return this.imageConfig; } /** *

* Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon * Virtual Private Cloud (VPC). For information about storing containers in a private Docker registry, see Use a * Private Docker Registry for Real-Time Inference Containers. *

* *

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container * Registry must be in the same region as the model or endpoint you are creating. *

*
* * @param imageConfig * Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your * Amazon Virtual Private Cloud (VPC). For information about storing containers in a private Docker registry, * see Use a Private Docker Registry for Real-Time Inference Containers.

*

* The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 * Container Registry must be in the same region as the model or endpoint you are creating. *

* @return Returns a reference to this object so that method calls can be chained together. */ public ContainerDefinition withImageConfig(ImageConfig imageConfig) { setImageConfig(imageConfig); return this; } /** *

* Whether the container hosts a single model or multiple models. *

* * @param mode * Whether the container hosts a single model or multiple models. * @see ContainerMode */ public void setMode(String mode) { this.mode = mode; } /** *

* Whether the container hosts a single model or multiple models. *

* * @return Whether the container hosts a single model or multiple models. * @see ContainerMode */ public String getMode() { return this.mode; } /** *

* Whether the container hosts a single model or multiple models. *

* * @param mode * Whether the container hosts a single model or multiple models. * @return Returns a reference to this object so that method calls can be chained together. * @see ContainerMode */ public ContainerDefinition withMode(String mode) { setMode(mode); return this; } /** *

* Whether the container hosts a single model or multiple models. *

* * @param mode * Whether the container hosts a single model or multiple models. * @return Returns a reference to this object so that method calls can be chained together. * @see ContainerMode */ public ContainerDefinition withMode(ContainerMode mode) { this.mode = mode.toString(); return this; } /** *

* The S3 path where the model artifacts, which result from model training, are stored. This path must point to a * single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for SageMaker built-in algorithms, * but not if you use your own algorithms. For more information on built-in algorithms, see Common * Parameters. *

* *

* The model artifacts must be in an S3 bucket that is in the same region as the model or endpoint you are creating. *

*
*

* If you provide a value for this parameter, SageMaker uses Amazon Web Services Security Token Service to download * model artifacts from the S3 path you provide. Amazon Web Services STS is activated in your Amazon Web Services * account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate * Amazon Web Services STS for that region. For more information, see Activating and * Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity * and Access Management User Guide. *

* *

* If you use a built-in algorithm to create a model, SageMaker requires that you provide an S3 path to the model * artifacts in ModelDataUrl. *

*
* * @param modelDataUrl * The S3 path where the model artifacts, which result from model training, are stored. This path must point * to a single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for SageMaker built-in * algorithms, but not if you use your own algorithms. For more information on built-in algorithms, see Common * Parameters.

*

* The model artifacts must be in an S3 bucket that is in the same region as the model or endpoint you are * creating. *

*
*

* If you provide a value for this parameter, SageMaker uses Amazon Web Services Security Token Service to * download model artifacts from the S3 path you provide. Amazon Web Services STS is activated in your Amazon * Web Services account by default. If you previously deactivated Amazon Web Services STS for a region, you * need to reactivate Amazon Web Services STS for that region. For more information, see Activating * and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web * Services Identity and Access Management User Guide. *

* *

* If you use a built-in algorithm to create a model, SageMaker requires that you provide an S3 path to the * model artifacts in ModelDataUrl. *

*/ public void setModelDataUrl(String modelDataUrl) { this.modelDataUrl = modelDataUrl; } /** *

* The S3 path where the model artifacts, which result from model training, are stored. This path must point to a * single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for SageMaker built-in algorithms, * but not if you use your own algorithms. For more information on built-in algorithms, see Common * Parameters. *

* *

* The model artifacts must be in an S3 bucket that is in the same region as the model or endpoint you are creating. *

*
*

* If you provide a value for this parameter, SageMaker uses Amazon Web Services Security Token Service to download * model artifacts from the S3 path you provide. Amazon Web Services STS is activated in your Amazon Web Services * account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate * Amazon Web Services STS for that region. For more information, see Activating and * Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity * and Access Management User Guide. *

* *

* If you use a built-in algorithm to create a model, SageMaker requires that you provide an S3 path to the model * artifacts in ModelDataUrl. *

*
* * @return The S3 path where the model artifacts, which result from model training, are stored. This path must point * to a single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for SageMaker built-in * algorithms, but not if you use your own algorithms. For more information on built-in algorithms, see Common * Parameters.

*

* The model artifacts must be in an S3 bucket that is in the same region as the model or endpoint you are * creating. *

*
*

* If you provide a value for this parameter, SageMaker uses Amazon Web Services Security Token Service to * download model artifacts from the S3 path you provide. Amazon Web Services STS is activated in your * Amazon Web Services account by default. If you previously deactivated Amazon Web Services STS for a * region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and * Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services * Identity and Access Management User Guide. *

* *

* If you use a built-in algorithm to create a model, SageMaker requires that you provide an S3 path to the * model artifacts in ModelDataUrl. *

*/ public String getModelDataUrl() { return this.modelDataUrl; } /** *

* The S3 path where the model artifacts, which result from model training, are stored. This path must point to a * single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for SageMaker built-in algorithms, * but not if you use your own algorithms. For more information on built-in algorithms, see Common * Parameters. *

* *

* The model artifacts must be in an S3 bucket that is in the same region as the model or endpoint you are creating. *

*
*

* If you provide a value for this parameter, SageMaker uses Amazon Web Services Security Token Service to download * model artifacts from the S3 path you provide. Amazon Web Services STS is activated in your Amazon Web Services * account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate * Amazon Web Services STS for that region. For more information, see Activating and * Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity * and Access Management User Guide. *

* *

* If you use a built-in algorithm to create a model, SageMaker requires that you provide an S3 path to the model * artifacts in ModelDataUrl. *

*
* * @param modelDataUrl * The S3 path where the model artifacts, which result from model training, are stored. This path must point * to a single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for SageMaker built-in * algorithms, but not if you use your own algorithms. For more information on built-in algorithms, see Common * Parameters.

*

* The model artifacts must be in an S3 bucket that is in the same region as the model or endpoint you are * creating. *

*
*

* If you provide a value for this parameter, SageMaker uses Amazon Web Services Security Token Service to * download model artifacts from the S3 path you provide. Amazon Web Services STS is activated in your Amazon * Web Services account by default. If you previously deactivated Amazon Web Services STS for a region, you * need to reactivate Amazon Web Services STS for that region. For more information, see Activating * and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web * Services Identity and Access Management User Guide. *

* *

* If you use a built-in algorithm to create a model, SageMaker requires that you provide an S3 path to the * model artifacts in ModelDataUrl. *

* @return Returns a reference to this object so that method calls can be chained together. */ public ContainerDefinition withModelDataUrl(String modelDataUrl) { setModelDataUrl(modelDataUrl); return this; } /** *

* The environment variables to set in the Docker container. Each key and value in the Environment * string to string map can have a length of up to 1024 characters. We support up to 16 entries in the map. *

* * @return The environment variables to set in the Docker container. Each key and value in the * Environment string to string map can have a length of up to 1024 characters. We support up to 16 entries * in the map. */ public java.util.Map<String, String> getEnvironment() { return environment; } /** *

* The environment variables to set in the Docker container. Each key and value in the Environment * string to string map can have a length of up to 1024 characters. We support up to 16 entries in the map. *

* * @param environment * The environment variables to set in the Docker container. Each key and value in the * Environment string to string map can have a length of up to 1024 characters. We support up to 16 entries * in the map. */ public void setEnvironment(java.util.Map<String, String> environment) { this.environment = environment; } /** *

* The environment variables to set in the Docker container. Each key and value in the Environment * string to string map can have a length of up to 1024 characters. We support up to 16 entries in the map. *

* * @param environment * The environment variables to set in the Docker container. Each key and value in the * Environment string to string map can have a length of up to 1024 characters. We support up to 16 entries * in the map. * @return Returns a reference to this object so that method calls can be chained together. */ public ContainerDefinition withEnvironment(java.util.Map<String, String> environment) { setEnvironment(environment); return this; } /** * Add a single Environment entry * * @see ContainerDefinition#withEnvironment * @return a reference to this object so that method calls can be chained together. */ public ContainerDefinition addEnvironmentEntry(String key, String value) { if (null == this.environment) { this.environment = new java.util.HashMap<String, String>(); } if (this.environment.containsKey(key)) throw new IllegalArgumentException("Duplicate keys (" + key + ") are provided."); this.environment.put(key, value); return this; } /** * Removes all the entries added into Environment. * * @return Returns a reference to this object so that method calls can be chained together. */ public ContainerDefinition clearEnvironmentEntries() { this.environment = null; return this; } /** *

* The name or Amazon Resource Name (ARN) of the model package to use to create the model. *

* * @param modelPackageName * The name or Amazon Resource Name (ARN) of the model package to use to create the model. */ public void setModelPackageName(String modelPackageName) { this.modelPackageName = modelPackageName; } /** *

* The name or Amazon Resource Name (ARN) of the model package to use to create the model. *

* * @return The name or Amazon Resource Name (ARN) of the model package to use to create the model. */ public String getModelPackageName() { return this.modelPackageName; } /** *

* The name or Amazon Resource Name (ARN) of the model package to use to create the model. *

* * @param modelPackageName * The name or Amazon Resource Name (ARN) of the model package to use to create the model. * @return Returns a reference to this object so that method calls can be chained together. */ public ContainerDefinition withModelPackageName(String modelPackageName) { setModelPackageName(modelPackageName); return this; } /** *

* The inference specification name in the model package version. *

* * @param inferenceSpecificationName * The inference specification name in the model package version. */ public void setInferenceSpecificationName(String inferenceSpecificationName) { this.inferenceSpecificationName = inferenceSpecificationName; } /** *

* The inference specification name in the model package version. *

* * @return The inference specification name in the model package version. */ public String getInferenceSpecificationName() { return this.inferenceSpecificationName; } /** *

* The inference specification name in the model package version. *

* * @param inferenceSpecificationName * The inference specification name in the model package version. * @return Returns a reference to this object so that method calls can be chained together. */ public ContainerDefinition withInferenceSpecificationName(String inferenceSpecificationName) { setInferenceSpecificationName(inferenceSpecificationName); return this; } /** *

* Specifies additional configuration for multi-model endpoints. *

* * @param multiModelConfig * Specifies additional configuration for multi-model endpoints. */ public void setMultiModelConfig(MultiModelConfig multiModelConfig) { this.multiModelConfig = multiModelConfig; } /** *

* Specifies additional configuration for multi-model endpoints. *

* * @return Specifies additional configuration for multi-model endpoints. */ public MultiModelConfig getMultiModelConfig() { return this.multiModelConfig; } /** *

* Specifies additional configuration for multi-model endpoints. *

* * @param multiModelConfig * Specifies additional configuration for multi-model endpoints. * @return Returns a reference to this object so that method calls can be chained together. */ public ContainerDefinition withMultiModelConfig(MultiModelConfig multiModelConfig) { setMultiModelConfig(multiModelConfig); return this; } /** *

* Specifies the location of ML model data to deploy. *

* *

* Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, SageMaker * serverless endpoints, SageMaker multi-model endpoints, or SageMaker Marketplace. *

*
* * @param modelDataSource * Specifies the location of ML model data to deploy.

*

* Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, * SageMaker serverless endpoints, SageMaker multi-model endpoints, or SageMaker Marketplace. *

*/ public void setModelDataSource(ModelDataSource modelDataSource) { this.modelDataSource = modelDataSource; } /** *

* Specifies the location of ML model data to deploy. *

* *

* Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, SageMaker * serverless endpoints, SageMaker multi-model endpoints, or SageMaker Marketplace. *

*
* * @return Specifies the location of ML model data to deploy.

*

* Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, * SageMaker serverless endpoints, SageMaker multi-model endpoints, or SageMaker Marketplace. *

*/ public ModelDataSource getModelDataSource() { return this.modelDataSource; } /** *

* Specifies the location of ML model data to deploy. *

* *

* Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, SageMaker * serverless endpoints, SageMaker multi-model endpoints, or SageMaker Marketplace. *

*
* * @param modelDataSource * Specifies the location of ML model data to deploy.

*

* Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, * SageMaker serverless endpoints, SageMaker multi-model endpoints, or SageMaker Marketplace. *

* @return Returns a reference to this object so that method calls can be chained together. */ public ContainerDefinition withModelDataSource(ModelDataSource modelDataSource) { setModelDataSource(modelDataSource); return this; } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getContainerHostname() != null) sb.append("ContainerHostname: ").append(getContainerHostname()).append(","); if (getImage() != null) sb.append("Image: ").append(getImage()).append(","); if (getImageConfig() != null) sb.append("ImageConfig: ").append(getImageConfig()).append(","); if (getMode() != null) sb.append("Mode: ").append(getMode()).append(","); if (getModelDataUrl() != null) sb.append("ModelDataUrl: ").append(getModelDataUrl()).append(","); if (getEnvironment() != null) sb.append("Environment: ").append(getEnvironment()).append(","); if (getModelPackageName() != null) sb.append("ModelPackageName: ").append(getModelPackageName()).append(","); if (getInferenceSpecificationName() != null) sb.append("InferenceSpecificationName: ").append(getInferenceSpecificationName()).append(","); if (getMultiModelConfig() != null) sb.append("MultiModelConfig: ").append(getMultiModelConfig()).append(","); if (getModelDataSource() != null) sb.append("ModelDataSource: ").append(getModelDataSource()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof ContainerDefinition == false) return false; ContainerDefinition other = (ContainerDefinition) obj; if (other.getContainerHostname() == null ^ this.getContainerHostname() == null) return false; if (other.getContainerHostname() != null && other.getContainerHostname().equals(this.getContainerHostname()) == false) return false; if (other.getImage() == null ^ this.getImage() == null) return false; if (other.getImage() != null && other.getImage().equals(this.getImage()) == false) return false; if (other.getImageConfig() == null ^ this.getImageConfig() == null) return false; if (other.getImageConfig() != null && other.getImageConfig().equals(this.getImageConfig()) == false) return false; if (other.getMode() == null ^ this.getMode() == null) return false; if (other.getMode() != null && other.getMode().equals(this.getMode()) == false) return false; if (other.getModelDataUrl() == null ^ this.getModelDataUrl() == null) return false; if (other.getModelDataUrl() != null && other.getModelDataUrl().equals(this.getModelDataUrl()) == false) return false; if (other.getEnvironment() == null ^ this.getEnvironment() == null) return false; if (other.getEnvironment() != null && other.getEnvironment().equals(this.getEnvironment()) == false) return false; if (other.getModelPackageName() == null ^ this.getModelPackageName() == null) return false; if (other.getModelPackageName() != null && other.getModelPackageName().equals(this.getModelPackageName()) == false) return false; if (other.getInferenceSpecificationName() == null ^ this.getInferenceSpecificationName() == null) return false; if (other.getInferenceSpecificationName() != null && other.getInferenceSpecificationName().equals(this.getInferenceSpecificationName()) == false) return false; if (other.getMultiModelConfig() == null ^ 
this.getMultiModelConfig() == null) return false; if (other.getMultiModelConfig() != null && other.getMultiModelConfig().equals(this.getMultiModelConfig()) == false) return false; if (other.getModelDataSource() == null ^ this.getModelDataSource() == null) return false; if (other.getModelDataSource() != null && other.getModelDataSource().equals(this.getModelDataSource()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getContainerHostname() == null) ? 0 : getContainerHostname().hashCode()); hashCode = prime * hashCode + ((getImage() == null) ? 0 : getImage().hashCode()); hashCode = prime * hashCode + ((getImageConfig() == null) ? 0 : getImageConfig().hashCode()); hashCode = prime * hashCode + ((getMode() == null) ? 0 : getMode().hashCode()); hashCode = prime * hashCode + ((getModelDataUrl() == null) ? 0 : getModelDataUrl().hashCode()); hashCode = prime * hashCode + ((getEnvironment() == null) ? 0 : getEnvironment().hashCode()); hashCode = prime * hashCode + ((getModelPackageName() == null) ? 0 : getModelPackageName().hashCode()); hashCode = prime * hashCode + ((getInferenceSpecificationName() == null) ? 0 : getInferenceSpecificationName().hashCode()); hashCode = prime * hashCode + ((getMultiModelConfig() == null) ? 0 : getMultiModelConfig().hashCode()); hashCode = prime * hashCode + ((getModelDataSource() == null) ? 0 : getModelDataSource().hashCode()); return hashCode; } @Override public ContainerDefinition clone() { try { return (ContainerDefinition) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } @com.amazonaws.annotation.SdkInternalApi @Override public void marshall(ProtocolMarshaller protocolMarshaller) { com.amazonaws.services.sagemaker.model.transform.ContainerDefinitionMarshaller.getInstance().marshall(this, protocolMarshaller); } }