/*
 * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.sagemaker.model;

import java.io.Serializable;

import javax.annotation.Generated;

import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;

/**
 * Defines how to perform inference generation after a training job is run.
 *
 * @see AWS API Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class InferenceSpecification implements Serializable, Cloneable, StructuredPojo {

    /**
     * The Amazon ECR registry path of the Docker image that contains the inference code.
     */
    private java.util.List<ModelPackageContainerDefinition> containers;
    /**
     * A list of the instance types on which a transformation job can be run or on which an endpoint can be deployed.
     * This parameter is required for unversioned models, and optional for versioned models.
     */
    private java.util.List<String> supportedTransformInstanceTypes;
    /**
     * A list of the instance types that are used to generate inferences in real-time. This parameter is required for
     * unversioned models, and optional for versioned models.
     */
    private java.util.List<String> supportedRealtimeInferenceInstanceTypes;
    /**
     * The supported MIME types for the input data.
     */
    private java.util.List<String> supportedContentTypes;
    /**
     * The supported MIME types for the output data.
     */
    private java.util.List<String> supportedResponseMIMETypes;

    /**
     * The Amazon ECR registry path of the Docker image that contains the inference code.
     *
     * @return The Amazon ECR registry path of the Docker image that contains the inference code.
     */
    public java.util.List<ModelPackageContainerDefinition> getContainers() {
        return containers;
    }

    /**
     * The Amazon ECR registry path of the Docker image that contains the inference code.
     *
     * @param containers
     *        The Amazon ECR registry path of the Docker image that contains the inference code.
     */
    public void setContainers(java.util.Collection<ModelPackageContainerDefinition> containers) {
        if (containers == null) {
            this.containers = null;
            return;
        }
        this.containers = new java.util.ArrayList<ModelPackageContainerDefinition>(containers);
    }

    /**
     * The Amazon ECR registry path of the Docker image that contains the inference code.
     *
     * NOTE: This method appends the values to the existing list (if any). Use
     * {@link #setContainers(java.util.Collection)} or {@link #withContainers(java.util.Collection)} if you want to
     * override the existing values.
     *
     * @param containers
     *        The Amazon ECR registry path of the Docker image that contains the inference code.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InferenceSpecification withContainers(ModelPackageContainerDefinition... containers) {
        if (this.containers == null) {
            setContainers(new java.util.ArrayList<ModelPackageContainerDefinition>(containers.length));
        }
        for (ModelPackageContainerDefinition ele : containers) {
            this.containers.add(ele);
        }
        return this;
    }

    /**
     * The Amazon ECR registry path of the Docker image that contains the inference code.
     *
     * @param containers
     *        The Amazon ECR registry path of the Docker image that contains the inference code.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InferenceSpecification withContainers(java.util.Collection<ModelPackageContainerDefinition> containers) {
        setContainers(containers);
        return this;
    }

    /**
     * A list of the instance types on which a transformation job can be run or on which an endpoint can be deployed.
     *
     * This parameter is required for unversioned models, and optional for versioned models.
     *
     * @return A list of the instance types on which a transformation job can be run or on which an endpoint can be
     *         deployed. This parameter is required for unversioned models, and optional for versioned models.
     * @see TransformInstanceType
     */
    public java.util.List<String> getSupportedTransformInstanceTypes() {
        return supportedTransformInstanceTypes;
    }
    /**
     * A list of the instance types on which a transformation job can be run or on which an endpoint can be deployed.
     *
     * This parameter is required for unversioned models, and optional for versioned models.
     *
     * @param supportedTransformInstanceTypes
     *        A list of the instance types on which a transformation job can be run or on which an endpoint can be
     *        deployed. This parameter is required for unversioned models, and optional for versioned models.
     * @see TransformInstanceType
     */
    public void setSupportedTransformInstanceTypes(java.util.Collection<String> supportedTransformInstanceTypes) {
        if (supportedTransformInstanceTypes == null) {
            this.supportedTransformInstanceTypes = null;
            return;
        }
        this.supportedTransformInstanceTypes = new java.util.ArrayList<String>(supportedTransformInstanceTypes);
    }

    /**
     * A list of the instance types on which a transformation job can be run or on which an endpoint can be deployed.
*
* This parameter is required for unversioned models, and optional for versioned models.
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setSupportedTransformInstanceTypes(java.util.Collection)} or
* {@link #withSupportedTransformInstanceTypes(java.util.Collection)} if you want to override the existing values.
     *
     * @param supportedTransformInstanceTypes
     *        A list of the instance types on which a transformation job can be run or on which an endpoint can be
     *        deployed. This parameter is required for unversioned models, and optional for versioned models.
* @return Returns a reference to this object so that method calls can be chained together.
* @see TransformInstanceType
*/
public InferenceSpecification withSupportedTransformInstanceTypes(String... supportedTransformInstanceTypes) {
if (this.supportedTransformInstanceTypes == null) {
            setSupportedTransformInstanceTypes(new java.util.ArrayList<String>(supportedTransformInstanceTypes.length));
        }
        for (String ele : supportedTransformInstanceTypes) {
            this.supportedTransformInstanceTypes.add(ele);
        }
        return this;
    }
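    /*
     * Illustrative sketch (added; not part of the generated file): the varargs overload above appends to any
     * existing list, while setSupportedTransformInstanceTypes(...) replaces it. The instance-type strings
     * below are hypothetical example values.
     *
     *   InferenceSpecification spec = new InferenceSpecification();
     *   spec.withSupportedTransformInstanceTypes("ml.m4.xlarge");  // list: [ml.m4.xlarge]
     *   spec.withSupportedTransformInstanceTypes("ml.c4.xlarge");  // appends: [ml.m4.xlarge, ml.c4.xlarge]
     *   spec.setSupportedTransformInstanceTypes(
     *           java.util.Arrays.asList("ml.m5.large"));           // replaces: [ml.m5.large]
     */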

    /**
     * A list of the instance types on which a transformation job can be run or on which an endpoint can be deployed.
*
* This parameter is required for unversioned models, and optional for versioned models.
     *
     * @param supportedTransformInstanceTypes
     *        A list of the instance types on which a transformation job can be run or on which an endpoint can be
     *        deployed. This parameter is required for unversioned models, and optional for versioned models.
* @return Returns a reference to this object so that method calls can be chained together.
* @see TransformInstanceType
*/
    public InferenceSpecification withSupportedTransformInstanceTypes(java.util.Collection<String> supportedTransformInstanceTypes) {
        setSupportedTransformInstanceTypes(supportedTransformInstanceTypes);
        return this;
    }

    /**
     * A list of the instance types on which a transformation job can be run or on which an endpoint can be deployed.
*
* This parameter is required for unversioned models, and optional for versioned models.
     *
     * @param supportedTransformInstanceTypes
     *        A list of the instance types on which a transformation job can be run or on which an endpoint can be
     *        deployed. This parameter is required for unversioned models, and optional for versioned models.
* @return Returns a reference to this object so that method calls can be chained together.
* @see TransformInstanceType
*/
public InferenceSpecification withSupportedTransformInstanceTypes(TransformInstanceType... supportedTransformInstanceTypes) {
        java.util.ArrayList<String> supportedTransformInstanceTypesCopy = new java.util.ArrayList<String>(supportedTransformInstanceTypes.length);
        for (TransformInstanceType value : supportedTransformInstanceTypes) {
            supportedTransformInstanceTypesCopy.add(value.toString());
        }
        if (getSupportedTransformInstanceTypes() == null) {
            setSupportedTransformInstanceTypes(supportedTransformInstanceTypesCopy);
        } else {
            getSupportedTransformInstanceTypes().addAll(supportedTransformInstanceTypesCopy);
        }
        return this;
    }
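    /*
     * Illustrative note (added; not generated): the enum overload above stores each value's string form via
     * value.toString(), so the following two calls are equivalent, assuming TransformInstanceType.MlM4Xlarge
     * maps to "ml.m4.xlarge" (example value only):
     *
     *   spec.withSupportedTransformInstanceTypes(TransformInstanceType.MlM4Xlarge);
     *   spec.withSupportedTransformInstanceTypes("ml.m4.xlarge");
     */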

    /**
     * A list of the instance types that are used to generate inferences in real-time.
*
* This parameter is required for unversioned models, and optional for versioned models.
     *
     * @return A list of the instance types that are used to generate inferences in real-time. This parameter is
     *         required for unversioned models, and optional for versioned models.
* @see ProductionVariantInstanceType
*/
    public java.util.List<String> getSupportedRealtimeInferenceInstanceTypes() {
        return supportedRealtimeInferenceInstanceTypes;
    }

    /**
     * A list of the instance types that are used to generate inferences in real-time.
*
* This parameter is required for unversioned models, and optional for versioned models.
     *
     * @param supportedRealtimeInferenceInstanceTypes
     *        A list of the instance types that are used to generate inferences in real-time. This parameter is
     *        required for unversioned models, and optional for versioned models.
* @see ProductionVariantInstanceType
*/
    public void setSupportedRealtimeInferenceInstanceTypes(java.util.Collection<String> supportedRealtimeInferenceInstanceTypes) {
        if (supportedRealtimeInferenceInstanceTypes == null) {
            this.supportedRealtimeInferenceInstanceTypes = null;
            return;
        }
        this.supportedRealtimeInferenceInstanceTypes = new java.util.ArrayList<String>(supportedRealtimeInferenceInstanceTypes);
    }

    /**
     * A list of the instance types that are used to generate inferences in real-time.
*
* This parameter is required for unversioned models, and optional for versioned models.
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setSupportedRealtimeInferenceInstanceTypes(java.util.Collection)} or
* {@link #withSupportedRealtimeInferenceInstanceTypes(java.util.Collection)} if you want to override the existing
* values.
     *
     * @param supportedRealtimeInferenceInstanceTypes
     *        A list of the instance types that are used to generate inferences in real-time. This parameter is
     *        required for unversioned models, and optional for versioned models.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ProductionVariantInstanceType
*/
public InferenceSpecification withSupportedRealtimeInferenceInstanceTypes(String... supportedRealtimeInferenceInstanceTypes) {
if (this.supportedRealtimeInferenceInstanceTypes == null) {
            setSupportedRealtimeInferenceInstanceTypes(new java.util.ArrayList<String>(supportedRealtimeInferenceInstanceTypes.length));
        }
        for (String ele : supportedRealtimeInferenceInstanceTypes) {
            this.supportedRealtimeInferenceInstanceTypes.add(ele);
        }
        return this;
    }

    /**
     * A list of the instance types that are used to generate inferences in real-time.
*
* This parameter is required for unversioned models, and optional for versioned models.
     *
     * @param supportedRealtimeInferenceInstanceTypes
     *        A list of the instance types that are used to generate inferences in real-time. This parameter is
     *        required for unversioned models, and optional for versioned models.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ProductionVariantInstanceType
*/
    public InferenceSpecification withSupportedRealtimeInferenceInstanceTypes(java.util.Collection<String> supportedRealtimeInferenceInstanceTypes) {
        setSupportedRealtimeInferenceInstanceTypes(supportedRealtimeInferenceInstanceTypes);
        return this;
    }

    /**
     * A list of the instance types that are used to generate inferences in real-time.
*
* This parameter is required for unversioned models, and optional for versioned models.
     *
     * @param supportedRealtimeInferenceInstanceTypes
     *        A list of the instance types that are used to generate inferences in real-time. This parameter is
     *        required for unversioned models, and optional for versioned models.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ProductionVariantInstanceType
*/
public InferenceSpecification withSupportedRealtimeInferenceInstanceTypes(ProductionVariantInstanceType... supportedRealtimeInferenceInstanceTypes) {
        java.util.ArrayList<String> supportedRealtimeInferenceInstanceTypesCopy = new java.util.ArrayList<String>(supportedRealtimeInferenceInstanceTypes.length);
        for (ProductionVariantInstanceType value : supportedRealtimeInferenceInstanceTypes) {
            supportedRealtimeInferenceInstanceTypesCopy.add(value.toString());
        }
        if (getSupportedRealtimeInferenceInstanceTypes() == null) {
            setSupportedRealtimeInferenceInstanceTypes(supportedRealtimeInferenceInstanceTypesCopy);
        } else {
            getSupportedRealtimeInferenceInstanceTypes().addAll(supportedRealtimeInferenceInstanceTypesCopy);
        }
        return this;
    }

    /**
     * The supported MIME types for the input data.
     *
     * @return The supported MIME types for the input data.
     */
    public java.util.List<String> getSupportedContentTypes() {
        return supportedContentTypes;
    }

    /**
     * The supported MIME types for the input data.
     *
     * @param supportedContentTypes
     *        The supported MIME types for the input data.
     */
    public void setSupportedContentTypes(java.util.Collection<String> supportedContentTypes) {
        if (supportedContentTypes == null) {
            this.supportedContentTypes = null;
            return;
        }
        this.supportedContentTypes = new java.util.ArrayList<String>(supportedContentTypes);
    }

    /**
     * The supported MIME types for the input data.
     *
     * NOTE: This method appends the values to the existing list (if any). Use
     * {@link #setSupportedContentTypes(java.util.Collection)} or
     * {@link #withSupportedContentTypes(java.util.Collection)} if you want to override the existing values.
     *
     * @param supportedContentTypes
     *        The supported MIME types for the input data.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InferenceSpecification withSupportedContentTypes(String... supportedContentTypes) {
        if (this.supportedContentTypes == null) {
            setSupportedContentTypes(new java.util.ArrayList<String>(supportedContentTypes.length));
        }
        for (String ele : supportedContentTypes) {
            this.supportedContentTypes.add(ele);
        }
        return this;
    }

    /**
     * The supported MIME types for the input data.
     *
     * @param supportedContentTypes
     *        The supported MIME types for the input data.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InferenceSpecification withSupportedContentTypes(java.util.Collection<String> supportedContentTypes) {
        setSupportedContentTypes(supportedContentTypes);
        return this;
    }

    /**
     * The supported MIME types for the output data.
     *
     * @return The supported MIME types for the output data.
     */
    public java.util.List<String> getSupportedResponseMIMETypes() {
        return supportedResponseMIMETypes;
    }

    /**
     * The supported MIME types for the output data.
     *
     * @param supportedResponseMIMETypes
     *        The supported MIME types for the output data.
     */
    public void setSupportedResponseMIMETypes(java.util.Collection<String> supportedResponseMIMETypes) {
        if (supportedResponseMIMETypes == null) {
            this.supportedResponseMIMETypes = null;
            return;
        }
        this.supportedResponseMIMETypes = new java.util.ArrayList<String>(supportedResponseMIMETypes);
    }

    /**
     * The supported MIME types for the output data.
     *
     * NOTE: This method appends the values to the existing list (if any). Use
     * {@link #setSupportedResponseMIMETypes(java.util.Collection)} or
     * {@link #withSupportedResponseMIMETypes(java.util.Collection)} if you want to override the existing values.
     *
     * @param supportedResponseMIMETypes
     *        The supported MIME types for the output data.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InferenceSpecification withSupportedResponseMIMETypes(String... supportedResponseMIMETypes) {
        if (this.supportedResponseMIMETypes == null) {
            setSupportedResponseMIMETypes(new java.util.ArrayList<String>(supportedResponseMIMETypes.length));
        }
        for (String ele : supportedResponseMIMETypes) {
            this.supportedResponseMIMETypes.add(ele);
        }
        return this;
    }

    /**
     * The supported MIME types for the output data.
     *
     * @param supportedResponseMIMETypes
     *        The supported MIME types for the output data.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InferenceSpecification withSupportedResponseMIMETypes(java.util.Collection<String> supportedResponseMIMETypes) {
        setSupportedResponseMIMETypes(supportedResponseMIMETypes);
        return this;
    }
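    /*
     * Usage sketch (added for illustration; not part of the generated class). Because every with* method
     * returns this object, an InferenceSpecification can be assembled fluently. The ECR image path, MIME
     * types, and instance types below are hypothetical example values.
     *
     *   InferenceSpecification inferenceSpec = new InferenceSpecification()
     *           .withContainers(new ModelPackageContainerDefinition()
     *                   .withImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-inference-image:latest"))
     *           .withSupportedTransformInstanceTypes("ml.m4.xlarge")
     *           .withSupportedRealtimeInferenceInstanceTypes("ml.m4.xlarge")
     *           .withSupportedContentTypes("text/csv")
     *           .withSupportedResponseMIMETypes("text/csv");
     */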