/*
 * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.sagemaker.model;

import java.io.Serializable;
import javax.annotation.Generated;

import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;

/**
 * <p>
 * Contains information about the output location for the compiled model and the target device that the model runs on.
 * <code>TargetDevice</code> and <code>TargetPlatform</code> are mutually exclusive: specify exactly one of the two to
 * identify your target device or platform. If the device you want to use is not in the <code>TargetDevice</code>
 * list, use <code>TargetPlatform</code> to describe the platform of your edge device, and <code>CompilerOptions</code>
 * if there are settings that are required or recommended for that particular <code>TargetPlatform</code>.
 * </p>
 *
 * @see AWS API Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class OutputConfig implements Serializable, Cloneable, StructuredPojo {

    /**
     * <p>
     * Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example,
     * <code>s3://bucket-name/key-name-prefix</code>.
     * </p>
     */
    private String s3OutputLocation;

    /**
     * <p>
     * Identifies the target device or the machine learning instance that you want to run your model on after
     * compilation has completed. Alternatively, you can specify the OS, architecture, and accelerator using the
     * <code>TargetPlatform</code> fields; <code>TargetDevice</code> can be used instead of
     * <code>TargetPlatform</code>.
     * </p>
     * <note>
     * <p>
     * Currently <code>ml_trn1</code> is available only in the US East (N. Virginia) Region, and
     * <code>ml_inf2</code> is available only in the US East (Ohio) Region.
     * </p>
     * </note>
     */
    private String targetDevice;

    /**
     * <p>
     * Contains information about a target platform that you want your model to run on, such as OS, architecture, and
     * accelerators. It is an alternative to <code>TargetDevice</code>.
     * </p>
     * <p>
     * The following examples show how to configure the <code>TargetPlatform</code> and <code>CompilerOptions</code>
     * JSON strings for popular target platforms:
     * </p>
     * <ul>
     * <li>
     * <p>
     * Raspberry Pi 3 Model B+
     * </p>
     * <p>
     * <code>"TargetPlatform": {"Os": "LINUX", "Arch": "ARM_EABIHF"},</code>
     * </p>
     * <p>
     * <code>"CompilerOptions": {'mattr': ['+neon']}</code>
     * </p>
     * </li>
     * <li>
     * <p>
     * Jetson TX2
     * </p>
     * <p>
     * <code>"TargetPlatform": {"Os": "LINUX", "Arch": "ARM64", "Accelerator": "NVIDIA"},</code>
     * </p>
     * <p>
     * <code>"CompilerOptions": {'gpu-code': 'sm_62', 'trt-ver': '6.0.1', 'cuda-ver': '10.0'}</code>
     * </p>
     * </li>
     * <li>
     * <p>
     * EC2 m5.2xlarge instance OS
     * </p>
     * <p>
     * <code>"TargetPlatform": {"Os": "LINUX", "Arch": "X86_64", "Accelerator": "NVIDIA"},</code>
     * </p>
     * <p>
     * <code>"CompilerOptions": {'mcpu': 'skylake-avx512'}</code>
     * </p>
     * </li>
     * <li>
     * <p>
     * RK3399
     * </p>
     * <p>
     * <code>"TargetPlatform": {"Os": "LINUX", "Arch": "ARM64", "Accelerator": "MALI"}</code>
     * </p>
     * </li>
     * <li>
     * <p>
     * ARMv7 phone (CPU)
     * </p>
     * <p>
     * <code>"TargetPlatform": {"Os": "ANDROID", "Arch": "ARM_EABI"},</code>
     * </p>
     * <p>
     * <code>"CompilerOptions": {'ANDROID_PLATFORM': 25, 'mattr': ['+neon']}</code>
     * </p>
     * </li>
     * <li>
     * <p>
     * ARMv8 phone (CPU)
     * </p>
     * <p>
     * <code>"TargetPlatform": {"Os": "ANDROID", "Arch": "ARM64"},</code>
     * </p>
     * <p>
     * <code>"CompilerOptions": {'ANDROID_PLATFORM': 29}</code>
     * </p>
     * </li>
     * </ul>
     */
    private TargetPlatform targetPlatform;

    /**
     * <p>
     * Specifies additional parameters for compiler options in JSON format. The compiler options are
     * <code>TargetPlatform</code> specific. They are required for NVIDIA accelerators and highly recommended for CPU
     * compilations; in all other cases, specifying <code>CompilerOptions</code> is optional.
     * </p>
     * <ul>
     * <li>
     * <p>
     * <code>DTYPE</code>: Specifies the data type for the input. When compiling for <code>ml_*</code> (except for
     * <code>ml_inf</code>) instances using the PyTorch framework, provide the data type (dtype) of the model's
     * input. <code>"float32"</code> is used if <code>"DTYPE"</code> is not specified. Options for data type are:
     * </p>
     * <ul>
     * <li>
     * <p>
     * float32: Use either <code>"float"</code> or <code>"float32"</code>.
     * </p>
     * </li>
     * <li>
     * <p>
     * int64: Use either <code>"int64"</code> or <code>"long"</code>.
     * </p>
     * </li>
     * </ul>
     * <p>
     * For example, <code>{"dtype" : "float32"}</code>.
     * </p>
     * </li>
     * <li>
     * <p>
     * <code>CPU</code>: Compilation for CPU supports the following compiler options:
     * </p>
     * <ul>
     * <li>
     * <p>
     * <code>mcpu</code>: CPU micro-architecture. For example, <code>{'mcpu': 'skylake-avx512'}</code>
     * </p>
     * </li>
     * <li>
     * <p>
     * <code>mattr</code>: CPU flags. For example, <code>{'mattr': ['+neon', '+vfpv4']}</code>
     * </p>
     * </li>
     * </ul>
     * </li>
     * <li>
     * <p>
     * <code>ARM</code>: Details of ARM CPU compilations:
     * </p>
     * <ul>
     * <li>
     * <p>
     * <code>NEON</code>: NEON is an implementation of the Advanced SIMD extension used in ARMv7 processors.
     * </p>
     * <p>
     * For example, add <code>{'mattr': ['+neon']}</code> to the compiler options if compiling for an ARM 32-bit
     * platform with NEON support.
     * </p>
     * </li>
     * </ul>
     * </li>
     * <li>
     * <p>
     * <code>NVIDIA</code>: Compilation for NVIDIA GPU supports the following compiler options:
     * </p>
     * <ul>
     * <li>
     * <p>
     * <code>gpu_code</code>: Specifies the targeted architecture.
     * </p>
     * </li>
     * <li>
     * <p>
     * <code>trt-ver</code>: Specifies the TensorRT version in x.y.z format.
     * </p>
     * </li>
     * <li>
     * <p>
     * <code>cuda-ver</code>: Specifies the CUDA version in x.y format.
     * </p>
     * </li>
     * </ul>
     * <p>
     * For example, <code>{'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'}</code>
     * </p>
     * </li>
     * <li>
     * <p>
     * <code>ANDROID</code>: Compilation for the Android OS supports the following compiler options:
     * </p>
     * <ul>
     * <li>
     * <p>
     * <code>ANDROID_PLATFORM</code>: Specifies the Android API level. Available levels range from 21 to 29. For
     * example, <code>{'ANDROID_PLATFORM': 28}</code>.
     * </p>
     * </li>
     * <li>
     * <p>
     * <code>mattr</code>: Add <code>{'mattr': ['+neon']}</code> to the compiler options if compiling for an ARM
     * 32-bit platform with NEON support.
     * </p>
     * </li>
     * </ul>
     * </li>
     * <li>
     * <p>
     * <code>INFERENTIA</code>: Compilation for target <code>ml_inf1</code> uses compiler options passed in as a JSON
     * string. For example, <code>"CompilerOptions": "\"--verbose 1 --num-neuroncores 2 -O2\""</code>.
     * </p>
     * <p>
     * For information about supported compiler options, see Neuron Compiler CLI.
     * </p>
     * </li>
     * <li>
     * <p>
     * <code>CoreML</code>: Compilation for the CoreML <code>OutputConfig</code> <code>TargetDevice</code> supports
     * the following compiler options:
     * </p>
     * <ul>
     * <li>
     * <p>
     * <code>class_labels</code>: Specifies the classification labels file name inside the input tar.gz file. For
     * example, <code>{"class_labels": "imagenet_labels_1000.txt"}</code>. Labels inside the txt file should be
     * separated by newlines.
     * </p>
     * </li>
     * </ul>
     * </li>
     * <li>
     * <p>
     * <code>EIA</code>: Compilation for the Elastic Inference Accelerator supports the following compiler options:
     * </p>
     * <ul>
     * <li>
     * <p>
     * <code>precision_mode</code>: Specifies the precision of compiled artifacts. Supported values are
     * <code>"FP16"</code> and <code>"FP32"</code>. Default is <code>"FP32"</code>.
     * </p>
     * </li>
     * <li>
     * <p>
     * <code>signature_def_key</code>: Specifies the signature to use for models in SavedModel format. The default is
     * TensorFlow's default signature def key.
     * </p>
     * </li>
     * <li>
     * <p>
     * <code>output_names</code>: Specifies a list of output tensor names for models in FrozenGraph format. Set at
     * most one of the two API fields: <code>signature_def_key</code> or <code>output_names</code>.
     * </p>
     * </li>
     * </ul>
     * <p>
     * For example: <code>{"precision_mode": "FP32", "output_names": ["output:0"]}</code>
     * </p>
     * </li>
     * </ul>
     */
    private String compilerOptions;

    /**
     * <p>
     * The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to
     * encrypt your output models with Amazon S3 server-side encryption after the compilation job. If you don't
     * provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For
     * more information, see KMS-Managed Encryption Keys in the <i>Amazon Simple Storage Service Developer Guide</i>.
     * </p>
     * <p>
     * The <code>KmsKeyId</code> can be any of the following formats:
     * </p>
     * <ul>
     * <li>
     * <p>
     * Key ID: <code>1234abcd-12ab-34cd-56ef-1234567890ab</code>
     * </p>
     * </li>
     * <li>
     * <p>
     * Key ARN: <code>arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>
     * </p>
     * </li>
     * <li>
     * <p>
     * Alias name: <code>alias/ExampleAlias</code>
     * </p>
     * </li>
     * <li>
     * <p>
     * Alias name ARN: <code>arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias</code>
     * </p>
     * </li>
     * </ul>
     */
    private String kmsKeyId;
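    /*
     * Illustrative sketch (not part of the generated model): the four KmsKeyId formats listed above, as they would
     * be passed to setKmsKeyId/withKmsKeyId on an existing OutputConfig named "config". The key ID, account number,
     * and alias are the placeholder values from the documentation, not real keys.
     *
     *     config.withKmsKeyId("1234abcd-12ab-34cd-56ef-1234567890ab");                                        // key ID
     *     config.withKmsKeyId("arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"); // key ARN
     *     config.withKmsKeyId("alias/ExampleAlias");                                                          // alias name
     *     config.withKmsKeyId("arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias");                       // alias name ARN
     */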

    /**
     * <p>
     * Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example,
     * <code>s3://bucket-name/key-name-prefix</code>.
     * </p>
     *
     * @param s3OutputLocation
     *        Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example,
     *        <code>s3://bucket-name/key-name-prefix</code>.
     */
    public void setS3OutputLocation(String s3OutputLocation) {
        this.s3OutputLocation = s3OutputLocation;
    }

    /**
     * <p>
     * Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example,
     * <code>s3://bucket-name/key-name-prefix</code>.
     * </p>
     *
     * @return Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example,
     *         <code>s3://bucket-name/key-name-prefix</code>.
     */
    public String getS3OutputLocation() {
        return this.s3OutputLocation;
    }

    /**
     * <p>
     * Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example,
     * <code>s3://bucket-name/key-name-prefix</code>.
     * </p>
     *
     * @param s3OutputLocation
     *        Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example,
     *        <code>s3://bucket-name/key-name-prefix</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public OutputConfig withS3OutputLocation(String s3OutputLocation) {
        setS3OutputLocation(s3OutputLocation);
        return this;
    }
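    /*
     * Illustrative sketch (not part of the generated model): a minimal OutputConfig built with the fluent setters
     * defined in this class. The bucket/prefix is the placeholder from the documentation, and "ml_inf2" is one of
     * the device names mentioned in the targetDevice documentation above.
     *
     *     OutputConfig config = new OutputConfig()
     *             .withS3OutputLocation("s3://bucket-name/key-name-prefix")
     *             .withTargetDevice("ml_inf2");
     */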

    /**
     * <p>
     * Identifies the target device or the machine learning instance that you want to run your model on after
     * compilation has completed. Alternatively, you can specify the OS, architecture, and accelerator using the
     * <code>TargetPlatform</code> fields; <code>TargetDevice</code> can be used instead of
     * <code>TargetPlatform</code>.
     * </p>
     * <note>
     * <p>
     * Currently <code>ml_trn1</code> is available only in the US East (N. Virginia) Region, and
     * <code>ml_inf2</code> is available only in the US East (Ohio) Region.
     * </p>
     * </note>
     *
     * @param targetDevice
     *        Identifies the target device or the machine learning instance that you want to run your model on after
     *        compilation has completed. <code>TargetDevice</code> can be used instead of
     *        <code>TargetPlatform</code>.
     * @see TargetDevice
     */
    public void setTargetDevice(String targetDevice) {
        this.targetDevice = targetDevice;
    }

    /**
     * <p>
     * Identifies the target device or the machine learning instance that you want to run your model on after
     * compilation has completed. See {@link #setTargetDevice(String)} for the full description and the regional
     * availability note.
     * </p>
     *
     * @return Identifies the target device or the machine learning instance that you want to run your model on after
     *         compilation has completed.
     * @see TargetDevice
     */
    public String getTargetDevice() {
        return this.targetDevice;
    }

    /**
     * <p>
     * Identifies the target device or the machine learning instance that you want to run your model on after
     * compilation has completed. See {@link #setTargetDevice(String)} for the full description and the regional
     * availability note.
     * </p>
     *
     * @param targetDevice
     *        Identifies the target device or the machine learning instance that you want to run your model on after
     *        compilation has completed.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see TargetDevice
     */
    public OutputConfig withTargetDevice(String targetDevice) {
        setTargetDevice(targetDevice);
        return this;
    }

    /**
     * <p>
     * Identifies the target device or the machine learning instance that you want to run your model on after
     * compilation has completed. See {@link #setTargetDevice(String)} for the full description and the regional
     * availability note.
     * </p>
     *
     * @param targetDevice
     *        Identifies the target device or the machine learning instance that you want to run your model on after
     *        compilation has completed.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see TargetDevice
     */
    public OutputConfig withTargetDevice(TargetDevice targetDevice) {
        this.targetDevice = targetDevice.toString();
        return this;
    }
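    /*
     * Illustrative sketch: the withTargetDevice(TargetDevice) overload above accepts the generated TargetDevice
     * enum. Assuming the enum follows the SDK's standard generated shape (a fromValue(String) factory), the two
     * calls below are equivalent; "jetson_tx2" is used purely as a placeholder device name.
     *
     *     config.withTargetDevice(TargetDevice.fromValue("jetson_tx2"));
     *     config.withTargetDevice("jetson_tx2");
     */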

    /**
     * <p>
     * Contains information about a target platform that you want your model to run on, such as OS, architecture, and
     * accelerators. It is an alternative to <code>TargetDevice</code>. The documentation on the
     * <code>targetPlatform</code> member above shows how to configure the <code>TargetPlatform</code> and
     * <code>CompilerOptions</code> JSON strings for popular target platforms (Raspberry Pi 3 Model B+, Jetson TX2,
     * EC2 m5.2xlarge instance OS, RK3399, ARMv7 phone, and ARMv8 phone).
     * </p>
     *
     * @param targetPlatform
     *        Contains information about a target platform that you want your model to run on, such as OS,
     *        architecture, and accelerators. It is an alternative to <code>TargetDevice</code>.
     */
    public void setTargetPlatform(TargetPlatform targetPlatform) {
        this.targetPlatform = targetPlatform;
    }

    /**
     * <p>
     * Contains information about a target platform that you want your model to run on, such as OS, architecture, and
     * accelerators. It is an alternative to <code>TargetDevice</code>. See the <code>targetPlatform</code> member
     * documentation above for configuration examples.
     * </p>
     *
     * @return Contains information about a target platform that you want your model to run on, such as OS,
     *         architecture, and accelerators. It is an alternative to <code>TargetDevice</code>.
     */
    public TargetPlatform getTargetPlatform() {
        return this.targetPlatform;
    }

    /**
     * <p>
     * Contains information about a target platform that you want your model to run on, such as OS, architecture, and
     * accelerators. It is an alternative to <code>TargetDevice</code>. See the <code>targetPlatform</code> member
     * documentation above for configuration examples.
     * </p>
     *
     * @param targetPlatform
     *        Contains information about a target platform that you want your model to run on, such as OS,
     *        architecture, and accelerators. It is an alternative to <code>TargetDevice</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public OutputConfig withTargetPlatform(TargetPlatform targetPlatform) {
        setTargetPlatform(targetPlatform);
        return this;
    }
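    /*
     * Illustrative sketch: building the Jetson TX2 example from the documentation above. This assumes TargetPlatform
     * exposes the standard generated fluent setters (withOs/withArch/withAccelerator) for the Os, Arch, and
     * Accelerator members shown in the JSON examples; the compiler-options string is copied verbatim from the
     * documented example.
     *
     *     OutputConfig config = new OutputConfig()
     *             .withS3OutputLocation("s3://bucket-name/key-name-prefix")
     *             .withTargetPlatform(new TargetPlatform()
     *                     .withOs("LINUX")
     *                     .withArch("ARM64")
     *                     .withAccelerator("NVIDIA"))
     *             .withCompilerOptions("{'gpu-code': 'sm_62', 'trt-ver': '6.0.1', 'cuda-ver': '10.0'}");
     */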

    /**
     * <p>
     * Specifies additional parameters for compiler options in JSON format. The compiler options are
     * <code>TargetPlatform</code> specific. They are required for NVIDIA accelerators and highly recommended for CPU
     * compilations; in all other cases, specifying <code>CompilerOptions</code> is optional. The documentation on
     * the <code>compilerOptions</code> member above lists the supported keys per target (<code>DTYPE</code>,
     * <code>CPU</code>, <code>ARM</code>, <code>NVIDIA</code>, <code>ANDROID</code>, <code>INFERENTIA</code>,
     * <code>CoreML</code>, and <code>EIA</code>).
     * </p>
     *
     * @param compilerOptions
     *        Specifies additional parameters for compiler options in JSON format. The compiler options are
     *        <code>TargetPlatform</code> specific.
     */
    public void setCompilerOptions(String compilerOptions) {
        this.compilerOptions = compilerOptions;
    }

    /**
     * <p>
     * Specifies additional parameters for compiler options in JSON format. The compiler options are
     * <code>TargetPlatform</code> specific. See the <code>compilerOptions</code> member documentation above for the
     * supported keys per target.
     * </p>
     *
     * @return Specifies additional parameters for compiler options in JSON format. The compiler options are
     *         <code>TargetPlatform</code> specific.
     */
    public String getCompilerOptions() {
        return this.compilerOptions;
    }

    /**
     * <p>
     * Specifies additional parameters for compiler options in JSON format. The compiler options are
     * <code>TargetPlatform</code> specific. See the <code>compilerOptions</code> member documentation above for the
     * supported keys per target.
     * </p>
     *
     * @param compilerOptions
     *        Specifies additional parameters for compiler options in JSON format. The compiler options are
     *        <code>TargetPlatform</code> specific.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public OutputConfig withCompilerOptions(String compilerOptions) {
        setCompilerOptions(compilerOptions);
        return this;
    }
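    /*
     * Illustrative sketch: CompilerOptions is a raw JSON string, so quotes must be escaped in Java source. The EIA
     * example from the documentation above would be set like this:
     *
     *     config.withCompilerOptions("{\"precision_mode\": \"FP32\", \"output_names\": [\"output:0\"]}");
     *
     * For the INFERENTIA (ml_inf1) case, the documented options are themselves passed as a JSON-encoded string,
     * hence the doubled quoting:
     *
     *     config.withCompilerOptions("\"--verbose 1 --num-neuroncores 2 -O2\"");
     */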

    /**
     * <p>
     * The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to
     * encrypt your output models with Amazon S3 server-side encryption after the compilation job. If you don't
     * provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For
     * more information, see KMS-Managed Encryption Keys in the <i>Amazon Simple Storage Service Developer Guide</i>.
     * See the <code>kmsKeyId</code> member documentation above for the accepted key formats.
     * </p>
     *
     * @param kmsKeyId
     *        The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses
     *        to encrypt your output models with Amazon S3 server-side encryption after the compilation job.
     */
    public void setKmsKeyId(String kmsKeyId) {
        this.kmsKeyId = kmsKeyId;
    }

    /**
     * <p>
     * The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to
     * encrypt your output models with Amazon S3 server-side encryption after the compilation job. See the
     * <code>kmsKeyId</code> member documentation above for the accepted key formats.
     * </p>
     *
     * @return The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker
     *         uses to encrypt your output models with Amazon S3 server-side encryption after the compilation job.
     */
    public String getKmsKeyId() {
        return this.kmsKeyId;
    }

    /**
     * <p>
     * The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to
     * encrypt your output models with Amazon S3 server-side encryption after the compilation job. See the
     * <code>kmsKeyId</code> member documentation above for the accepted key formats.
     * </p>
     *
     * @param kmsKeyId
     *        The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses
     *        to encrypt your output models with Amazon S3 server-side encryption after the compilation job.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public OutputConfig withKmsKeyId(String kmsKeyId) {
        setKmsKeyId(kmsKeyId);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will
     * be redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getS3OutputLocation() != null)
            sb.append("S3OutputLocation: ").append(getS3OutputLocation()).append(",");
        if (getTargetDevice() != null)
            sb.append("TargetDevice: ").append(getTargetDevice()).append(",");
        if (getTargetPlatform() != null)
            sb.append("TargetPlatform: ").append(getTargetPlatform()).append(",");
        if (getCompilerOptions() != null)
            sb.append("CompilerOptions: ").append(getCompilerOptions()).append(",");
        if (getKmsKeyId() != null)
            sb.append("KmsKeyId: ").append(getKmsKeyId());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;

        if (obj instanceof OutputConfig == false)
            return false;
        OutputConfig other = (OutputConfig) obj;
        if (other.getS3OutputLocation() == null ^ this.getS3OutputLocation() == null)
            return false;
        if (other.getS3OutputLocation() != null && other.getS3OutputLocation().equals(this.getS3OutputLocation()) == false)
            return false;
        if (other.getTargetDevice() == null ^ this.getTargetDevice() == null)
            return false;
        if (other.getTargetDevice() != null && other.getTargetDevice().equals(this.getTargetDevice()) == false)
            return false;
        if (other.getTargetPlatform() == null ^ this.getTargetPlatform() == null)
            return false;
        if (other.getTargetPlatform() != null && other.getTargetPlatform().equals(this.getTargetPlatform()) == false)
            return false;
        if (other.getCompilerOptions() == null ^ this.getCompilerOptions() == null)
            return false;
        if (other.getCompilerOptions() != null && other.getCompilerOptions().equals(this.getCompilerOptions()) == false)
            return false;
        if (other.getKmsKeyId() == null ^ this.getKmsKeyId() == null)
            return false;
        if (other.getKmsKeyId() != null && other.getKmsKeyId().equals(this.getKmsKeyId()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;

        hashCode = prime * hashCode + ((getS3OutputLocation() == null) ? 0 : getS3OutputLocation().hashCode());
        hashCode = prime * hashCode + ((getTargetDevice() == null) ? 0 : getTargetDevice().hashCode());
        hashCode = prime * hashCode + ((getTargetPlatform() == null) ? 0 : getTargetPlatform().hashCode());
        hashCode = prime * hashCode + ((getCompilerOptions() == null) ? 0 : getCompilerOptions().hashCode());
        hashCode = prime * hashCode + ((getKmsKeyId() == null) ? 0 : getKmsKeyId().hashCode());
        return hashCode;
    }

    @Override
    public OutputConfig clone() {
        try {
            return (OutputConfig) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        com.amazonaws.services.sagemaker.model.transform.OutputConfigMarshaller.getInstance().marshall(this, protocolMarshaller);
    }

}
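/*
 * Illustrative end-to-end sketch (not part of the generated class): assembling a complete OutputConfig for the ARMv8
 * phone (CPU) target documented above, combining the members defined in this class. All values are the placeholder
 * examples from the Javadoc, and the TargetPlatform fluent setters are assumed to follow the standard generated
 * shape. Note that, as the accessors above show, this POJO performs no client-side validation of these values.
 *
 *     OutputConfig config = new OutputConfig()
 *             .withS3OutputLocation("s3://bucket-name/key-name-prefix")
 *             .withTargetPlatform(new TargetPlatform().withOs("ANDROID").withArch("ARM64"))
 *             .withCompilerOptions("{'ANDROID_PLATFORM': 29}")
 *             .withKmsKeyId("alias/ExampleAlias");
 */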