/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Do not modify this file. This file is generated from the sagemaker-2017-07-24.normal.json service model.
*/
using System;
using System.Collections.Generic;
using System.Xml.Serialization;
using System.Text;
using System.IO;
using System.Net;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
namespace Amazon.SageMaker.Model
{
/// <summary>
/// Contains information about the output location for the compiled model and the target
/// device that the model runs on. <code>TargetDevice</code> and <code>TargetPlatform</code>
/// are mutually exclusive, so you need to choose one of the two to specify your target
/// device or platform. If you cannot find the device you want to use in the
/// <code>TargetDevice</code> list, use <code>TargetPlatform</code> to describe the platform
/// of your edge device and <code>CompilerOptions</code> if there are specific settings
/// that are required or recommended for that target platform.
/// </summary>
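/// <example>
/// A minimal usage sketch (illustrative only; the bucket name and device value are
/// assumptions, not prescribed by the model): set the required output location and
/// exactly one of <code>TargetDevice</code> or <code>TargetPlatform</code>.
/// <code>
/// // Assumes: using Amazon.SageMaker; using Amazon.SageMaker.Model;
/// var outputConfig = new OutputConfig
/// {
///     // Hypothetical bucket and prefix for the compiled artifacts.
///     S3OutputLocation = "s3://example-bucket/compiled-models",
///     // Pick one of TargetDevice or TargetPlatform; they are mutually exclusive.
///     TargetDevice = TargetDevice.FindValue("jetson_tx2")
/// };
/// </code>
/// </example>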
public partial class OutputConfig
{
private string _compilerOptions;
private string _kmsKeyId;
private string _s3OutputLocation;
private TargetDevice _targetDevice;
private TargetPlatform _targetPlatform;
/// <summary>
/// Gets and sets the property CompilerOptions.
/// <para>
/// Specifies additional parameters for compiler options in JSON format. The compiler
/// options are <code>TargetPlatform</code> specific. It is required for NVIDIA accelerators
/// and highly recommended for CPU compilations. For any other cases, it is optional to
/// specify <code>CompilerOptions</code>.
/// </para>
/// <ul>
/// <li>
/// <code>DTYPE</code>: Specifies the data type for the input. When compiling for <code>ml_*</code>
/// (except for <code>ml_inf</code>) instances using the PyTorch framework, provide the data
/// type (dtype) of the model's input. <code>"float32"</code> is used if <code>"DTYPE"</code>
/// is not specified. Options for data type are:
/// <ul>
/// <li>float32: Use either <code>"float"</code> or <code>"float32"</code>.</li>
/// <li>int64: Use either <code>"int64"</code> or <code>"long"</code>.</li>
/// </ul>
/// For example, <code>{"dtype" : "float32"}</code>.
/// </li>
/// <li>
/// <code>CPU</code>: Compilation for CPU supports the following compiler options.
/// <ul>
/// <li><code>mcpu</code>: CPU micro-architecture. For example, <code>{'mcpu': 'skylake-avx512'}</code></li>
/// <li><code>mattr</code>: CPU flags. For example, <code>{'mattr': ['+neon', '+vfpv4']}</code></li>
/// </ul>
/// </li>
/// <li>
/// <code>ARM</code>: Details of ARM CPU compilations.
/// <ul>
/// <li>
/// <code>NEON</code>: NEON is an implementation of the Advanced SIMD extension used
/// in ARMv7 processors.
/// </li>
/// </ul>
/// For example, add <code>{'mattr': ['+neon']}</code> to the compiler options if compiling
/// for an ARM 32-bit platform with NEON support.
/// </li>
/// <li>
/// <code>NVIDIA</code>: Compilation for NVIDIA GPU supports the following compiler options.
/// <ul>
/// <li><code>gpu_code</code>: Specifies the targeted architecture.</li>
/// <li><code>trt-ver</code>: Specifies the TensorRT versions in x.y.z. format.</li>
/// <li><code>cuda-ver</code>: Specifies the CUDA version in x.y format.</li>
/// </ul>
/// For example, <code>{'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'}</code>
/// </li>
/// <li>
/// <code>ANDROID</code>: Compilation for the Android OS supports the following compiler
/// options:
/// <ul>
/// <li>
/// <code>ANDROID_PLATFORM</code>: Specifies the Android API levels. Available levels
/// range from 21 to 29. For example, <code>{'ANDROID_PLATFORM': 28}</code>.
/// </li>
/// <li>
/// <code>mattr</code>: Add <code>{'mattr': ['+neon']}</code> to compiler options if
/// compiling for an ARM 32-bit platform with NEON support.
/// </li>
/// </ul>
/// </li>
/// <li>
/// <code>INFERENTIA</code>: Compilation for target ml_inf1 uses compiler options passed
/// in as a JSON string. For example, <code>"CompilerOptions": "\"--verbose 1 --num-neuroncores
/// 2 -O2\""</code>.
/// For information about supported compiler options, see Neuron Compiler CLI.
/// </li>
/// <li>
/// <code>CoreML</code>: Compilation for the CoreML <code>OutputConfig</code> <code>TargetDevice</code>
/// supports the following compiler options:
/// <ul>
/// <li>
/// <code>class_labels</code>: Specifies the classification labels file name inside the input
/// tar.gz file. For example, <code>{"class_labels": "imagenet_labels_1000.txt"}</code>.
/// Labels inside the txt file should be separated by newlines.
/// </li>
/// </ul>
/// </li>
/// <li>
/// <code>EIA</code>: Compilation for the Elastic Inference Accelerator supports the
/// following compiler options:
/// <ul>
/// <li>
/// <code>precision_mode</code>: Specifies the precision of compiled artifacts. Supported
/// values are <code>"FP16"</code> and <code>"FP32"</code>. Default is <code>"FP32"</code>.
/// </li>
/// <li>
/// <code>signature_def_key</code>: Specifies the signature to use for models in SavedModel
/// format. Default is TensorFlow's default signature def key.
/// </li>
/// <li>
/// <code>output_names</code>: Specifies a list of output tensor names for models in
/// FrozenGraph format. Set at most one API field, either <code>signature_def_key</code>
/// or <code>output_names</code>.
/// </li>
/// </ul>
/// For example: <code>{"precision_mode": "FP32", "output_names": ["output:0"]}</code>
/// </li>
/// </ul>
/// </summary>
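/// <example>
/// A minimal sketch (illustrative only; the bucket name is an assumption and the option
/// values are taken from the EIA example above): <code>CompilerOptions</code> is passed
/// as a JSON string.
/// <code>
/// // Assumes: using Amazon.SageMaker; using Amazon.SageMaker.Model;
/// var outputConfig = new OutputConfig
/// {
///     S3OutputLocation = "s3://example-bucket/compiled-models",
///     TargetDevice = TargetDevice.FindValue("ml_eia2"),
///     // FP32 precision and a single output tensor, as in the EIA options listed above.
///     CompilerOptions = "{\"precision_mode\": \"FP32\", \"output_names\": [\"output:0\"]}"
/// };
/// </code>
/// </example>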
[AWSProperty(Min=3, Max=1024)]
public string CompilerOptions
{
get { return this._compilerOptions; }
set { this._compilerOptions = value; }
}
// Check to see if CompilerOptions property is set
internal bool IsSetCompilerOptions()
{
return this._compilerOptions != null;
}
/// <summary>
/// Gets and sets the property KmsKeyId.
/// <para>
/// The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that
/// Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption
/// after the compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the
/// default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed
/// Encryption Keys in the Amazon Simple Storage Service Developer Guide.
/// </para>
/// <para>
/// The KmsKeyId can be any of the following formats:
/// </para>
/// <ul>
/// <li>Key ID: <code>1234abcd-12ab-34cd-56ef-1234567890ab</code></li>
/// <li>Key ARN: <code>arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</code></li>
/// <li>Alias name: <code>alias/ExampleAlias</code></li>
/// <li>Alias name ARN: <code>arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias</code></li>
/// </ul>
/// </summary>
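/// <example>
/// A minimal sketch (illustrative only; the key ID is the placeholder value from the list
/// above and the bucket name is an assumption): any of the four formats listed above can
/// be assigned to <code>KmsKeyId</code>.
/// <code>
/// // Assumes: using Amazon.SageMaker.Model;
/// var outputConfig = new OutputConfig
/// {
///     S3OutputLocation = "s3://example-bucket/compiled-models",
///     // Key ID format; a key ARN, alias name, or alias name ARN would also work.
///     KmsKeyId = "1234abcd-12ab-34cd-56ef-1234567890ab"
/// };
/// </code>
/// </example>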
[AWSProperty(Max=2048)]
public string KmsKeyId
{
get { return this._kmsKeyId; }
set { this._kmsKeyId = value; }
}
// Check to see if KmsKeyId property is set
internal bool IsSetKmsKeyId()
{
return this._kmsKeyId != null;
}
/// <summary>
/// Gets and sets the property S3OutputLocation.
/// <para>
/// Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts.
/// For example, <code>s3://bucket-name/key-name-prefix</code>.
/// </para>
/// </summary>
[AWSProperty(Required=true, Max=1024)]
public string S3OutputLocation
{
get { return this._s3OutputLocation; }
set { this._s3OutputLocation = value; }
}
// Check to see if S3OutputLocation property is set
internal bool IsSetS3OutputLocation()
{
return this._s3OutputLocation != null;
}
/// <summary>
/// Gets and sets the property TargetDevice.
/// <para>
/// Identifies the target device or the machine learning instance that you want to run
/// your model on after the compilation has completed. Alternatively, you can specify
/// OS, architecture, and accelerator using the <code>TargetPlatform</code> fields;
/// <code>TargetDevice</code> can be used instead of <code>TargetPlatform</code>.
/// </para>
/// <note>
/// <para>
/// Currently <code>ml_trn1</code> is available only in US East (N. Virginia) Region,
/// and <code>ml_inf2</code> is available only in US East (Ohio) Region.
/// </para>
/// </note>
/// </summary>
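/// <example>
/// A minimal sketch (illustrative only; the device value and bucket name are assumptions):
/// target a cloud ML instance family rather than an edge device.
/// <code>
/// // Assumes: using Amazon.SageMaker; using Amazon.SageMaker.Model;
/// var outputConfig = new OutputConfig
/// {
///     S3OutputLocation = "s3://example-bucket/compiled-models",
///     // FindValue maps the service's string value onto the TargetDevice constant class.
///     TargetDevice = TargetDevice.FindValue("ml_c5")
/// };
/// </code>
/// </example>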
public TargetDevice TargetDevice
{
get { return this._targetDevice; }
set { this._targetDevice = value; }
}
// Check to see if TargetDevice property is set
internal bool IsSetTargetDevice()
{
return this._targetDevice != null;
}
/// <summary>
/// Gets and sets the property TargetPlatform.
/// <para>
/// Contains information about a target platform that you want your model to run on, such
/// as OS, architecture, and accelerators. It is an alternative to <code>TargetDevice</code>.
/// </para>
/// <para>
/// The following examples show how to configure the <code>TargetPlatform</code> and
/// <code>CompilerOptions</code> JSON strings for popular target platforms:
/// </para>
/// <ul>
/// <li>
/// Raspberry Pi 3 Model B+
/// <para>
/// <code>"TargetPlatform": {"Os": "LINUX", "Arch": "ARM_EABIHF"},</code>
/// </para>
/// <para>
/// <code>"CompilerOptions": {'mattr': ['+neon']}</code>
/// </para>
/// </li>
/// <li>
/// Jetson TX2
/// <para>
/// <code>"TargetPlatform": {"Os": "LINUX", "Arch": "ARM64", "Accelerator": "NVIDIA"},</code>
/// </para>
/// <para>
/// <code>"CompilerOptions": {'gpu-code': 'sm_62', 'trt-ver': '6.0.1', 'cuda-ver': '10.0'}</code>
/// </para>
/// </li>
/// <li>
/// EC2 m5.2xlarge instance OS
/// <para>
/// <code>"TargetPlatform": {"Os": "LINUX", "Arch": "X86_64", "Accelerator": "NVIDIA"},</code>
/// </para>
/// <para>
/// <code>"CompilerOptions": {'mcpu': 'skylake-avx512'}</code>
/// </para>
/// </li>
/// <li>
/// RK3399
/// <para>
/// <code>"TargetPlatform": {"Os": "LINUX", "Arch": "ARM64", "Accelerator": "MALI"}</code>
/// </para>
/// </li>
/// <li>
/// ARMv7 phone (CPU)
/// <para>
/// <code>"TargetPlatform": {"Os": "ANDROID", "Arch": "ARM_EABI"},</code>
/// </para>
/// <para>
/// <code>"CompilerOptions": {'ANDROID_PLATFORM': 25, 'mattr': ['+neon']}</code>
/// </para>
/// </li>
/// <li>
/// ARMv8 phone (CPU)
/// <para>
/// <code>"TargetPlatform": {"Os": "ANDROID", "Arch": "ARM64"},</code>
/// </para>
/// <para>
/// <code>"CompilerOptions": {'ANDROID_PLATFORM': 29}</code>
/// </para>
/// </li>
/// </ul>
/// </summary>
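/// <example>
/// A minimal sketch (illustrative only; the bucket name is an assumption and the platform
/// and compiler values mirror the Jetson TX2 example above): describe the platform with
/// <code>TargetPlatform</code> and pass the matching <code>CompilerOptions</code> JSON string.
/// <code>
/// // Assumes: using Amazon.SageMaker; using Amazon.SageMaker.Model;
/// var outputConfig = new OutputConfig
/// {
///     S3OutputLocation = "s3://example-bucket/compiled-models",
///     TargetPlatform = new TargetPlatform
///     {
///         // FindValue maps the service's string values onto the generated constant classes.
///         Os = TargetPlatformOs.FindValue("LINUX"),
///         Arch = TargetPlatformArch.FindValue("ARM64"),
///         Accelerator = TargetPlatformAccelerator.FindValue("NVIDIA")
///     },
///     CompilerOptions = "{'gpu-code': 'sm_62', 'trt-ver': '6.0.1', 'cuda-ver': '10.0'}"
/// };
/// </code>
/// </example>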
public TargetPlatform TargetPlatform
{
get { return this._targetPlatform; }
set { this._targetPlatform = value; }
}
// Check to see if TargetPlatform property is set
internal bool IsSetTargetPlatform()
{
return this._targetPlatform != null;
}
}
}