/*******************************************************************************
* Copyright 2012-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use
* this file except in compliance with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file.
* This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
* *****************************************************************************
*
* AWS Tools for Windows (TM) PowerShell (TM)
*
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Management.Automation;
using System.Text;
using Amazon.PowerShell.Common;
using Amazon.Runtime;
using Amazon.SageMaker;
using Amazon.SageMaker.Model;
namespace Amazon.PowerShell.Cmdlets.SM
{
///
/// Starts a model compilation job. After the model has been compiled, Amazon SageMaker
/// saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3)
/// bucket that you specify.
///
///
///
/// If you choose to host your model using Amazon SageMaker hosting services, you can
/// use the resulting model artifacts as part of the model. You can also use the artifacts
/// with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource.
///
/// In the request body, you provide the following:
/// - A name for the compilation job
/// - Information about the input model artifacts
/// - The output location for the compiled model and the device (target) that the model
/// runs on
/// - The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform
/// the model compilation job.
///
/// You can also provide a Tag to track the model compilation job's resource
/// use and costs. The response body contains the CompilationJobArn for the
/// compiled job.
///
/// To stop a model compilation job, use StopCompilationJob.
/// To get information about a particular model compilation job, use DescribeCompilationJob.
/// To get information about multiple model compilation jobs, use ListCompilationJobs.
///
///
[Cmdlet("New", "SMCompilationJob", SupportsShouldProcess = true, ConfirmImpact = ConfirmImpact.Medium)]
[OutputType("System.String")]
[AWSCmdlet("Calls the Amazon SageMaker Service CreateCompilationJob API operation.", Operation = new[] {"CreateCompilationJob"}, SelectReturnType = typeof(Amazon.SageMaker.Model.CreateCompilationJobResponse))]
[AWSCmdletOutput("System.String or Amazon.SageMaker.Model.CreateCompilationJobResponse",
"This cmdlet returns a System.String object.",
"The service call response (type Amazon.SageMaker.Model.CreateCompilationJobResponse) can also be referenced from properties attached to the cmdlet entry in the $AWSHistory stack."
)]
public partial class NewSMCompilationJobCmdlet : AmazonSageMakerClientCmdlet, IExecutor
{
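// Illustrative only (not emitted by the code generator): a minimal sketch of the direct
// AWSSDK.SageMaker call that this cmdlet wraps, using the same request types populated in
// Execute() below. The names, ARNs, S3 URIs, and target device shown are placeholders.
//
//   using (var sdkClient = new Amazon.SageMaker.AmazonSageMakerClient())
//   {
//       var sdkRequest = new Amazon.SageMaker.Model.CreateCompilationJobRequest
//       {
//           CompilationJobName = "my-compilation-job",
//           RoleArn = "arn:aws:iam::123456789012:role/SageMakerRole",
//           InputConfig = new Amazon.SageMaker.Model.InputConfig
//           {
//               S3Uri = "s3://my-bucket/model.tar.gz",
//               Framework = Amazon.SageMaker.Framework.TENSORFLOW,
//               DataInputConfig = "{\"input\":[1,224,224,3]}"
//           },
//           OutputConfig = new Amazon.SageMaker.Model.OutputConfig
//           {
//               S3OutputLocation = "s3://my-bucket/compiled/",
//               TargetDevice = Amazon.SageMaker.TargetDevice.Ml_c5
//           },
//           StoppingCondition = new Amazon.SageMaker.Model.StoppingCondition { MaxRuntimeInSeconds = 900 }
//       };
//       var sdkResponse = sdkClient.CreateCompilationJob(sdkRequest);
//       // sdkResponse.CompilationJobArn identifies the new job.
//   }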
#region Parameter TargetPlatform_Accelerator
///
///
/// Specifies a target platform accelerator (optional).
/// - NVIDIA: Nvidia graphics processing unit. It also requires the gpu-code,
/// trt-ver, and cuda-ver compiler options.
/// - MALI: ARM Mali graphics processor.
/// - INTEL_GRAPHICS: Integrated Intel graphics.
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("OutputConfig_TargetPlatform_Accelerator")]
[AWSConstantClassSource("Amazon.SageMaker.TargetPlatformAccelerator")]
public Amazon.SageMaker.TargetPlatformAccelerator TargetPlatform_Accelerator { get; set; }
#endregion
#region Parameter TargetPlatform_Arch
///
///
/// Specifies a target platform architecture.
/// - X86_64: 64-bit version of the x86 instruction set.
/// - X86: 32-bit version of the x86 instruction set.
/// - ARM64: ARMv8 64-bit CPU.
/// - ARM_EABIHF: ARMv7 32-bit, Hard Float.
/// - ARM_EABI: ARMv7 32-bit, Soft Float. Used by the Android 32-bit ARM platform.
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("OutputConfig_TargetPlatform_Arch")]
[AWSConstantClassSource("Amazon.SageMaker.TargetPlatformArch")]
public Amazon.SageMaker.TargetPlatformArch TargetPlatform_Arch { get; set; }
#endregion
#region Parameter CompilationJobName
///
///
/// A name for the model compilation job. The name must be unique within the Amazon Web
/// Services Region and within your Amazon Web Services account.
///
///
#if !MODULAR
[System.Management.Automation.Parameter(Position = 0, ValueFromPipelineByPropertyName = true, ValueFromPipeline = true)]
#else
[System.Management.Automation.Parameter(Position = 0, ValueFromPipelineByPropertyName = true, ValueFromPipeline = true, Mandatory = true)]
[System.Management.Automation.AllowEmptyString]
[System.Management.Automation.AllowNull]
#endif
[Amazon.PowerShell.Common.AWSRequiredParameter]
public System.String CompilationJobName { get; set; }
#endregion
#region Parameter OutputConfig_CompilerOption
///
///
/// Specifies additional parameters for compiler options in JSON format. The compiler
/// options are TargetPlatform specific. It is required for NVIDIA accelerators
/// and highly recommended for CPU compilations. For any other cases, it is optional to
/// specify CompilerOptions.
/// - DTYPE: Specifies the data type for the input. When compiling for ml_* (except for
/// ml_inf) instances using the PyTorch framework, provide the data type (dtype) of the
/// model's input. "float32" is used if "DTYPE" is not specified. Options for the data type are:
///   - float32: Use either "float" or "float32".
///   - int64: Use either "int64" or "long".
///   For example, {"dtype" : "float32"}.
/// - CPU: Compilation for CPU supports the following compiler options.
///   - mcpu: CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'}
///   - mattr: CPU flags. For example, {'mattr': ['+neon', '+vfpv4']}
/// - ARM: Details of ARM CPU compilations.
///   - NEON: NEON is an implementation of the Advanced SIMD extension used in ARMv7
///   processors. For example, add {'mattr': ['+neon']} to the compiler options if
///   compiling for an ARM 32-bit platform with NEON support.
/// - NVIDIA: Compilation for NVIDIA GPU supports the following compiler options.
///   - gpu_code: Specifies the targeted architecture.
///   - trt-ver: Specifies the TensorRT version in x.y.z format.
///   - cuda-ver: Specifies the CUDA version in x.y format.
///   For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'}
/// - ANDROID: Compilation for the Android OS supports the following compiler options:
///   - ANDROID_PLATFORM: Specifies the Android API level. Available levels range from
///   21 to 29. For example, {'ANDROID_PLATFORM': 28}.
///   - mattr: Add {'mattr': ['+neon']} to the compiler options if compiling for an ARM
///   32-bit platform with NEON support.
/// - INFERENTIA: Compilation for target ml_inf1 uses compiler options passed in as a
/// JSON string. For example, "CompilerOptions": "\"--verbose 1 --num-neuroncores 2 -O2\"".
/// For information about supported compiler options, see Neuron Compiler CLI.
/// - CoreML: Compilation for the CoreML OutputConfig TargetDevice supports the following
/// compiler options:
///   - class_labels: Specifies the classification labels file name inside the input
///   tar.gz file. For example, {"class_labels": "imagenet_labels_1000.txt"}. Labels
///   inside the txt file should be separated by newlines.
/// - EIA: Compilation for the Elastic Inference Accelerator supports the following
/// compiler options:
///   - precision_mode: Specifies the precision of compiled artifacts. Supported values
///   are "FP16" and "FP32". The default is "FP32".
///   - signature_def_key: Specifies the signature to use for models in SavedModel
///   format. The default is TensorFlow's default signature def key.
///   - output_names: Specifies a list of output tensor names for models in FrozenGraph
///   format. Set at most one API field, either signature_def_key or output_names.
///   For example: {"precision_mode": "FP32", "output_names": ["output:0"]}
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("OutputConfig_CompilerOptions")]
public System.String OutputConfig_CompilerOption { get; set; }
#endregion
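// Illustrative only: composing a CompilerOptions JSON string in C# for an NVIDIA target,
// using the gpu-code/trt-ver/cuda-ver keys described above. The version numbers are
// placeholders taken from the documentation example.
//
//   string nvidiaCompilerOptions = "{\"gpu-code\": \"sm_72\", \"trt-ver\": \"6.0.1\", \"cuda-ver\": \"10.1\"}";
//   // Pass the string to -OutputConfig_CompilerOption; the service parses it as JSON.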
#region Parameter InputConfig_DataInputConfig
///
///
/// Specifies the name and shape of the expected data inputs for your trained model in
/// JSON dictionary form. The data inputs are Framework specific.
/// - TensorFlow: You must specify the name and shape (NHWC format) of the expected data
/// inputs using a dictionary format for your trained model. The dictionary formats
/// required for the console and CLI are different.
///   - Examples for one input:
///     - If using the console, {"input":[1,1024,1024,3]}
///     - If using the CLI, {\"input\":[1,1024,1024,3]}
///   - Examples for two inputs:
///     - If using the console, {"data1": [1,28,28,1], "data2":[1,28,28,1]}
///     - If using the CLI, {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}
/// - KERAS: You must specify the name and shape (NCHW format) of the expected data
/// inputs using a dictionary format for your trained model. Note that while Keras model
/// artifacts should be uploaded in NHWC (channel-last) format, DataInputConfig should
/// be specified in NCHW (channel-first) format. The dictionary formats required for the
/// console and CLI are different.
///   - Examples for one input:
///     - If using the console, {"input_1":[1,3,224,224]}
///     - If using the CLI, {\"input_1\":[1,3,224,224]}
///   - Examples for two inputs:
///     - If using the console, {"input_1": [1,3,224,224], "input_2":[1,3,224,224]}
///     - If using the CLI, {\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]}
/// - MXNET/ONNX/DARKNET: You must specify the name and shape (NCHW format) of the
/// expected data inputs in order using a dictionary format for your trained model. The
/// dictionary formats required for the console and CLI are different.
///   - Examples for one input:
///     - If using the console, {"data":[1,3,1024,1024]}
///     - If using the CLI, {\"data\":[1,3,1024,1024]}
///   - Examples for two inputs:
///     - If using the console, {"var1": [1,1,28,28], "var2":[1,1,28,28]}
///     - If using the CLI, {\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]}
/// - PyTorch: You can either specify the name and shape (NCHW format) of the expected
/// data inputs in order using a dictionary format for your trained model, or you can
/// specify the shape only using a list format. The dictionary formats required for the
/// console and CLI are different. The list formats for the console and CLI are the same.
///   - Examples for one input in dictionary format:
///     - If using the console, {"input0":[1,3,224,224]}
///     - If using the CLI, {\"input0\":[1,3,224,224]}
///   - Example for one input in list format: [[1,3,224,224]]
///   - Examples for two inputs in dictionary format:
///     - If using the console, {"input0":[1,3,224,224], "input1":[1,3,224,224]}
///     - If using the CLI, {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}
///   - Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]]
/// - XGBOOST: input data name and shape are not needed.
/// DataInputConfig supports the following parameters for the CoreML TargetDevice (ML Model format):
/// - shape: Input shape, for example {"input_1": {"shape": [1,224,224,3]}}. In addition
/// to static input shapes, the CoreML converter supports flexible input shapes:
///   - Range Dimension. You can use the Range Dimension feature if you know the input
///   shape will be within some specific interval in that dimension, for example:
///   {"input_1": {"shape": ["1..10", 224, 224, 3]}}
///   - Enumerated shapes. Sometimes, the models are trained to work only on a select
///   set of inputs. You can enumerate all supported input shapes, for example:
///   {"input_1": {"shape": [[1, 224, 224, 3], [1, 160, 160, 3]]}}
/// - default_shape: Default input shape. You can set a default shape during conversion
/// for both Range Dimension and Enumerated Shapes. For example, {"input_1":
/// {"shape": ["1..10", 224, 224, 3], "default_shape": [1, 224, 224, 3]}}
/// - type: Input type. Allowed values: Image and Tensor. By default, the converter
/// generates an ML Model with inputs of type Tensor (MultiArray). You can set the input
/// type to Image. The Image input type requires additional input parameters such as
/// bias and scale.
/// - bias: If the input type is an Image, you need to provide the bias vector.
/// - scale: If the input type is an Image, you need to provide a scale factor.
/// CoreML ClassifierConfig parameters can be specified using OutputConfig CompilerOptions.
/// The CoreML converter supports TensorFlow and PyTorch models. CoreML conversion examples:
/// - Tensor type input:
///   "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], [1,160,160,3]], "default_shape": [1,224,224,3]}}
/// - Tensor type input without input name (PyTorch):
///   "DataInputConfig": [{"shape": [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224]}]
/// - Image type input:
///   "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], [1,160,160,3]], "default_shape":
///   [1,224,224,3], "type": "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}}
///   "CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}
/// - Image type input without input name (PyTorch):
///   "DataInputConfig": [{"shape": [[1,3,224,224], [1,3,160,160]], "default_shape":
///   [1,3,224,224], "type": "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}]
///   "CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}
/// Depending on the model format, DataInputConfig requires the following parameters for
/// ml_eia2 OutputConfig:TargetDevice.
/// - For TensorFlow models saved in the SavedModel format, specify the input names from
/// signature_def_key and the input model shapes for DataInputConfig. Specify the
/// signature_def_key in OutputConfig:CompilerOptions if the model does not use
/// TensorFlow's default signature def key. For example:
///   "DataInputConfig": {"inputs": [1, 224, 224, 3]}
///   "CompilerOptions": {"signature_def_key": "serving_custom"}
/// - For TensorFlow models saved as a frozen graph, specify the input tensor names and
/// shapes in DataInputConfig and the output tensor names for output_names in
/// OutputConfig:CompilerOptions. For example:
///   "DataInputConfig": {"input_tensor:0": [1, 224, 224, 3]}
///   "CompilerOptions": {"output_names": ["output_tensor:0"]}
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String InputConfig_DataInputConfig { get; set; }
#endregion
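// Illustrative only: composing a DataInputConfig value in C#. The input name "input_1"
// and the NCHW shape below are placeholders; use the input names and layout (NHWC for
// TensorFlow, NCHW for Keras/MXNet/ONNX/PyTorch) that your framework expects.
//
//   string dataInputConfig = "{\"input_1\":[1,3,224,224]}";
//   // When typing the same value at a shell prompt, the inner quotes usually need
//   // escaping, which is why the CLI examples above show \" sequences.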
#region Parameter InputConfig_Framework
///
///
/// Identifies the framework in which the model was trained. For example: TENSORFLOW.
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[AWSConstantClassSource("Amazon.SageMaker.Framework")]
public Amazon.SageMaker.Framework InputConfig_Framework { get; set; }
#endregion
#region Parameter InputConfig_FrameworkVersion
///
///
/// Specifies the framework version to use. This API field is only supported for the MXNet,
/// PyTorch, TensorFlow, and TensorFlow Lite frameworks.
/// For information about framework versions supported for cloud targets and edge devices,
/// see Cloud Supported Instance Types and Frameworks and Edge Supported Frameworks.
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String InputConfig_FrameworkVersion { get; set; }
#endregion
#region Parameter OutputConfig_KmsKeyId
///
///
/// The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that
/// Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption
/// after the compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the
/// default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed
/// Encryption Keys in the Amazon Simple Storage Service Developer Guide.
/// The KmsKeyId can be any of the following formats:
/// - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
/// - Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
/// - Alias name: alias/ExampleAlias
/// - Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String OutputConfig_KmsKeyId { get; set; }
#endregion
#region Parameter StoppingCondition_MaxRuntimeInSecond
///
///
/// The maximum length of time, in seconds, that a training or compilation job can run
/// before it is stopped.
/// For compilation jobs, if the job does not complete during this time, a TimeOut
/// error is generated. We recommend starting with 900 seconds and increasing as necessary
/// based on your model.
/// For all other jobs, if the job does not complete during this time, SageMaker ends
/// the job. When RetryStrategy is specified in the job request, MaxRuntimeInSeconds
/// specifies the maximum time for all of the attempts in total, not each individual attempt.
/// The default value is 1 day. The maximum value is 28 days.
/// The maximum time that a TrainingJob can run in total, including any time
/// spent publishing metrics or archiving and uploading models after it has been stopped,
/// is 30 days.
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("StoppingCondition_MaxRuntimeInSeconds")]
public System.Int32? StoppingCondition_MaxRuntimeInSecond { get; set; }
#endregion
#region Parameter StoppingCondition_MaxWaitTimeInSecond
///
///
/// The maximum length of time, in seconds, that a managed Spot training job has to complete.
/// It is the amount of time spent waiting for Spot capacity plus the amount of time the
/// job can run. It must be equal to or greater than MaxRuntimeInSeconds.
/// If the job does not complete during this time, SageMaker ends the job.
/// When RetryStrategy is specified in the job request, MaxWaitTimeInSeconds
/// specifies the maximum time for all of the attempts in total, not each individual attempt.
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("StoppingCondition_MaxWaitTimeInSeconds")]
public System.Int32? StoppingCondition_MaxWaitTimeInSecond { get; set; }
#endregion
#region Parameter ModelPackageVersionArn
///
///
/// The Amazon Resource Name (ARN) of a versioned model package. Provide either a
/// ModelPackageVersionArn or an InputConfig object in the request syntax. The presence
/// of both objects in the CreateCompilationJob request will return an exception.
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String ModelPackageVersionArn { get; set; }
#endregion
#region Parameter TargetPlatform_Os
///
///
/// Specifies a target platform OS.
/// - LINUX: Linux-based operating systems.
/// - ANDROID: Android operating systems. The Android API level can be specified
/// using the ANDROID_PLATFORM compiler option. For example, "CompilerOptions":
/// {'ANDROID_PLATFORM': 28}
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("OutputConfig_TargetPlatform_Os")]
[AWSConstantClassSource("Amazon.SageMaker.TargetPlatformOs")]
public Amazon.SageMaker.TargetPlatformOs TargetPlatform_Os { get; set; }
#endregion
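// Illustrative only: one valid TargetPlatform combination from the lists above, expressed
// with the SDK model type that the cmdlet populates in Execute(). Per the accelerator notes,
// an NVIDIA accelerator also requires the gpu-code, trt-ver, and cuda-ver compiler options.
//
//   var targetPlatform = new Amazon.SageMaker.Model.TargetPlatform
//   {
//       Os = Amazon.SageMaker.TargetPlatformOs.LINUX,
//       Arch = Amazon.SageMaker.TargetPlatformArch.ARM64,
//       Accelerator = Amazon.SageMaker.TargetPlatformAccelerator.NVIDIA
//   };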
#region Parameter RoleArn
///
///
/// The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform
/// tasks on your behalf. During model compilation, Amazon SageMaker needs your permission to:
/// - Read input data from an S3 bucket
/// - Write model artifacts to an S3 bucket
/// - Write logs to Amazon CloudWatch Logs
/// - Publish metrics to Amazon CloudWatch
/// You grant permissions for all of these tasks to an IAM role. To pass this role to
/// Amazon SageMaker, the caller of this API must have the iam:PassRole permission.
/// For more information, see Amazon SageMaker Roles.
///
///
#if !MODULAR
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
#else
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true, Mandatory = true)]
[System.Management.Automation.AllowEmptyString]
[System.Management.Automation.AllowNull]
#endif
[Amazon.PowerShell.Common.AWSRequiredParameter]
public System.String RoleArn { get; set; }
#endregion
#region Parameter OutputConfig_S3OutputLocation
///
///
/// Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts.
/// For example, s3://bucket-name/key-name-prefix.
///
///
#if !MODULAR
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
#else
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true, Mandatory = true)]
[System.Management.Automation.AllowEmptyString]
[System.Management.Automation.AllowNull]
#endif
[Amazon.PowerShell.Common.AWSRequiredParameter]
public System.String OutputConfig_S3OutputLocation { get; set; }
#endregion
#region Parameter InputConfig_S3Uri
///
///
/// The S3 path where the model artifacts, which result from model training, are stored.
/// This path must point to a single gzip compressed tar archive (.tar.gz suffix).
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String InputConfig_S3Uri { get; set; }
#endregion
#region Parameter VpcConfig_SecurityGroupId
///
///
/// The VPC security group IDs. IDs have the form of sg-xxxxxxxx. Specify
/// the security groups for the VPC that is specified in the Subnets field.
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("VpcConfig_SecurityGroupIds")]
public System.String[] VpcConfig_SecurityGroupId { get; set; }
#endregion
#region Parameter VpcConfig_Subnet
///
///
/// The IDs of the subnets in the VPC that you want to connect the compilation job to for
/// accessing the model in Amazon S3.
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("VpcConfig_Subnets")]
public System.String[] VpcConfig_Subnet { get; set; }
#endregion
#region Parameter Tag
///
///
/// An array of key-value pairs. You can use tags to categorize your Amazon Web Services
/// resources in different ways, for example, by purpose, owner, or environment. For more
/// information, see Tagging
/// Amazon Web Services Resources.
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("Tags")]
public Amazon.SageMaker.Model.Tag[] Tag { get; set; }
#endregion
#region Parameter OutputConfig_TargetDevice
///
///
/// Identifies the target device or the machine learning instance that you want to run
/// your model on after the compilation has completed. Alternatively, you can specify the
/// OS, architecture, and accelerator using the TargetPlatform fields instead of TargetDevice.
/// Currently, ml_trn1 is available only in the US East (N. Virginia) Region,
/// and ml_inf2 is available only in the US East (Ohio) Region.
///
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[AWSConstantClassSource("Amazon.SageMaker.TargetDevice")]
public Amazon.SageMaker.TargetDevice OutputConfig_TargetDevice { get; set; }
#endregion
#region Parameter Select
///
/// Use the -Select parameter to control the cmdlet output. The default value is 'CompilationJobArn'.
/// Specifying -Select '*' will result in the cmdlet returning the whole service response (Amazon.SageMaker.Model.CreateCompilationJobResponse).
/// Specifying the name of a property of type Amazon.SageMaker.Model.CreateCompilationJobResponse will result in that property being returned.
/// Specifying -Select '^ParameterName' will result in the cmdlet returning the selected cmdlet parameter value.
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public string Select { get; set; } = "CompilationJobArn";
#endregion
#region Parameter PassThru
///
/// Changes the cmdlet behavior to return the value passed to the CompilationJobName parameter.
/// The -PassThru parameter is deprecated, use -Select '^CompilationJobName' instead. This parameter will be removed in a future version.
///
[System.Obsolete("The -PassThru parameter is deprecated, use -Select '^CompilationJobName' instead. This parameter will be removed in a future version.")]
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public SwitchParameter PassThru { get; set; }
#endregion
#region Parameter Force
///
/// This parameter overrides confirmation prompts to force
/// the cmdlet to continue its operation. This parameter should always
/// be used with caution.
///
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public SwitchParameter Force { get; set; }
#endregion
protected override void ProcessRecord()
{
this._AWSSignerType = "v4";
base.ProcessRecord();
var resourceIdentifiersText = FormatParameterValuesForConfirmationMsg(nameof(this.CompilationJobName), MyInvocation.BoundParameters);
if (!ConfirmShouldProceed(this.Force.IsPresent, resourceIdentifiersText, "New-SMCompilationJob (CreateCompilationJob)"))
{
return;
}
var context = new CmdletContext();
// allow for manipulation of parameters prior to loading into context
PreExecutionContextLoad(context);
#pragma warning disable CS0618, CS0612 //A class member was marked with the Obsolete attribute
if (ParameterWasBound(nameof(this.Select)))
{
context.Select = CreateSelectDelegate(Select) ??
throw new System.ArgumentException("Invalid value for -Select parameter.", nameof(this.Select));
if (this.PassThru.IsPresent)
{
throw new System.ArgumentException("-PassThru cannot be used when -Select is specified.", nameof(this.Select));
}
}
else if (this.PassThru.IsPresent)
{
context.Select = (response, cmdlet) => this.CompilationJobName;
}
#pragma warning restore CS0618, CS0612 //A class member was marked with the Obsolete attribute
context.CompilationJobName = this.CompilationJobName;
#if MODULAR
if (this.CompilationJobName == null && ParameterWasBound(nameof(this.CompilationJobName)))
{
WriteWarning("You are passing $null as a value for parameter CompilationJobName which is marked as required. In case you believe this parameter was incorrectly marked as required, report this by opening an issue at https://github.com/aws/aws-tools-for-powershell/issues.");
}
#endif
context.InputConfig_DataInputConfig = this.InputConfig_DataInputConfig;
context.InputConfig_Framework = this.InputConfig_Framework;
context.InputConfig_FrameworkVersion = this.InputConfig_FrameworkVersion;
context.InputConfig_S3Uri = this.InputConfig_S3Uri;
context.ModelPackageVersionArn = this.ModelPackageVersionArn;
context.OutputConfig_CompilerOption = this.OutputConfig_CompilerOption;
context.OutputConfig_KmsKeyId = this.OutputConfig_KmsKeyId;
context.OutputConfig_S3OutputLocation = this.OutputConfig_S3OutputLocation;
#if MODULAR
if (this.OutputConfig_S3OutputLocation == null && ParameterWasBound(nameof(this.OutputConfig_S3OutputLocation)))
{
WriteWarning("You are passing $null as a value for parameter OutputConfig_S3OutputLocation which is marked as required. In case you believe this parameter was incorrectly marked as required, report this by opening an issue at https://github.com/aws/aws-tools-for-powershell/issues.");
}
#endif
context.OutputConfig_TargetDevice = this.OutputConfig_TargetDevice;
context.TargetPlatform_Accelerator = this.TargetPlatform_Accelerator;
context.TargetPlatform_Arch = this.TargetPlatform_Arch;
context.TargetPlatform_Os = this.TargetPlatform_Os;
context.RoleArn = this.RoleArn;
#if MODULAR
if (this.RoleArn == null && ParameterWasBound(nameof(this.RoleArn)))
{
WriteWarning("You are passing $null as a value for parameter RoleArn which is marked as required. In case you believe this parameter was incorrectly marked as required, report this by opening an issue at https://github.com/aws/aws-tools-for-powershell/issues.");
}
#endif
context.StoppingCondition_MaxRuntimeInSecond = this.StoppingCondition_MaxRuntimeInSecond;
context.StoppingCondition_MaxWaitTimeInSecond = this.StoppingCondition_MaxWaitTimeInSecond;
if (this.Tag != null)
{
context.Tag = new List<Amazon.SageMaker.Model.Tag>(this.Tag);
}
if (this.VpcConfig_SecurityGroupId != null)
{
context.VpcConfig_SecurityGroupId = new List<System.String>(this.VpcConfig_SecurityGroupId);
}
if (this.VpcConfig_Subnet != null)
{
context.VpcConfig_Subnet = new List<System.String>(this.VpcConfig_Subnet);
}
// allow further manipulation of loaded context prior to processing
PostExecutionContextLoad(context);
var output = Execute(context) as CmdletOutput;
ProcessOutput(output);
}
#region IExecutor Members
public object Execute(ExecutorContext context)
{
var cmdletContext = context as CmdletContext;
// create request
var request = new Amazon.SageMaker.Model.CreateCompilationJobRequest();
if (cmdletContext.CompilationJobName != null)
{
request.CompilationJobName = cmdletContext.CompilationJobName;
}
// populate InputConfig
var requestInputConfigIsNull = true;
request.InputConfig = new Amazon.SageMaker.Model.InputConfig();
System.String requestInputConfig_inputConfig_DataInputConfig = null;
if (cmdletContext.InputConfig_DataInputConfig != null)
{
requestInputConfig_inputConfig_DataInputConfig = cmdletContext.InputConfig_DataInputConfig;
}
if (requestInputConfig_inputConfig_DataInputConfig != null)
{
request.InputConfig.DataInputConfig = requestInputConfig_inputConfig_DataInputConfig;
requestInputConfigIsNull = false;
}
Amazon.SageMaker.Framework requestInputConfig_inputConfig_Framework = null;
if (cmdletContext.InputConfig_Framework != null)
{
requestInputConfig_inputConfig_Framework = cmdletContext.InputConfig_Framework;
}
if (requestInputConfig_inputConfig_Framework != null)
{
request.InputConfig.Framework = requestInputConfig_inputConfig_Framework;
requestInputConfigIsNull = false;
}
System.String requestInputConfig_inputConfig_FrameworkVersion = null;
if (cmdletContext.InputConfig_FrameworkVersion != null)
{
requestInputConfig_inputConfig_FrameworkVersion = cmdletContext.InputConfig_FrameworkVersion;
}
if (requestInputConfig_inputConfig_FrameworkVersion != null)
{
request.InputConfig.FrameworkVersion = requestInputConfig_inputConfig_FrameworkVersion;
requestInputConfigIsNull = false;
}
System.String requestInputConfig_inputConfig_S3Uri = null;
if (cmdletContext.InputConfig_S3Uri != null)
{
requestInputConfig_inputConfig_S3Uri = cmdletContext.InputConfig_S3Uri;
}
if (requestInputConfig_inputConfig_S3Uri != null)
{
request.InputConfig.S3Uri = requestInputConfig_inputConfig_S3Uri;
requestInputConfigIsNull = false;
}
// determine if request.InputConfig should be set to null
if (requestInputConfigIsNull)
{
request.InputConfig = null;
}
if (cmdletContext.ModelPackageVersionArn != null)
{
request.ModelPackageVersionArn = cmdletContext.ModelPackageVersionArn;
}
// populate OutputConfig
var requestOutputConfigIsNull = true;
request.OutputConfig = new Amazon.SageMaker.Model.OutputConfig();
System.String requestOutputConfig_outputConfig_CompilerOption = null;
if (cmdletContext.OutputConfig_CompilerOption != null)
{
requestOutputConfig_outputConfig_CompilerOption = cmdletContext.OutputConfig_CompilerOption;
}
if (requestOutputConfig_outputConfig_CompilerOption != null)
{
request.OutputConfig.CompilerOptions = requestOutputConfig_outputConfig_CompilerOption;
requestOutputConfigIsNull = false;
}
System.String requestOutputConfig_outputConfig_KmsKeyId = null;
if (cmdletContext.OutputConfig_KmsKeyId != null)
{
requestOutputConfig_outputConfig_KmsKeyId = cmdletContext.OutputConfig_KmsKeyId;
}
if (requestOutputConfig_outputConfig_KmsKeyId != null)
{
request.OutputConfig.KmsKeyId = requestOutputConfig_outputConfig_KmsKeyId;
requestOutputConfigIsNull = false;
}
System.String requestOutputConfig_outputConfig_S3OutputLocation = null;
if (cmdletContext.OutputConfig_S3OutputLocation != null)
{
requestOutputConfig_outputConfig_S3OutputLocation = cmdletContext.OutputConfig_S3OutputLocation;
}
if (requestOutputConfig_outputConfig_S3OutputLocation != null)
{
request.OutputConfig.S3OutputLocation = requestOutputConfig_outputConfig_S3OutputLocation;
requestOutputConfigIsNull = false;
}
Amazon.SageMaker.TargetDevice requestOutputConfig_outputConfig_TargetDevice = null;
if (cmdletContext.OutputConfig_TargetDevice != null)
{
requestOutputConfig_outputConfig_TargetDevice = cmdletContext.OutputConfig_TargetDevice;
}
if (requestOutputConfig_outputConfig_TargetDevice != null)
{
request.OutputConfig.TargetDevice = requestOutputConfig_outputConfig_TargetDevice;
requestOutputConfigIsNull = false;
}
Amazon.SageMaker.Model.TargetPlatform requestOutputConfig_outputConfig_TargetPlatform = null;
// populate TargetPlatform
var requestOutputConfig_outputConfig_TargetPlatformIsNull = true;
requestOutputConfig_outputConfig_TargetPlatform = new Amazon.SageMaker.Model.TargetPlatform();
Amazon.SageMaker.TargetPlatformAccelerator requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Accelerator = null;
if (cmdletContext.TargetPlatform_Accelerator != null)
{
requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Accelerator = cmdletContext.TargetPlatform_Accelerator;
}
if (requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Accelerator != null)
{
requestOutputConfig_outputConfig_TargetPlatform.Accelerator = requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Accelerator;
requestOutputConfig_outputConfig_TargetPlatformIsNull = false;
}
Amazon.SageMaker.TargetPlatformArch requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Arch = null;
if (cmdletContext.TargetPlatform_Arch != null)
{
requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Arch = cmdletContext.TargetPlatform_Arch;
}
if (requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Arch != null)
{
requestOutputConfig_outputConfig_TargetPlatform.Arch = requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Arch;
requestOutputConfig_outputConfig_TargetPlatformIsNull = false;
}
Amazon.SageMaker.TargetPlatformOs requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Os = null;
if (cmdletContext.TargetPlatform_Os != null)
{
requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Os = cmdletContext.TargetPlatform_Os;
}
if (requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Os != null)
{
requestOutputConfig_outputConfig_TargetPlatform.Os = requestOutputConfig_outputConfig_TargetPlatform_targetPlatform_Os;
requestOutputConfig_outputConfig_TargetPlatformIsNull = false;
}
// determine if requestOutputConfig_outputConfig_TargetPlatform should be set to null
if (requestOutputConfig_outputConfig_TargetPlatformIsNull)
{
requestOutputConfig_outputConfig_TargetPlatform = null;
}
if (requestOutputConfig_outputConfig_TargetPlatform != null)
{
request.OutputConfig.TargetPlatform = requestOutputConfig_outputConfig_TargetPlatform;
requestOutputConfigIsNull = false;
}
// determine if request.OutputConfig should be set to null
if (requestOutputConfigIsNull)
{
request.OutputConfig = null;
}
if (cmdletContext.RoleArn != null)
{
request.RoleArn = cmdletContext.RoleArn;
}
// populate StoppingCondition
var requestStoppingConditionIsNull = true;
request.StoppingCondition = new Amazon.SageMaker.Model.StoppingCondition();
System.Int32? requestStoppingCondition_stoppingCondition_MaxRuntimeInSecond = null;
if (cmdletContext.StoppingCondition_MaxRuntimeInSecond != null)
{
requestStoppingCondition_stoppingCondition_MaxRuntimeInSecond = cmdletContext.StoppingCondition_MaxRuntimeInSecond.Value;
}
if (requestStoppingCondition_stoppingCondition_MaxRuntimeInSecond != null)
{
request.StoppingCondition.MaxRuntimeInSeconds = requestStoppingCondition_stoppingCondition_MaxRuntimeInSecond.Value;
requestStoppingConditionIsNull = false;
}
System.Int32? requestStoppingCondition_stoppingCondition_MaxWaitTimeInSecond = null;
if (cmdletContext.StoppingCondition_MaxWaitTimeInSecond != null)
{
requestStoppingCondition_stoppingCondition_MaxWaitTimeInSecond = cmdletContext.StoppingCondition_MaxWaitTimeInSecond.Value;
}
if (requestStoppingCondition_stoppingCondition_MaxWaitTimeInSecond != null)
{
request.StoppingCondition.MaxWaitTimeInSeconds = requestStoppingCondition_stoppingCondition_MaxWaitTimeInSecond.Value;
requestStoppingConditionIsNull = false;
}
// determine if request.StoppingCondition should be set to null
if (requestStoppingConditionIsNull)
{
request.StoppingCondition = null;
}
if (cmdletContext.Tag != null)
{
request.Tags = cmdletContext.Tag;
}
// populate VpcConfig
var requestVpcConfigIsNull = true;
request.VpcConfig = new Amazon.SageMaker.Model.NeoVpcConfig();
List<System.String> requestVpcConfig_vpcConfig_SecurityGroupId = null;
if (cmdletContext.VpcConfig_SecurityGroupId != null)
{
requestVpcConfig_vpcConfig_SecurityGroupId = cmdletContext.VpcConfig_SecurityGroupId;
}
if (requestVpcConfig_vpcConfig_SecurityGroupId != null)
{
request.VpcConfig.SecurityGroupIds = requestVpcConfig_vpcConfig_SecurityGroupId;
requestVpcConfigIsNull = false;
}
List<System.String> requestVpcConfig_vpcConfig_Subnet = null;
if (cmdletContext.VpcConfig_Subnet != null)
{
requestVpcConfig_vpcConfig_Subnet = cmdletContext.VpcConfig_Subnet;
}
if (requestVpcConfig_vpcConfig_Subnet != null)
{
request.VpcConfig.Subnets = requestVpcConfig_vpcConfig_Subnet;
requestVpcConfigIsNull = false;
}
// determine if request.VpcConfig should be set to null
if (requestVpcConfigIsNull)
{
request.VpcConfig = null;
}
CmdletOutput output;
// issue call
var client = Client ?? CreateClient(_CurrentCredentials, _RegionEndpoint);
try
{
var response = CallAWSServiceOperation(client, request);
object pipelineOutput = null;
pipelineOutput = cmdletContext.Select(response, this);
output = new CmdletOutput
{
PipelineOutput = pipelineOutput,
ServiceResponse = response
};
}
catch (Exception e)
{
output = new CmdletOutput { ErrorResponse = e };
}
return output;
}
public ExecutorContext CreateContext()
{
return new CmdletContext();
}
#endregion
#region AWS Service Operation Call
private Amazon.SageMaker.Model.CreateCompilationJobResponse CallAWSServiceOperation(IAmazonSageMaker client, Amazon.SageMaker.Model.CreateCompilationJobRequest request)
{
Utils.Common.WriteVerboseEndpointMessage(this, client.Config, "Amazon SageMaker Service", "CreateCompilationJob");
try
{
#if DESKTOP
return client.CreateCompilationJob(request);
#elif CORECLR
return client.CreateCompilationJobAsync(request).GetAwaiter().GetResult();
#else
#error "Unknown build edition"
#endif
}
catch (AmazonServiceException exc)
{
var webException = exc.InnerException as System.Net.WebException;
if (webException != null)
{
throw new Exception(Utils.Common.FormatNameResolutionFailureMessage(client.Config, webException.Message), webException);
}
throw;
}
}
#endregion
internal partial class CmdletContext : ExecutorContext
{
public System.String CompilationJobName { get; set; }
public System.String InputConfig_DataInputConfig { get; set; }
public Amazon.SageMaker.Framework InputConfig_Framework { get; set; }
public System.String InputConfig_FrameworkVersion { get; set; }
public System.String InputConfig_S3Uri { get; set; }
public System.String ModelPackageVersionArn { get; set; }
public System.String OutputConfig_CompilerOption { get; set; }
public System.String OutputConfig_KmsKeyId { get; set; }
public System.String OutputConfig_S3OutputLocation { get; set; }
public Amazon.SageMaker.TargetDevice OutputConfig_TargetDevice { get; set; }
public Amazon.SageMaker.TargetPlatformAccelerator TargetPlatform_Accelerator { get; set; }
public Amazon.SageMaker.TargetPlatformArch TargetPlatform_Arch { get; set; }
public Amazon.SageMaker.TargetPlatformOs TargetPlatform_Os { get; set; }
public System.String RoleArn { get; set; }
public System.Int32? StoppingCondition_MaxRuntimeInSecond { get; set; }
public System.Int32? StoppingCondition_MaxWaitTimeInSecond { get; set; }
public List<Amazon.SageMaker.Model.Tag> Tag { get; set; }
public List<System.String> VpcConfig_SecurityGroupId { get; set; }
public List<System.String> VpcConfig_Subnet { get; set; }
public System.Func<Amazon.SageMaker.Model.CreateCompilationJobResponse, NewSMCompilationJobCmdlet, object> Select { get; set; } =
(response, cmdlet) => response.CompilationJobArn;
}
}
}