/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Do not modify this file. This file is generated from the sagemaker-2017-07-24.normal.json service model.
*/
using System;
using System.Collections.Generic;
using System.Xml.Serialization;
using System.Text;
using System.IO;
using System.Net;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
namespace Amazon.SageMaker.Model
{
/// <summary>
/// Information required for human workers to complete a labeling task.
/// </summary>
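/// <example>
/// <para>
/// A minimal sketch of populating this model for a built-in bounding box job before passing
/// it as the <code>HumanTaskConfig</code> member of a <code>CreateLabelingJobRequest</code>.
/// The work team ARN and the UI template URI are placeholders; the Lambda ARNs shown are the
/// built-in us-east-1 functions listed for <code>PreHumanTaskLambdaArn</code> below.
/// </para>
/// <code>
/// var humanTaskConfig = new HumanTaskConfig
/// {
///     // Placeholder work team and worker UI template.
///     WorkteamArn = "arn:aws:sagemaker:us-east-1:111122223333:workteam/private-crowd/my-team",
///     UiConfig = new UiConfig { UiTemplateS3Uri = "s3://my-bucket/templates/bounding-box.liquid.html" },
///
///     // Built-in bounding box pre-annotation and consolidation functions (us-east-1 shown).
///     PreHumanTaskLambdaArn = "arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox",
///     AnnotationConsolidationConfig = new AnnotationConsolidationConfig
///     {
///         AnnotationConsolidationLambdaArn = "arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox"
///     },
///
///     TaskTitle = "Draw bounding boxes",
///     TaskDescription = "Draw a box around every vehicle in the image.",
///     NumberOfHumanWorkersPerDataObject = 3,
///     TaskTimeLimitInSeconds = 300,
///     TaskAvailabilityLifetimeInSeconds = 21600
/// };
/// </code>
/// </example>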
public partial class HumanTaskConfig
{
private AnnotationConsolidationConfig _annotationConsolidationConfig;
private int? _maxConcurrentTaskCount;
private int? _numberOfHumanWorkersPerDataObject;
private string _preHumanTaskLambdaArn;
private PublicWorkforceTaskPrice _publicWorkforceTaskPrice;
private int? _taskAvailabilityLifetimeInSeconds;
private string _taskDescription;
private List<string> _taskKeywords = new List<string>();
private int? _taskTimeLimitInSeconds;
private string _taskTitle;
private UiConfig _uiConfig;
private string _workteamArn;
/// <summary>
/// Gets and sets the property AnnotationConsolidationConfig.
/// <para>
/// Configures how labels are consolidated across human workers.
/// </para>
/// </summary>
[AWSProperty(Required=true)]
public AnnotationConsolidationConfig AnnotationConsolidationConfig
{
get { return this._annotationConsolidationConfig; }
set { this._annotationConsolidationConfig = value; }
}
// Check to see if AnnotationConsolidationConfig property is set
internal bool IsSetAnnotationConsolidationConfig()
{
return this._annotationConsolidationConfig != null;
}
/// <summary>
/// Gets and sets the property MaxConcurrentTaskCount.
/// <para>
/// Defines the maximum number of data objects that can be labeled by human workers at
/// the same time. Also referred to as batch size. Each object may have more than one
/// worker at one time. The default value is 1000 objects. To increase the maximum value
/// to 5000 objects, contact Amazon Web Services Support.
/// </para>
/// </summary>
[AWSProperty(Min=1, Max=5000)]
public int MaxConcurrentTaskCount
{
get { return this._maxConcurrentTaskCount.GetValueOrDefault(); }
set { this._maxConcurrentTaskCount = value; }
}
// Check to see if MaxConcurrentTaskCount property is set
internal bool IsSetMaxConcurrentTaskCount()
{
return this._maxConcurrentTaskCount.HasValue;
}
/// <summary>
/// Gets and sets the property NumberOfHumanWorkersPerDataObject.
/// <para>
/// The number of human workers that will label an object.
/// </para>
/// </summary>
[AWSProperty(Required=true, Min=1, Max=9)]
public int NumberOfHumanWorkersPerDataObject
{
get { return this._numberOfHumanWorkersPerDataObject.GetValueOrDefault(); }
set { this._numberOfHumanWorkersPerDataObject = value; }
}
// Check to see if NumberOfHumanWorkersPerDataObject property is set
internal bool IsSetNumberOfHumanWorkersPerDataObject()
{
return this._numberOfHumanWorkersPerDataObject.HasValue;
}
/// <summary>
/// Gets and sets the property PreHumanTaskLambdaArn.
/// <para>
/// The Amazon Resource Name (ARN) of a Lambda function that is run before a data object
/// is sent to a human worker. Use this function to provide input to a custom labeling
/// job.
/// </para>
/// <para>
/// For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda
/// function ARNs for <code>PreHumanTaskLambdaArn</code>. For custom labeling workflows,
/// see Pre-annotation Lambda.
/// </para>
/// <para>
/// Each built-in task type has a pre-annotation Lambda function in the Regions listed
/// below, and the ARN always has the form
/// <code>arn:aws:lambda:&lt;region&gt;:&lt;account-id&gt;:function:&lt;function-name&gt;</code>,
/// where the account ID depends only on the Region:
/// </para>
/// <ul>
/// <li>us-east-1 - 432418664414</li>
/// <li>us-east-2 - 266458841044</li>
/// <li>us-west-2 - 081040173940</li>
/// <li>ca-central-1 - 918755190332</li>
/// <li>eu-west-1 - 568282634449</li>
/// <li>eu-west-2 - 487402164563</li>
/// <li>eu-central-1 - 203001061592</li>
/// <li>ap-northeast-1 - 477331159723</li>
/// <li>ap-northeast-2 - 845288260483</li>
/// <li>ap-south-1 - 565803892007</li>
/// <li>ap-southeast-1 - 377565633583</li>
/// <li>ap-southeast-2 - 454466003867</li>
/// </ul>
/// <para>
/// For example, the bounding box pre-annotation Lambda ARN in us-east-1 is
/// <code>arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox</code>.
/// </para>
/// <para>
/// The built-in task types and their pre-annotation Lambda function names are:
/// </para>
/// <ul>
/// <li>
/// <b>Bounding box</b> (<code>PRE-BoundingBox</code>) - Finds the most similar boxes from
/// different workers based on the Jaccard index of the boxes.
/// </li>
/// <li>
/// <b>Image classification</b> (<code>PRE-ImageMultiClass</code>) - Uses a variant of the
/// Expectation Maximization approach to estimate the true class of an image based on
/// annotations from individual workers.
/// </li>
/// <li>
/// <b>Multi-label image classification</b> (<code>PRE-ImageMultiClassMultiLabel</code>) -
/// Uses a variant of the Expectation Maximization approach to estimate the true classes
/// of an image based on annotations from individual workers.
/// </li>
/// <li>
/// <b>Semantic segmentation</b> (<code>PRE-SemanticSegmentation</code>) - Treats each pixel
/// in an image as a multi-class classification and treats pixel annotations from workers
/// as "votes" for the correct label.
/// </li>
/// <li>
/// <b>Text classification</b> (<code>PRE-TextMultiClass</code>) - Uses a variant of the
/// Expectation Maximization approach to estimate the true class of text based on
/// annotations from individual workers.
/// </li>
/// <li>
/// <b>Multi-label text classification</b> (<code>PRE-TextMultiClassMultiLabel</code>) -
/// Uses a variant of the Expectation Maximization approach to estimate the true classes
/// of text based on annotations from individual workers.
/// </li>
/// <li>
/// <b>Named entity recognition</b> (<code>PRE-NamedEntityRecognition</code>) - Groups
/// similar selections and calculates aggregate boundaries, resolving to the most-assigned
/// label.
/// </li>
/// <li>
/// <b>Video classification</b> (<code>PRE-VideoMultiClass</code>) - Use this task type when
/// you need workers to classify videos using predefined labels that you specify. Workers
/// are shown videos and are asked to choose one label for each video.
/// </li>
/// <li>
/// <b>Video frame object detection</b> (<code>PRE-VideoObjectDetection</code>) - Use this
/// task type to have workers identify and locate objects in a sequence of video frames
/// (images extracted from a video) using bounding boxes. For example, you can use this
/// task to ask workers to identify and localize various objects in a series of video
/// frames, such as cars, bikes, and pedestrians.
/// </li>
/// <li>
/// <b>Video frame object tracking</b> (<code>PRE-VideoObjectTracking</code>) - Use this
/// task type to have workers track the movement of objects in a sequence of video frames
/// (images extracted from a video) using bounding boxes. For example, you can use this
/// task to ask workers to track the movement of objects, such as cars, bikes, and
/// pedestrians.
/// </li>
/// </ul>
/// <para>
/// <b>3D Point Cloud Modalities</b>
/// </para>
/// <para>
/// Use the following pre-annotation Lambdas for 3D point cloud labeling modality tasks.
/// See 3D Point Cloud Task types to learn more.
/// </para>
/// <ul>
/// <li>
/// <b>3D point cloud object detection</b> (<code>PRE-3DPointCloudObjectDetection</code>) -
/// Use this task type when you want workers to classify objects in a 3D point cloud by
/// drawing 3D cuboids around objects. For example, you can use this task type to ask
/// workers to identify different types of objects in a point cloud, such as cars, bikes,
/// and pedestrians.
/// </li>
/// <li>
/// <b>3D point cloud object tracking</b> (<code>PRE-3DPointCloudObjectTracking</code>) -
/// Use this task type when you want workers to draw 3D cuboids around objects that appear
/// in a sequence of 3D point cloud frames. For example, you can use this task type to ask
/// workers to track the movement of vehicles across multiple point cloud frames.
/// </li>
/// <li>
/// <b>3D point cloud semantic segmentation</b>
/// (<code>PRE-3DPointCloudSemanticSegmentation</code>) - Use this task type when you want
/// workers to create a point-level semantic segmentation mask by painting objects in a 3D
/// point cloud using different colors, where each color is assigned to one of the classes
/// you specify.
/// </li>
/// </ul>
/// <para>
/// <b>Label Verification and Adjustment Jobs</b>
/// </para>
/// <para>
/// Use label verification and adjustment jobs to review and adjust labels. To learn more,
/// see Verify and Adjust Labels.
/// </para>
/// <ul>
/// <li>
/// <b>Bounding box verification</b> (<code>PRE-VerificationBoundingBox</code>) - Uses a
/// variant of the Expectation Maximization approach to estimate the true class of
/// verification judgment for bounding box labels based on annotations from individual
/// workers.
/// </li>
/// <li>
/// <b>Bounding box adjustment</b> (<code>PRE-AdjustmentBoundingBox</code>) - Finds the most
/// similar boxes from different workers based on the Jaccard index of the adjusted
/// annotations.
/// </li>
/// <li>
/// <b>Semantic segmentation verification</b>
/// (<code>PRE-VerificationSemanticSegmentation</code>) - Uses a variant of the Expectation
/// Maximization approach to estimate the true class of verification judgment for semantic
/// segmentation labels based on annotations from individual workers.
/// </li>
/// <li>
/// <b>Semantic segmentation adjustment</b> (<code>PRE-AdjustmentSemanticSegmentation</code>)
/// - Treats each pixel in an image as a multi-class classification and treats pixel
/// adjusted annotations from workers as "votes" for the correct label.
/// </li>
/// <li>
/// <b>Video frame object detection adjustment</b>
/// (<code>PRE-AdjustmentVideoObjectDetection</code>) - Use this task type when you want
/// workers to adjust bounding boxes that workers have added to video frames to classify
/// and localize objects in a sequence of video frames.
/// </li>
/// <li>
/// <b>Video frame object tracking adjustment</b>
/// (<code>PRE-AdjustmentVideoObjectTracking</code>) - Use this task type when you want
/// workers to adjust bounding boxes that workers have added to video frames to track
/// object movement across a sequence of video frames.
/// </li>
/// <li>
/// <b>3D point cloud object detection adjustment</b>
/// (<code>PRE-Adjustment3DPointCloudObjectDetection</code>) - Adjust 3D cuboids in a point
/// cloud frame.
/// </li>
/// <li>
/// <b>3D point cloud object tracking adjustment</b>
/// (<code>PRE-Adjustment3DPointCloudObjectTracking</code>) - Adjust 3D cuboids across a
/// sequence of point cloud frames.
/// </li>
/// <li>
/// <b>3D point cloud semantic segmentation adjustment</b>
/// (<code>PRE-Adjustment3DPointCloudSemanticSegmentation</code>) - Adjust semantic
/// segmentation masks in a 3D point cloud.
/// </li>
/// </ul>
/// </summary>
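/// <example>
/// <para>
/// A short sketch of composing a built-in pre-annotation Lambda ARN from the Region and
/// account pairs listed above. Only three Regions are shown for brevity; fill in the rest
/// from the list.
/// </para>
/// <code>
/// // Account IDs come from the Region list above.
/// string region = "eu-west-1";
/// string account;
/// switch (region)
/// {
///     case "us-east-1": account = "432418664414"; break;
///     case "us-west-2": account = "081040173940"; break;
///     case "eu-west-1": account = "568282634449"; break;
///     default: throw new ArgumentException("Add the remaining Regions from the list above.");
/// }
///
/// string preHumanTaskLambdaArn =
///     string.Format("arn:aws:lambda:{0}:{1}:function:{2}", region, account, "PRE-BoundingBox");
/// </code>
/// </example>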
[AWSProperty(Required=true, Max=2048)]
public string PreHumanTaskLambdaArn
{
get { return this._preHumanTaskLambdaArn; }
set { this._preHumanTaskLambdaArn = value; }
}
// Check to see if PreHumanTaskLambdaArn property is set
internal bool IsSetPreHumanTaskLambdaArn()
{
return this._preHumanTaskLambdaArn != null;
}
/// <summary>
/// Gets and sets the property PublicWorkforceTaskPrice.
/// <para>
/// The price that you pay for each task performed by an Amazon Mechanical Turk worker.
/// </para>
/// </summary>
public PublicWorkforceTaskPrice PublicWorkforceTaskPrice
{
get { return this._publicWorkforceTaskPrice; }
set { this._publicWorkforceTaskPrice = value; }
}
// Check to see if PublicWorkforceTaskPrice property is set
internal bool IsSetPublicWorkforceTaskPrice()
{
return this._publicWorkforceTaskPrice != null;
}
/// <summary>
/// Gets and sets the property TaskAvailabilityLifetimeInSeconds.
/// <para>
/// The length of time that a task remains available for labeling by human workers. The
/// default and maximum values for this parameter depend on the type of workforce you
/// use.
/// </para>
/// <ul>
/// <li>
/// If you choose the Amazon Mechanical Turk workforce, the maximum is 12 hours (43,200
/// seconds). The default is 6 hours (21,600 seconds).
/// </li>
/// <li>
/// If you choose a private or vendor workforce, the default value is 30 days (2,592,000
/// seconds) for non-AL mode. For most users, the maximum is also 30 days.
/// </li>
/// </ul>
/// </summary>
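/// <example>
/// <para>
/// A small sketch of setting this value from <code>TimeSpan</code> rather than hard-coded
/// second counts. It assumes a <code>HumanTaskConfig</code> instance named
/// <code>humanTaskConfig</code>.
/// </para>
/// <code>
/// // 6 hours (the Mechanical Turk default), expressed in seconds: 21,600.
/// humanTaskConfig.TaskAvailabilityLifetimeInSeconds = (int)TimeSpan.FromHours(6).TotalSeconds;
///
/// // 30 days (the private/vendor workforce default), expressed in seconds: 2,592,000.
/// humanTaskConfig.TaskAvailabilityLifetimeInSeconds = (int)TimeSpan.FromDays(30).TotalSeconds;
/// </code>
/// </example>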
[AWSProperty(Min=60)]
public int TaskAvailabilityLifetimeInSeconds
{
get { return this._taskAvailabilityLifetimeInSeconds.GetValueOrDefault(); }
set { this._taskAvailabilityLifetimeInSeconds = value; }
}
// Check to see if TaskAvailabilityLifetimeInSeconds property is set
internal bool IsSetTaskAvailabilityLifetimeInSeconds()
{
return this._taskAvailabilityLifetimeInSeconds.HasValue;
}
/// <summary>
/// Gets and sets the property TaskDescription.
/// <para>
/// A description of the task for your human workers.
/// </para>
/// </summary>
[AWSProperty(Required=true, Min=1, Max=255)]
public string TaskDescription
{
get { return this._taskDescription; }
set { this._taskDescription = value; }
}
// Check to see if TaskDescription property is set
internal bool IsSetTaskDescription()
{
return this._taskDescription != null;
}
/// <summary>
/// Gets and sets the property TaskKeywords.
/// <para>
/// Keywords used to describe the task so that workers on Amazon Mechanical Turk can discover
/// the task.
/// </para>
/// </summary>
[AWSProperty(Min=1, Max=5)]
public List<string> TaskKeywords
{
get { return this._taskKeywords; }
set { this._taskKeywords = value; }
}
// Check to see if TaskKeywords property is set
internal bool IsSetTaskKeywords()
{
return this._taskKeywords != null && this._taskKeywords.Count > 0;
}
/// <summary>
/// Gets and sets the property TaskTimeLimitInSeconds.
/// <para>
/// The amount of time that a worker has to complete a task.
/// </para>
/// <para>
/// If you create a custom labeling job, the maximum value for this parameter is 8 hours
/// (28,800 seconds).
/// </para>
/// <para>
/// If you create a labeling job using a built-in task type, the maximum for this parameter
/// depends on the task type you use:
/// </para>
/// <ul>
/// <li>
/// For image and text labeling jobs, the maximum is 8 hours (28,800 seconds).
/// </li>
/// <li>
/// For 3D point cloud and video frame labeling jobs, the maximum is 30 days (2,592,000
/// seconds) for non-AL mode. For most users, the maximum is also 30 days.
/// </li>
/// </ul>
/// </summary>
[AWSProperty(Required=true, Min=30)]
public int TaskTimeLimitInSeconds
{
get { return this._taskTimeLimitInSeconds.GetValueOrDefault(); }
set { this._taskTimeLimitInSeconds = value; }
}
// Check to see if TaskTimeLimitInSeconds property is set
internal bool IsSetTaskTimeLimitInSeconds()
{
return this._taskTimeLimitInSeconds.HasValue;
}
/// <summary>
/// Gets and sets the property TaskTitle.
/// <para>
/// A title for the task for your human workers.
/// </para>
/// </summary>
[AWSProperty(Required=true, Min=1, Max=128)]
public string TaskTitle
{
get { return this._taskTitle; }
set { this._taskTitle = value; }
}
// Check to see if TaskTitle property is set
internal bool IsSetTaskTitle()
{
return this._taskTitle != null;
}
/// <summary>
/// Gets and sets the property UiConfig.
/// <para>
/// Information about the user interface that workers use to complete the labeling task.
/// </para>
/// </summary>
[AWSProperty(Required=true)]
public UiConfig UiConfig
{
get { return this._uiConfig; }
set { this._uiConfig = value; }
}
// Check to see if UiConfig property is set
internal bool IsSetUiConfig()
{
return this._uiConfig != null;
}
/// <summary>
/// Gets and sets the property WorkteamArn.
/// <para>
/// The Amazon Resource Name (ARN) of the work team assigned to complete the tasks.
/// </para>
/// </summary>
[AWSProperty(Required=true, Max=256)]
public string WorkteamArn
{
get { return this._workteamArn; }
set { this._workteamArn = value; }
}
// Check to see if WorkteamArn property is set
internal bool IsSetWorkteamArn()
{
return this._workteamArn != null;
}
}
}