/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Do not modify this file. This file is generated from the rekognition-2016-06-27.normal.json service model.
*/
using System;
using System.Collections.Generic;
using System.Xml.Serialization;
using System.Text;
using System.IO;
using System.Net;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
namespace Amazon.Rekognition.Model
{
/// <summary>
/// Container for the parameters to the IndexFaces operation.
/// Detects faces in the input image and adds them to the specified collection.
///
/// <para>
/// Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying
/// detection algorithm first detects the faces in the input image. For each face, the
/// algorithm extracts facial features into a feature vector, and stores it in the backend
/// database. Amazon Rekognition uses feature vectors when it performs face match and
/// search operations using the SearchFaces and SearchFacesByImage operations.
/// </para>
/// <para>
/// For more information, see Adding faces to a collection in the Amazon Rekognition Developer
/// Guide.
/// </para>
/// <para>
/// To get the number of faces in a collection, call DescribeCollection.
/// </para>
/// <para>
/// If you're using version 1.0 of the face detection model, <code>IndexFaces</code> indexes
/// the 15 largest faces in the input image. Later versions of the face detection model
/// index the 100 largest faces in the input image.
/// </para>
/// <para>
/// If you're using version 4 or later of the face model, image orientation information
/// is not returned in the <code>OrientationCorrection</code> field.
/// </para>
/// <para>
/// To determine which version of the model you're using, call DescribeCollection
/// and supply the collection ID. You can also get the model version from the value of
/// <code>FaceModelVersion</code> in the response from <code>IndexFaces</code>.
/// </para>
/// <para>
/// For more information, see Model Versioning in the Amazon Rekognition Developer Guide.
/// </para>
/// <para>
/// If you provide the optional <code>ExternalImageId</code> for the input image you provided,
/// Amazon Rekognition associates this ID with all faces that it detects. When you call
/// the ListFaces operation, the response returns the external ID. You can use
/// this external image ID to create a client-side index to associate the faces with each
/// image. You can then use the index to find all faces in an image.
/// </para>
/// <para>
/// You can specify the maximum number of faces to index with the <code>MaxFaces</code>
/// input parameter. This is useful when you want to index the largest faces in an image
/// and don't want to index smaller faces, such as those belonging to people standing
/// in the background.
/// </para>
/// <para>
/// The <code>QualityFilter</code> input parameter allows you to filter out detected faces
/// that don't meet a required quality bar. The quality bar is based on a variety of common
/// use cases. By default, <code>IndexFaces</code> chooses the quality bar that's used
/// to filter faces. You can also explicitly choose the quality bar. Use <code>QualityFilter</code>,
/// to set the quality bar by specifying <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>.
/// If you do not want to filter detected faces, specify <code>NONE</code>.
/// </para>
/// <para>
/// To use quality filtering, you need a collection associated with version 3 of the face
/// model or higher. To get the version of the face model associated with a collection,
/// call DescribeCollection.
/// </para>
/// <para>
/// Information about faces detected in an image, but not indexed, is returned in an array
/// of UnindexedFace objects, <code>UnindexedFaces</code>. Faces aren't indexed
/// for reasons such as:
/// </para>
/// <ul>
///   <li>
///     <para>
///     The number of faces detected exceeds the value of the <code>MaxFaces</code> request
///     parameter.
///     </para>
///   </li>
///   <li>
///     <para>
///     The face is too small compared to the image dimensions.
///     </para>
///   </li>
///   <li>
///     <para>
///     The face is too blurry.
///     </para>
///   </li>
///   <li>
///     <para>
///     The image is too dark.
///     </para>
///   </li>
///   <li>
///     <para>
///     The face has an extreme pose.
///     </para>
///   </li>
///   <li>
///     <para>
///     The face doesn't have enough detail to be suitable for face search.
///     </para>
///   </li>
/// </ul>
/// <para>
/// In response, the <code>IndexFaces</code> operation returns an array of metadata for
/// all detected faces, <code>FaceRecords</code>. This includes:
/// </para>
/// <ul>
///   <li>
///     <para>
///     The bounding box, <code>BoundingBox</code>, of the detected face.
///     </para>
///   </li>
///   <li>
///     <para>
///     A confidence value, <code>Confidence</code>, which indicates the confidence that the
///     bounding box contains a face.
///     </para>
///   </li>
///   <li>
///     <para>
///     A face ID, <code>FaceId</code>, assigned by the service for each face that's detected
///     and stored.
///     </para>
///   </li>
///   <li>
///     <para>
///     An image ID, <code>ImageId</code>, assigned by the service for the input image.
///     </para>
///   </li>
/// </ul>
/// <para>
/// If you request <code>ALL</code> or specific facial attributes (e.g., <code>FACE_OCCLUDED</code>)
/// by using the detectionAttributes parameter, Amazon Rekognition returns detailed facial
/// attributes, such as facial landmarks (for example, location of eye and mouth), facial
/// occlusion, and other facial attributes.
/// </para>
/// <para>
/// If you provide the same image, specify the same collection, and use the same external
/// ID in the <code>IndexFaces</code> operation, Amazon Rekognition doesn't save duplicate
/// face metadata.
/// </para>
/// <para>
/// The input image is passed either as base64-encoded image bytes, or as a reference
/// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition
/// operations, passing image bytes isn't supported. The image must be formatted as a
/// PNG or JPEG file.
/// </para>
/// <para>
/// This operation requires permissions to perform the <code>rekognition:IndexFaces</code>
/// action.
/// </para>
/// </summary>
public partial class IndexFacesRequest : AmazonRekognitionRequest
{
    private string _collectionId;
    private List<string> _detectionAttributes = new List<string>();
    private string _externalImageId;
    private Image _image;
    private int? _maxFaces;
    private QualityFilter _qualityFilter;

    /// <summary>
    /// Gets and sets the property CollectionId.
    /// <para>
    /// The ID of an existing collection to which you want to add the faces that are detected
    /// in the input images.
    /// </para>
    /// </summary>
    [AWSProperty(Required=true, Min=1, Max=255)]
    public string CollectionId
    {
        get { return this._collectionId; }
        set { this._collectionId = value; }
    }

    // Check to see if CollectionId property is set
    internal bool IsSetCollectionId()
    {
        return this._collectionId != null;
    }

    /// <summary>
    /// Gets and sets the property DetectionAttributes.
    /// <para>
    /// An array of facial attributes you want to be returned. A <code>DEFAULT</code> subset
    /// of facial attributes - <code>BoundingBox</code>, <code>Confidence</code>, <code>Pose</code>,
    /// <code>Quality</code>, and <code>Landmarks</code> - will always be returned. You can
    /// request for specific facial attributes (in addition to the default list) - by using
    /// <code>["DEFAULT", "FACE_OCCLUDED"]</code> or just <code>["FACE_OCCLUDED"]</code>.
    /// You can request for all facial attributes by using <code>["ALL"]</code>. Requesting
    /// more attributes may increase response time.
    /// </para>
    /// <para>
    /// If you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical AND
    /// operator to determine which attributes to return (in this case, all attributes).
    /// </para>
    /// </summary>
    public List<string> DetectionAttributes
    {
        get { return this._detectionAttributes; }
        set { this._detectionAttributes = value; }
    }

    // Check to see if DetectionAttributes property is set
    internal bool IsSetDetectionAttributes()
    {
        return this._detectionAttributes != null && this._detectionAttributes.Count > 0;
    }

    /// <summary>
    /// Gets and sets the property ExternalImageId.
    /// <para>
    /// The ID you want to assign to all the faces detected in the image.
    /// </para>
    /// </summary>
    [AWSProperty(Min=1, Max=255)]
    public string ExternalImageId
    {
        get { return this._externalImageId; }
        set { this._externalImageId = value; }
    }

    // Check to see if ExternalImageId property is set
    internal bool IsSetExternalImageId()
    {
        return this._externalImageId != null;
    }

    /// <summary>
    /// Gets and sets the property Image.
    /// <para>
    /// The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to
    /// call Amazon Rekognition operations, passing base64-encoded image bytes isn't supported.
    /// </para>
    /// <para>
    /// If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode
    /// image bytes passed using the <code>Bytes</code> field. For more information, see Images
    /// in the Amazon Rekognition developer guide.
    /// </para>
    /// </summary>
    [AWSProperty(Required=true)]
    public Image Image
    {
        get { return this._image; }
        set { this._image = value; }
    }

    // Check to see if Image property is set
    internal bool IsSetImage()
    {
        return this._image != null;
    }

    /// <summary>
    /// Gets and sets the property MaxFaces.
    /// <para>
    /// The maximum number of faces to index. The value of <code>MaxFaces</code> must be greater
    /// than or equal to 1. <code>IndexFaces</code> returns no more than 100 detected faces
    /// in an image, even if you specify a larger value for <code>MaxFaces</code>.
    /// </para>
    /// <para>
    /// If <code>IndexFaces</code> detects more faces than the value of <code>MaxFaces</code>,
    /// the faces with the lowest quality are filtered out first. If there are still more
    /// faces than the value of <code>MaxFaces</code>, the faces with the smallest bounding
    /// boxes are filtered out (up to the number that's needed to satisfy the value of <code>MaxFaces</code>).
    /// Information about the unindexed faces is available in the <code>UnindexedFaces</code>
    /// array.
    /// </para>
    /// <para>
    /// The faces that are returned by <code>IndexFaces</code> are sorted by the largest face
    /// bounding box size to the smallest size, in descending order.
    /// </para>
    /// <para>
    /// <code>MaxFaces</code> can be used with a collection associated with any version of
    /// the face model.
    /// </para>
    /// </summary>
    [AWSProperty(Min=1)]
    public int MaxFaces
    {
        get { return this._maxFaces.GetValueOrDefault(); }
        set { this._maxFaces = value; }
    }

    // Check to see if MaxFaces property is set
    internal bool IsSetMaxFaces()
    {
        return this._maxFaces.HasValue;
    }

    /// <summary>
    /// Gets and sets the property QualityFilter.
    /// <para>
    /// A filter that specifies a quality bar for how much filtering is done to identify faces.
    /// Filtered faces aren't indexed. If you specify <code>AUTO</code>, Amazon Rekognition
    /// chooses the quality bar. If you specify <code>LOW</code>, <code>MEDIUM</code>, or
    /// <code>HIGH</code>, filtering removes all faces that don't meet the chosen quality
    /// bar. The default value is <code>AUTO</code>. The quality bar is based on a variety
    /// of common use cases. Low-quality detections can occur for a number of reasons. Some
    /// examples are an object that's misidentified as a face, a face that's too blurry, or
    /// a face with a pose that's too extreme to use. If you specify <code>NONE</code>, no
    /// filtering is performed.
    /// </para>
    /// <para>
    /// To use quality filtering, the collection you are using must be associated with version
    /// 3 of the face model or higher.
    /// </para>
    /// </summary>
    public QualityFilter QualityFilter
    {
        get { return this._qualityFilter; }
        set { this._qualityFilter = value; }
    }

    // Check to see if QualityFilter property is set
    internal bool IsSetQualityFilter()
    {
        return this._qualityFilter != null;
    }
}
}