/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Do not modify this file. This file is generated from the rekognition-2016-06-27.normal.json service model.
*/
using System;
using System.Collections.Generic;
using System.Xml.Serialization;
using System.Text;
using System.IO;
using System.Net;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
namespace Amazon.Rekognition.Model
{
    /// <summary>
    /// Container for the parameters to the DetectFaces operation.
    /// Detects faces within an image that is provided as input.
    ///
    /// <para>
    /// <c>DetectFaces</c> detects the 100 largest faces in the image. For each face
    /// detected, the operation returns face details. These details include a bounding box
    /// of the face, a confidence value (that the bounding box contains a face), and a fixed
    /// set of attributes such as facial landmarks (for example, coordinates of eye and mouth),
    /// pose, presence of facial occlusion, and so on.
    /// </para>
    ///
    /// <para>
    /// The face-detection algorithm is most effective on frontal faces. For non-frontal or
    /// obscured faces, the algorithm might not detect the faces or might detect faces with
    /// lower confidence.
    /// </para>
    ///
    /// <para>
    /// You pass the input image either as base64-encoded image bytes or as a reference to
    /// an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition
    /// operations, passing image bytes is not supported. The image must be either a PNG or
    /// JPEG formatted file.
    /// </para>
    /// <note>
    /// <para>
    /// This is a stateless API operation. That is, the operation does not persist any data.
    /// </para>
    /// </note>
    /// <para>
    /// This operation requires permissions to perform the <c>rekognition:DetectFaces</c>
    /// action.
    /// </para>
    /// </summary>
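    /// <example>
    /// <para>
    /// A minimal usage sketch (illustrative only, not part of the generated model; the
    /// bucket and object key below are placeholders):
    /// </para>
    /// <code>
    /// // Assumes: using Amazon.Rekognition; using Amazon.Rekognition.Model;
    /// // and an async calling context.
    /// var client = new AmazonRekognitionClient();
    /// var request = new DetectFacesRequest
    /// {
    ///     Image = new Image
    ///     {
    ///         S3Object = new S3Object { Bucket = "my-bucket", Name = "photos/portrait.jpg" }
    ///     }
    /// };
    /// DetectFacesResponse response = await client.DetectFacesAsync(request);
    /// foreach (FaceDetail face in response.FaceDetails)
    /// {
    ///     Console.WriteLine($"Face detected with confidence {face.Confidence}");
    /// }
    /// </code>
    /// </example>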
    public partial class DetectFacesRequest : AmazonRekognitionRequest
    {
        private List<string> _attributes = new List<string>();
        private Image _image;
        /// <summary>
        /// Gets and sets the property Attributes.
        /// <para>
        /// An array of facial attributes you want to be returned. A <c>DEFAULT</c> subset
        /// of facial attributes - <c>BoundingBox</c>, <c>Confidence</c>, <c>Pose</c>,
        /// <c>Quality</c>, and <c>Landmarks</c> - will always be returned. You can request
        /// specific facial attributes (in addition to the default list) by using
        /// <c>["DEFAULT", "FACE_OCCLUDED"]</c> or just <c>["FACE_OCCLUDED"]</c>. You can
        /// request all facial attributes by using <c>["ALL"]</c>. Requesting more attributes
        /// may increase response time.
        /// </para>
        ///
        /// <para>
        /// If you provide both, <c>["ALL", "DEFAULT"]</c>, the service uses a logical "AND"
        /// operator to determine which attributes to return (in this case, all attributes).
        /// </para>
        /// </summary>
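        /// <example>
        /// <para>
        /// A minimal sketch of requesting an extra attribute (illustrative only; assumes a
        /// <c>DetectFacesRequest</c> instance named <c>request</c>):
        /// </para>
        /// <code>
        /// // Return the default attribute subset plus face-occlusion information.
        /// request.Attributes.Add("DEFAULT");
        /// request.Attributes.Add("FACE_OCCLUDED");
        /// // To receive every available attribute, use the single value "ALL" instead.
        /// </code>
        /// </example>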
        public List<string> Attributes
        {
            get { return this._attributes; }
            set { this._attributes = value; }
        }
        // Check to see if Attributes property is set
        internal bool IsSetAttributes()
        {
            return this._attributes != null && this._attributes.Count > 0;
        }
        /// <summary>
        /// Gets and sets the property Image.
        /// <para>
        /// The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to
        /// call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.
        /// </para>
        ///
        /// <para>
        /// If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode
        /// image bytes passed using the <c>Bytes</c> field. For more information, see Images
        /// in the Amazon Rekognition developer guide.
        /// </para>
        /// </summary>
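        /// <example>
        /// <para>
        /// A minimal sketch of supplying the image as raw bytes rather than an S3 reference
        /// (illustrative only; the file path is a placeholder and <c>using System.IO;</c> is assumed):
        /// </para>
        /// <code>
        /// // As noted above, the SDK generally handles any required base64 encoding for you.
        /// var request = new DetectFacesRequest
        /// {
        ///     Image = new Image { Bytes = new MemoryStream(File.ReadAllBytes("portrait.jpg")) }
        /// };
        /// </code>
        /// </example>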
        [AWSProperty(Required=true)]
        public Image Image
        {
            get { return this._image; }
            set { this._image = value; }
        }
        // Check to see if Image property is set
        internal bool IsSetImage()
        {
            return this._image != null;
        }
    }
}