/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/OrientationCorrection.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/FaceRecord.h>
#include <aws/rekognition/model/UnindexedFace.h>
#include <utility>

namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;

namespace Utils
{
namespace Json
{
  class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
  class IndexFacesResult
  {
  public:
    AWS_REKOGNITION_API IndexFacesResult();
    AWS_REKOGNITION_API IndexFacesResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
    AWS_REKOGNITION_API IndexFacesResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);


    ///@{
    /**
     * <p>An array of faces detected and added to the collection. For more
     * information, see Searching Faces in a Collection in the Amazon Rekognition
     * Developer Guide.</p>
     */
    inline const Aws::Vector<FaceRecord>& GetFaceRecords() const{ return m_faceRecords; }
    inline void SetFaceRecords(const Aws::Vector<FaceRecord>& value) { m_faceRecords = value; }
    inline void SetFaceRecords(Aws::Vector<FaceRecord>&& value) { m_faceRecords = std::move(value); }
    inline IndexFacesResult& WithFaceRecords(const Aws::Vector<FaceRecord>& value) { SetFaceRecords(value); return *this;}
    inline IndexFacesResult& WithFaceRecords(Aws::Vector<FaceRecord>&& value) { SetFaceRecords(std::move(value)); return *this;}
    inline IndexFacesResult& AddFaceRecords(const FaceRecord& value) { m_faceRecords.push_back(value); return *this; }
    inline IndexFacesResult& AddFaceRecords(FaceRecord&& value) { m_faceRecords.push_back(std::move(value)); return *this; }
    ///@}
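    /*
     * Usage sketch (illustrative only, not part of the generated API): reading the
     * indexed faces from a successful IndexFaces call. Assumes a configured
     * RekognitionClient, a populated IndexFacesRequest, and std::cout from
     * <iostream>.
     *
     *   auto outcome = client.IndexFaces(request);
     *   if (outcome.IsSuccess())
     *   {
     *       for (const auto& record : outcome.GetResult().GetFaceRecords())
     *       {
     *           // Each FaceRecord pairs the stored Face (FaceId and related IDs)
     *           // with the detected FaceDetail (bounding box, landmarks, etc.).
     *           std::cout << "Indexed FaceId: " << record.GetFace().GetFaceId() << std::endl;
     *       }
     *   }
     */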

    ///@{
    /**
     * <p>If your collection is associated with a face detection model that's later
     * than version 3.0, the value of <code>OrientationCorrection</code> is always
     * null and no orientation information is returned.</p> <p>If your collection is
     * associated with a face detection model that's version 3.0 or earlier, the
     * following applies:</p> <ul> <li> <p>If the input image is in .jpeg format, it
     * might contain exchangeable image file format (Exif) metadata that includes the
     * image's orientation. Amazon Rekognition uses this orientation information to
     * perform image correction - the bounding box coordinates are translated to
     * represent object locations after the orientation information in the Exif
     * metadata is used to correct the image orientation. Images in .png format don't
     * contain Exif metadata. The value of <code>OrientationCorrection</code> is
     * null.</p> </li> <li> <p>If the image doesn't contain orientation information in
     * its Exif metadata, Amazon Rekognition returns an estimated orientation
     * (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't
     * perform image correction. The bounding box coordinates aren't translated and
     * represent the object locations before the image is rotated.</p> </li> </ul>
     * <p>Bounding box information is returned in the <code>FaceRecords</code> array.
     * You can get the version of the face detection model by calling
     * <a>DescribeCollection</a>.</p>
     */
    inline const OrientationCorrection& GetOrientationCorrection() const{ return m_orientationCorrection; }
    inline void SetOrientationCorrection(const OrientationCorrection& value) { m_orientationCorrection = value; }
    inline void SetOrientationCorrection(OrientationCorrection&& value) { m_orientationCorrection = std::move(value); }
    inline IndexFacesResult& WithOrientationCorrection(const OrientationCorrection& value) { SetOrientationCorrection(value); return *this;}
    inline IndexFacesResult& WithOrientationCorrection(OrientationCorrection&& value) { SetOrientationCorrection(std::move(value)); return *this;}
    ///@}
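    /*
     * Usage sketch (illustrative only, not part of the generated API). Assumption:
     * the generated result defaults this member to OrientationCorrection::NOT_SET,
     * so a null/omitted value from the service (face detection models later than
     * version 3.0) surfaces as NOT_SET.
     *
     *   const auto& result = outcome.GetResult();
     *   if (result.GetOrientationCorrection() != OrientationCorrection::NOT_SET)
     *   {
     *       // Only collections on face detection model version 3.0 or earlier
     *       // return an orientation value.
     *       std::cout << "Estimated orientation: "
     *                 << OrientationCorrectionMapper::GetNameForOrientationCorrection(
     *                        result.GetOrientationCorrection())
     *                 << std::endl;
     *   }
     */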

    ///@{
    /**
     * <p>The version number of the face detection model that's associated with the
     * input collection (<code>CollectionId</code>).</p>
     */
    inline const Aws::String& GetFaceModelVersion() const{ return m_faceModelVersion; }
    inline void SetFaceModelVersion(const Aws::String& value) { m_faceModelVersion = value; }
    inline void SetFaceModelVersion(Aws::String&& value) { m_faceModelVersion = std::move(value); }
    inline void SetFaceModelVersion(const char* value) { m_faceModelVersion.assign(value); }
    inline IndexFacesResult& WithFaceModelVersion(const Aws::String& value) { SetFaceModelVersion(value); return *this;}
    inline IndexFacesResult& WithFaceModelVersion(Aws::String&& value) { SetFaceModelVersion(std::move(value)); return *this;}
    inline IndexFacesResult& WithFaceModelVersion(const char* value) { SetFaceModelVersion(value); return *this;}
    ///@}
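    /*
     * Usage sketch (illustrative only, not part of the generated API): the model
     * version is useful for logging, and for deciding whether the orientation
     * handling above applies at all (it's only relevant for versions 3.0 and
     * earlier).
     *
     *   std::cout << "Face model version: "
     *             << outcome.GetResult().GetFaceModelVersion() << std::endl;
     */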

    ///@{
    /**
     * <p>An array of faces that were detected in the image but weren't indexed. They
     * weren't indexed because the quality filter identified them as low quality, or
     * the <code>MaxFaces</code> request parameter filtered them out. To use the
     * quality filter, you specify the <code>QualityFilter</code> request
     * parameter.</p>
     */
    inline const Aws::Vector<UnindexedFace>& GetUnindexedFaces() const{ return m_unindexedFaces; }
    inline void SetUnindexedFaces(const Aws::Vector<UnindexedFace>& value) { m_unindexedFaces = value; }
    inline void SetUnindexedFaces(Aws::Vector<UnindexedFace>&& value) { m_unindexedFaces = std::move(value); }
    inline IndexFacesResult& WithUnindexedFaces(const Aws::Vector<UnindexedFace>& value) { SetUnindexedFaces(value); return *this;}
    inline IndexFacesResult& WithUnindexedFaces(Aws::Vector<UnindexedFace>&& value) { SetUnindexedFaces(std::move(value)); return *this;}
    inline IndexFacesResult& AddUnindexedFaces(const UnindexedFace& value) { m_unindexedFaces.push_back(value); return *this; }
    inline IndexFacesResult& AddUnindexedFaces(UnindexedFace&& value) { m_unindexedFaces.push_back(std::move(value)); return *this; }
    ///@}

    ///@{
    inline const Aws::String& GetRequestId() const{ return m_requestId; }
    inline void SetRequestId(const Aws::String& value) { m_requestId = value; }
    inline void SetRequestId(Aws::String&& value) { m_requestId = std::move(value); }
    inline void SetRequestId(const char* value) { m_requestId.assign(value); }
    inline IndexFacesResult& WithRequestId(const Aws::String& value) { SetRequestId(value); return *this;}
    inline IndexFacesResult& WithRequestId(Aws::String&& value) { SetRequestId(std::move(value)); return *this;}
    inline IndexFacesResult& WithRequestId(const char* value) { SetRequestId(value); return *this;}
    ///@}

  private:

    Aws::Vector<FaceRecord> m_faceRecords;

    OrientationCorrection m_orientationCorrection;

    Aws::String m_faceModelVersion;

    Aws::Vector<UnindexedFace> m_unindexedFaces;

    Aws::String m_requestId;
  };

} // namespace Model
} // namespace Rekognition
} // namespace Aws
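/*
 * End-to-end usage sketch (illustrative only; the collection, bucket, and object
 * names below are hypothetical). Assumes the SDK has been initialized with
 * Aws::InitAPI and that <aws/rekognition/RekognitionClient.h>,
 * <aws/rekognition/model/IndexFacesRequest.h>, and <iostream> are included.
 *
 *   using namespace Aws::Rekognition;
 *
 *   RekognitionClient client;
 *
 *   Model::S3Object s3Object;
 *   s3Object.SetBucket("my-bucket");             // hypothetical bucket
 *   s3Object.SetName("photo.jpg");               // hypothetical object key
 *   Model::Image image;
 *   image.SetS3Object(s3Object);
 *
 *   Model::IndexFacesRequest request;
 *   request.SetCollectionId("my-collection");    // hypothetical collection
 *   request.SetImage(image);
 *   request.SetMaxFaces(5);
 *   request.SetQualityFilter(Model::QualityFilter::AUTO);
 *
 *   auto outcome = client.IndexFaces(request);
 *   if (outcome.IsSuccess())
 *   {
 *       // Faces rejected by MaxFaces or the quality filter land in
 *       // UnindexedFaces, each carrying the reasons it was skipped.
 *       for (const auto& unindexed : outcome.GetResult().GetUnindexedFaces())
 *       {
 *           for (const auto& reason : unindexed.GetReasons())
 *           {
 *               std::cout << Model::ReasonMapper::GetNameForReason(reason) << std::endl;
 *           }
 *       }
 *   }
 */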