/*
 * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.rekognition.model;

import java.io.Serializable;
import javax.annotation.Generated;

@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class IndexFacesResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {
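    /*
     * Illustrative usage sketch (not part of the generated model): an IndexFacesResult is normally obtained from
     * AmazonRekognition#indexFaces(IndexFacesRequest). The snippet assumes an already-configured client, an existing
     * collection, and an image in S3; names such as "my-collection" and "my-bucket" are placeholders.
     *
     *   AmazonRekognition rekognition = AmazonRekognitionClientBuilder.defaultClient();
     *   IndexFacesRequest request = new IndexFacesRequest()
     *           .withCollectionId("my-collection")
     *           .withImage(new Image().withS3Object(new S3Object().withBucket("my-bucket").withName("photo.jpg")))
     *           .withMaxFaces(5)
     *           .withQualityFilter(QualityFilter.AUTO);
     *   IndexFacesResult result = rekognition.indexFaces(request);
     *
     *   System.out.println("Face model version: " + result.getFaceModelVersion());
     *   for (FaceRecord record : result.getFaceRecords()) {
     *       // Each record pairs the stored Face (with its FaceId) with the detected FaceDetail.
     *       System.out.println(record.getFace().getFaceId() + " confidence=" + record.getFace().getConfidence());
     *   }
     */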

    /**
     * <p>An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon Rekognition
     * Developer Guide.</p>
     */
    private java.util.List<FaceRecord> faceRecords;
    /**
     * <p>If your collection is associated with a face detection model that's later than version 3.0, the value of
     * <code>OrientationCorrection</code> is always null and no orientation information is returned.</p>
     * <p>If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:</p>
     * <ul>
     * <li>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's
     * orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to
     * represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png
     * format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</li>
     * <li>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0,
     * ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't perform image correction for images. The bounding box coordinates aren't
     * translated and represent the object locations before the image is rotated.</li>
     * </ul>
     * <p>Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by calling
     * DescribeCollection.</p>
     */
    private String orientationCorrection;
    /**
     * <p>The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).</p>
     */
    private String faceModelVersion;
    /**
     * <p>An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them as
     * low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the
     * <code>QualityFilter</code> request parameter.</p>
     */
    private java.util.List<UnindexedFace> unindexedFaces;

    /**
     * <p>An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon Rekognition
     * Developer Guide.</p>
     *
     * @return An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon
     *         Rekognition Developer Guide.
     */
    public java.util.List<FaceRecord> getFaceRecords() {
        return faceRecords;
    }

    /**
     * <p>An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon Rekognition
     * Developer Guide.</p>
     *
     * @param faceRecords
     *        An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon
     *        Rekognition Developer Guide.
     */
    public void setFaceRecords(java.util.Collection<FaceRecord> faceRecords) {
        if (faceRecords == null) {
            this.faceRecords = null;
            return;
        }
        this.faceRecords = new java.util.ArrayList<FaceRecord>(faceRecords);
    }

    /**
     * <p>An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon Rekognition
     * Developer Guide.</p>
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setFaceRecords(java.util.Collection)} or {@link #withFaceRecords(java.util.Collection)} if you want to
     * override the existing values.
     * </p>
     *
     * @param faceRecords
     *        An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon
     *        Rekognition Developer Guide.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public IndexFacesResult withFaceRecords(FaceRecord... faceRecords) {
        if (this.faceRecords == null) {
            setFaceRecords(new java.util.ArrayList<FaceRecord>(faceRecords.length));
        }
        for (FaceRecord ele : faceRecords) {
            this.faceRecords.add(ele);
        }
        return this;
    }
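    /*
     * Illustrative sketch (not part of the generated model) of the append-vs-override behavior documented above.
     * recordA, recordB, and recordC are hypothetical FaceRecord instances.
     *
     *   IndexFacesResult r = new IndexFacesResult();
     *   r.withFaceRecords(recordA);                          // list is now [recordA]
     *   r.withFaceRecords(recordB, recordC);                 // varargs form appends: [recordA, recordB, recordC]
     *   r.setFaceRecords(java.util.Arrays.asList(recordA));  // the setter (and the Collection overload of withFaceRecords) replaces: [recordA]
     */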

    /**
     * <p>An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon Rekognition
     * Developer Guide.</p>
     *
     * @param faceRecords
     *        An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon
     *        Rekognition Developer Guide.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public IndexFacesResult withFaceRecords(java.util.Collection<FaceRecord> faceRecords) {
        setFaceRecords(faceRecords);
        return this;
    }

    /**
     * <p>If your collection is associated with a face detection model that's later than version 3.0, the value of
     * <code>OrientationCorrection</code> is always null and no orientation information is returned.</p>
     * <p>If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:</p>
     * <ul>
     * <li>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's
     * orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to
     * represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png
     * format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</li>
     * <li>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0,
     * ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't perform image correction for images. The bounding box coordinates aren't
     * translated and represent the object locations before the image is rotated.</li>
     * </ul>
     * <p>Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by calling
     * DescribeCollection.</p>
     *
     * @param orientationCorrection
     *        If your collection is associated with a face detection model that's later than version 3.0, the value of
     *        <code>OrientationCorrection</code> is always null and no orientation information is returned. If your collection is associated with a
     *        face detection model that's version 3.0 or earlier, the following applies:
     *        <ul>
     *        <li>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's
     *        orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are
     *        translated to represent object locations after the orientation information in the Exif metadata is used to correct the image
     *        orientation. Images in .png format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</li>
     *        <li>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation
     *        (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't perform image correction for images. The bounding box
     *        coordinates aren't translated and represent the object locations before the image is rotated.</li>
     *        </ul>
     *        Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by
     *        calling DescribeCollection.
     * @see OrientationCorrection
     */
    public void setOrientationCorrection(String orientationCorrection) {
        this.orientationCorrection = orientationCorrection;
    }

    /**
     * <p>If your collection is associated with a face detection model that's later than version 3.0, the value of
     * <code>OrientationCorrection</code> is always null and no orientation information is returned.</p>
     * <p>If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:</p>
     * <ul>
     * <li>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's
     * orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to
     * represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png
     * format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</li>
     * <li>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0,
     * ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't perform image correction for images. The bounding box coordinates aren't
     * translated and represent the object locations before the image is rotated.</li>
     * </ul>
     * <p>Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by calling
     * DescribeCollection.</p>
     *
     * @return If your collection is associated with a face detection model that's later than version 3.0, the value of
     *         <code>OrientationCorrection</code> is always null and no orientation information is returned. If your collection is associated with a
     *         face detection model that's version 3.0 or earlier, the following applies:
     *         <ul>
     *         <li>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's
     *         orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are
     *         translated to represent object locations after the orientation information in the Exif metadata is used to correct the image
     *         orientation. Images in .png format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</li>
     *         <li>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation
     *         (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't perform image correction for images. The bounding box
     *         coordinates aren't translated and represent the object locations before the image is rotated.</li>
     *         </ul>
     *         Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by
     *         calling DescribeCollection.
     * @see OrientationCorrection
     */
    public String getOrientationCorrection() {
        return this.orientationCorrection;
    }

    /**
     * <p>If your collection is associated with a face detection model that's later than version 3.0, the value of
     * <code>OrientationCorrection</code> is always null and no orientation information is returned.</p>
     * <p>If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:</p>
     * <ul>
     * <li>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's
     * orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to
     * represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png
     * format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</li>
     * <li>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0,
     * ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't perform image correction for images. The bounding box coordinates aren't
     * translated and represent the object locations before the image is rotated.</li>
     * </ul>
     * <p>Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by calling
     * DescribeCollection.</p>
     *
     * @param orientationCorrection
     *        If your collection is associated with a face detection model that's later than version 3.0, the value of
     *        <code>OrientationCorrection</code> is always null and no orientation information is returned. If your collection is associated with a
     *        face detection model that's version 3.0 or earlier, the following applies:
     *        <ul>
     *        <li>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's
     *        orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are
     *        translated to represent object locations after the orientation information in the Exif metadata is used to correct the image
     *        orientation. Images in .png format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</li>
     *        <li>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation
     *        (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't perform image correction for images. The bounding box
     *        coordinates aren't translated and represent the object locations before the image is rotated.</li>
     *        </ul>
     *        Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by
     *        calling DescribeCollection.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see OrientationCorrection
     */
    public IndexFacesResult withOrientationCorrection(String orientationCorrection) {
        setOrientationCorrection(orientationCorrection);
        return this;
    }

    /**
     * <p>If your collection is associated with a face detection model that's later than version 3.0, the value of
     * <code>OrientationCorrection</code> is always null and no orientation information is returned.</p>
     * <p>If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:</p>
     * <ul>
     * <li>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's
     * orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to
     * represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png
     * format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</li>
     * <li>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0,
     * ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't perform image correction for images. The bounding box coordinates aren't
     * translated and represent the object locations before the image is rotated.</li>
     * </ul>
     * <p>Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by calling
     * DescribeCollection.</p>
     *
     * @param orientationCorrection
     *        If your collection is associated with a face detection model that's later than version 3.0, the value of
     *        <code>OrientationCorrection</code> is always null and no orientation information is returned. If your collection is associated with a
     *        face detection model that's version 3.0 or earlier, the following applies:
     *        <ul>
     *        <li>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's
     *        orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are
     *        translated to represent object locations after the orientation information in the Exif metadata is used to correct the image
     *        orientation. Images in .png format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</li>
     *        <li>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation
     *        (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't perform image correction for images. The bounding box
     *        coordinates aren't translated and represent the object locations before the image is rotated.</li>
     *        </ul>
     *        Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by
     *        calling DescribeCollection.
     * @see OrientationCorrection
     */
    public void setOrientationCorrection(OrientationCorrection orientationCorrection) {
        withOrientationCorrection(orientationCorrection);
    }

    /**
     * <p>If your collection is associated with a face detection model that's later than version 3.0, the value of
     * <code>OrientationCorrection</code> is always null and no orientation information is returned.</p>
     * <p>If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:</p>
     * <ul>
     * <li>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's
     * orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to
     * represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png
     * format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</li>
     * <li>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0,
     * ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't perform image correction for images. The bounding box coordinates aren't
     * translated and represent the object locations before the image is rotated.</li>
     * </ul>
     * <p>Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by calling
     * DescribeCollection.</p>
     *
     * @param orientationCorrection
     *        If your collection is associated with a face detection model that's later than version 3.0, the value of
     *        <code>OrientationCorrection</code> is always null and no orientation information is returned. If your collection is associated with a
     *        face detection model that's version 3.0 or earlier, the following applies:
     *        <ul>
     *        <li>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's
     *        orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are
     *        translated to represent object locations after the orientation information in the Exif metadata is used to correct the image
     *        orientation. Images in .png format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</li>
     *        <li>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation
     *        (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't perform image correction for images. The bounding box
     *        coordinates aren't translated and represent the object locations before the image is rotated.</li>
     *        </ul>
     *        Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by
     *        calling DescribeCollection.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see OrientationCorrection
     */
    public IndexFacesResult withOrientationCorrection(OrientationCorrection orientationCorrection) {
        this.orientationCorrection = orientationCorrection.toString();
        return this;
    }
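    /*
     * Illustrative sketch (not part of the generated model): how a caller might react to the orientation value for a
     * collection that is still on face detection model version 3.0 or earlier. The rotateBoundingBoxes helper is
     * hypothetical; collections on newer model versions return null here and need no correction.
     *
     *   String correction = result.getOrientationCorrection();
     *   if (correction != null) {
     *       switch (OrientationCorrection.fromValue(correction)) {
     *           case ROTATE_0:
     *               break; // coordinates already match the displayed image
     *           case ROTATE_90:
     *           case ROTATE_180:
     *           case ROTATE_270:
     *               rotateBoundingBoxes(result.getFaceRecords(), correction); // hypothetical helper
     *               break;
     *       }
     *   }
     */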

    /**
     * <p>The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).</p>
     *
     * @param faceModelVersion
     *        The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).
     */
    public void setFaceModelVersion(String faceModelVersion) {
        this.faceModelVersion = faceModelVersion;
    }

    /**
     * <p>The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).</p>
     *
     * @return The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).
     */
    public String getFaceModelVersion() {
        return this.faceModelVersion;
    }

    /**
     * <p>The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).</p>
     *
     * @param faceModelVersion
     *        The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public IndexFacesResult withFaceModelVersion(String faceModelVersion) {
        setFaceModelVersion(faceModelVersion);
        return this;
    }

    /**
     * <p>An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them as
     * low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the
     * <code>QualityFilter</code> request parameter.</p>
     *
     * @return An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them
     *         as low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the
     *         <code>QualityFilter</code> request parameter.
     */
    public java.util.List<UnindexedFace> getUnindexedFaces() {
        return unindexedFaces;
    }
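    /*
     * Illustrative sketch (not part of the generated model): inspecting why faces were skipped. Each UnindexedFace
     * carries the detected FaceDetail plus reason codes such as EXCEEDS_MAX_FACES or LOW_CONFIDENCE (values of the
     * Reason enum).
     *
     *   for (UnindexedFace skipped : result.getUnindexedFaces()) {
     *       System.out.println("Not indexed: " + skipped.getReasons()
     *               + " at bounding box " + skipped.getFaceDetail().getBoundingBox());
     *   }
     */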

    /**
     * <p>An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them as
     * low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the
     * <code>QualityFilter</code> request parameter.</p>
     *
     * @param unindexedFaces
     *        An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them
     *        as low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the
     *        <code>QualityFilter</code> request parameter.
     */
    public void setUnindexedFaces(java.util.Collection<UnindexedFace> unindexedFaces) {
        if (unindexedFaces == null) {
            this.unindexedFaces = null;
            return;
        }
        this.unindexedFaces = new java.util.ArrayList<UnindexedFace>(unindexedFaces);
    }

    /**
     * <p>An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them as
     * low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the
     * <code>QualityFilter</code> request parameter.</p>
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setUnindexedFaces(java.util.Collection)} or {@link #withUnindexedFaces(java.util.Collection)} if you want to override the existing
     * values.
     * </p>
     *
     * @param unindexedFaces
     *        An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them
     *        as low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the
     *        <code>QualityFilter</code> request parameter.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public IndexFacesResult withUnindexedFaces(UnindexedFace... unindexedFaces) {
        if (this.unindexedFaces == null) {
            setUnindexedFaces(new java.util.ArrayList<UnindexedFace>(unindexedFaces.length));
        }
        for (UnindexedFace ele : unindexedFaces) {
            this.unindexedFaces.add(ele);
        }
        return this;
    }

    /**
     * <p>An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them as
     * low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the
     * <code>QualityFilter</code> request parameter.</p>
     *
     * @param unindexedFaces
     *        An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them
     *        as low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the
     *        <code>QualityFilter</code> request parameter.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public IndexFacesResult withUnindexedFaces(java.util.Collection<UnindexedFace> unindexedFaces) {
        setUnindexedFaces(unindexedFaces);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getFaceRecords() != null)
            sb.append("FaceRecords: ").append(getFaceRecords()).append(",");
        if (getOrientationCorrection() != null)
            sb.append("OrientationCorrection: ").append(getOrientationCorrection()).append(",");
        if (getFaceModelVersion() != null)
            sb.append("FaceModelVersion: ").append(getFaceModelVersion()).append(",");
        if (getUnindexedFaces() != null)
            sb.append("UnindexedFaces: ").append(getUnindexedFaces());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;

        if (obj instanceof IndexFacesResult == false)
            return false;
        IndexFacesResult other = (IndexFacesResult) obj;
        if (other.getFaceRecords() == null ^ this.getFaceRecords() == null)
            return false;
        if (other.getFaceRecords() != null && other.getFaceRecords().equals(this.getFaceRecords()) == false)
            return false;
        if (other.getOrientationCorrection() == null ^ this.getOrientationCorrection() == null)
            return false;
        if (other.getOrientationCorrection() != null && other.getOrientationCorrection().equals(this.getOrientationCorrection()) == false)
            return false;
        if (other.getFaceModelVersion() == null ^ this.getFaceModelVersion() == null)
            return false;
        if (other.getFaceModelVersion() != null && other.getFaceModelVersion().equals(this.getFaceModelVersion()) == false)
            return false;
        if (other.getUnindexedFaces() == null ^ this.getUnindexedFaces() == null)
            return false;
        if (other.getUnindexedFaces() != null && other.getUnindexedFaces().equals(this.getUnindexedFaces()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + ((getFaceRecords() == null) ? 0 : getFaceRecords().hashCode());
        hashCode = prime * hashCode + ((getOrientationCorrection() == null) ? 0 : getOrientationCorrection().hashCode());
        hashCode = prime * hashCode + ((getFaceModelVersion() == null) ? 0 : getFaceModelVersion().hashCode());
        hashCode = prime * hashCode + ((getUnindexedFaces() == null) ? 0 : getUnindexedFaces().hashCode());
        return hashCode;
    }

    @Override
    public IndexFacesResult clone() {
        try {
            return (IndexFacesResult) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }
}