/*******************************************************************************
 *  Copyright 2012-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *  Licensed under the Apache License, Version 2.0 (the "License"). You may not use
 *  this file except in compliance with the License. A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 *  or in the "license" file accompanying this file.
 *  This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 *  CONDITIONS OF ANY KIND, either express or implied. See the License for the
 *  specific language governing permissions and limitations under the License.
 * *****************************************************************************
 *
 *  AWS Tools for Windows (TM) PowerShell (TM)
 *
 */
using System;
using System.Collections.Generic;
using System.Linq;
using System.Management.Automation;
using System.Text;
using Amazon.PowerShell.Common;
using Amazon.Runtime;
using Amazon.Rekognition;
using Amazon.Rekognition.Model;

namespace Amazon.PowerShell.Cmdlets.REK
{
    /// <summary>
    /// Detects faces within an image that is provided as input.
    /// 
    ///  
    /// <para><code>DetectFaces</code> detects the 100 largest faces in the image. For each face
    /// detected, the operation returns face details. These details include a bounding box
    /// of the face, a confidence value (that the bounding box contains a face), and a fixed
    /// set of attributes such as facial landmarks (for example, coordinates of eye and mouth),
    /// pose, presence of facial occlusion, and so on.
    /// </para><para>
    /// The face-detection algorithm is most effective on frontal faces. For non-frontal or
    /// obscured faces, the algorithm might not detect the faces or might detect faces with
    /// lower confidence.
    /// </para><para>
    /// You pass the input image either as base64-encoded image bytes or as a reference to
    /// an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition
    /// operations, passing image bytes is not supported. The image must be either a PNG or
    /// JPEG formatted file.
    /// </para><note><para>
    /// This is a stateless API operation. That is, the operation does not persist any data.
    /// </para></note><para>
    /// This operation requires permissions to perform the <code>rekognition:DetectFaces</code>
    /// action.
    /// </para>
    /// </summary>
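    /// <example>
    /// <para>
    /// A minimal usage sketch. The cmdlet and parameter names come from this file; the
    /// bucket name and object key below are placeholders, not values defined by this module.
    /// </para>
    /// <code>
    /// # Detect faces in an image stored in Amazon S3, requesting all facial attributes.
    /// Find-REKFace -ImageBucket "amzn-s3-demo-bucket" -ImageName "photos/portrait.jpg" -Attribute "ALL"
    /// </code>
    /// </example>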
    [Cmdlet("Find", "REKFace")]
    [OutputType("Amazon.Rekognition.Model.DetectFacesResponse")]
    [AWSCmdlet("Calls the Amazon Rekognition DetectFaces API operation.", Operation = new[] {"DetectFaces"}, SelectReturnType = typeof(Amazon.Rekognition.Model.DetectFacesResponse))]
    [AWSCmdletOutput("Amazon.Rekognition.Model.DetectFacesResponse",
        "This cmdlet returns an Amazon.Rekognition.Model.DetectFacesResponse object containing multiple properties. The object can also be referenced from properties attached to the cmdlet entry in the $AWSHistory stack."
    )]
    public partial class FindREKFaceCmdlet : AmazonRekognitionClientCmdlet, IExecutor
    {
        
        #region Parameter Attribute
        /// <summary>
        /// <para>
        /// <para>An array of facial attributes you want to be returned. A <code>DEFAULT</code> subset
        /// of facial attributes - <code>BoundingBox</code>, <code>Confidence</code>, <code>Pose</code>,
        /// <code>Quality</code>, and <code>Landmarks</code> - will always be returned. You can
        /// request specific facial attributes (in addition to the default list) by using
        /// [<code>"DEFAULT", "FACE_OCCLUDED"</code>] or just [<code>"FACE_OCCLUDED"</code>].
        /// You can request all facial attributes by using [<code>"ALL"</code>]. Requesting
        /// more attributes may increase response time.</para><para>If you provide both <code>["ALL", "DEFAULT"]</code>, the service uses a logical "AND"
        /// operator to determine which attributes to return (in this case, all attributes). </para>
        /// </para>
        /// </summary>
        [System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
        [Alias("Attributes")]
        public System.String[] Attribute { get; set; }
        #endregion
        
        #region Parameter ImageBucket
        /// <summary>
        /// <para>
        /// <para>Name of the S3 bucket.</para>
        /// </para>
        /// </summary>
        [System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
        public System.String ImageBucket { get; set; }
        #endregion
        
        #region Parameter ImageContent
        /// <summary>
        /// <para>
        /// <para>Blob of image bytes up to 5 MB. Note that the maximum image size you can pass to
        /// <code>DetectCustomLabels</code> is 4 MB. </para>
        /// </para>
        /// <para>The cmdlet will automatically convert the supplied parameter of type string, string[], System.IO.FileInfo or System.IO.Stream to byte[] before supplying it to the service.</para>
        /// </summary>
        [System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
        [Amazon.PowerShell.Common.MemoryStreamParameterConverter]
        public byte[] ImageContent { get; set; }
        #endregion
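        // Usage sketch for local files (hedged): -ImageContent is declared as byte[] and, per the
        // converter noted in the documentation above, also accepts string, string[],
        // System.IO.FileInfo or System.IO.Stream input. The file path below is a placeholder.
        //
        //   Find-REKFace -ImageContent ([System.IO.File]::ReadAllBytes("C:\temp\portrait.jpg"))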
        #region Parameter ImageName
        /// <summary>
        /// <para>
        /// <para>S3 object key name.</para>
        /// </para>
        /// </summary>
        [System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
        public System.String ImageName { get; set; }
        #endregion
        
        #region Parameter ImageVersion
        /// <summary>
        /// <para>
        /// <para>If the bucket has versioning enabled, you can specify the object version. </para>
        /// </para>
        /// </summary>
        [System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
        public System.String ImageVersion { get; set; }
        #endregion
        
        #region Parameter Select
        /// <summary>
        /// Use the -Select parameter to control the cmdlet output. The default value is '*'.
        /// Specifying -Select '*' will result in the cmdlet returning the whole service response (Amazon.Rekognition.Model.DetectFacesResponse).
        /// Specifying the name of a property of type Amazon.Rekognition.Model.DetectFacesResponse will result in that property being returned.
        /// Specifying -Select '^ParameterName' will result in the cmdlet returning the selected cmdlet parameter value.
        /// </summary>
        [System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
        public string Select { get; set; } = "*";
        #endregion
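        // Usage sketch for -Select (hedged): naming a single response property returns just that
        // property instead of the whole Amazon.Rekognition.Model.DetectFacesResponse. The property
        // name FaceDetails is taken from the DetectFaces service response and is not defined in this
        // file; the bucket and key are placeholders.
        //
        //   Find-REKFace -ImageBucket "amzn-s3-demo-bucket" -ImageName "photos/portrait.jpg" -Select FaceDetails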
        protected override void ProcessRecord()
        {
            this._AWSSignerType = "v4";
            base.ProcessRecord();
            
            var context = new CmdletContext();
            
            // allow for manipulation of parameters prior to loading into context
            PreExecutionContextLoad(context);
            
            if (ParameterWasBound(nameof(this.Select)))
            {
                context.Select = CreateSelectDelegate<Amazon.Rekognition.Model.DetectFacesResponse, FindREKFaceCmdlet>(Select) ??
                    throw new System.ArgumentException("Invalid value for -Select parameter.", nameof(this.Select));
            }
            if (this.Attribute != null)
            {
                context.Attribute = new List<System.String>(this.Attribute);
            }
            context.ImageContent = this.ImageContent;
            context.ImageBucket = this.ImageBucket;
            context.ImageName = this.ImageName;
            context.ImageVersion = this.ImageVersion;
            
            // allow further manipulation of loaded context prior to processing
            PostExecutionContextLoad(context);
            
            var output = Execute(context) as CmdletOutput;
            ProcessOutput(output);
        }
        
        #region IExecutor Members
        
        public object Execute(ExecutorContext context)
        {
            System.IO.MemoryStream _ImageContentStream = null;
            
            try
            {
                var cmdletContext = context as CmdletContext;
                // create request
                var request = new Amazon.Rekognition.Model.DetectFacesRequest();
                
                if (cmdletContext.Attribute != null)
                {
                    request.Attributes = cmdletContext.Attribute;
                }
                
                // populate Image
                var requestImageIsNull = true;
                request.Image = new Amazon.Rekognition.Model.Image();
                System.IO.MemoryStream requestImage_imageContent = null;
                if (cmdletContext.ImageContent != null)
                {
                    _ImageContentStream = new System.IO.MemoryStream(cmdletContext.ImageContent);
                    requestImage_imageContent = _ImageContentStream;
                }
                if (requestImage_imageContent != null)
                {
                    request.Image.Bytes = requestImage_imageContent;
                    requestImageIsNull = false;
                }
                Amazon.Rekognition.Model.S3Object requestImage_image_S3Object = null;
                
                // populate S3Object
                var requestImage_image_S3ObjectIsNull = true;
                requestImage_image_S3Object = new Amazon.Rekognition.Model.S3Object();
                System.String requestImage_image_S3Object_imageBucket = null;
                if (cmdletContext.ImageBucket != null)
                {
                    requestImage_image_S3Object_imageBucket = cmdletContext.ImageBucket;
                }
                if (requestImage_image_S3Object_imageBucket != null)
                {
                    requestImage_image_S3Object.Bucket = requestImage_image_S3Object_imageBucket;
                    requestImage_image_S3ObjectIsNull = false;
                }
                System.String requestImage_image_S3Object_imageName = null;
                if (cmdletContext.ImageName != null)
                {
                    requestImage_image_S3Object_imageName = cmdletContext.ImageName;
                }
                if (requestImage_image_S3Object_imageName != null)
                {
                    requestImage_image_S3Object.Name = requestImage_image_S3Object_imageName;
                    requestImage_image_S3ObjectIsNull = false;
                }
                System.String requestImage_image_S3Object_imageVersion = null;
                if (cmdletContext.ImageVersion != null)
                {
                    requestImage_image_S3Object_imageVersion = cmdletContext.ImageVersion;
                }
                if (requestImage_image_S3Object_imageVersion != null)
                {
                    requestImage_image_S3Object.Version = requestImage_image_S3Object_imageVersion;
                    requestImage_image_S3ObjectIsNull = false;
                }
                // determine if requestImage_image_S3Object should be set to null
                if (requestImage_image_S3ObjectIsNull)
                {
                    requestImage_image_S3Object = null;
                }
                if (requestImage_image_S3Object != null)
                {
                    request.Image.S3Object = requestImage_image_S3Object;
                    requestImageIsNull = false;
                }
                // determine if request.Image should be set to null
                if (requestImageIsNull)
                {
                    request.Image = null;
                }
                
                CmdletOutput output;
                
                // issue call
                var client = Client ?? CreateClient(_CurrentCredentials, _RegionEndpoint);
                try
                {
                    var response = CallAWSServiceOperation(client, request);
                    object pipelineOutput = null;
                    pipelineOutput = cmdletContext.Select(response, this);
                    output = new CmdletOutput
                    {
                        PipelineOutput = pipelineOutput,
                        ServiceResponse = response
                    };
                }
                catch (Exception e)
                {
                    output = new CmdletOutput { ErrorResponse = e };
                }
                
                return output;
            }
            finally
            {
                if (_ImageContentStream != null)
                {
                    _ImageContentStream.Dispose();
                }
            }
        }
        
        public ExecutorContext CreateContext()
        {
            return new CmdletContext();
        }
        
        #endregion
        
        #region AWS Service Operation Call
        
        private Amazon.Rekognition.Model.DetectFacesResponse CallAWSServiceOperation(IAmazonRekognition client, Amazon.Rekognition.Model.DetectFacesRequest request)
        {
            Utils.Common.WriteVerboseEndpointMessage(this, client.Config, "Amazon Rekognition", "DetectFaces");
            try
            {
                #if DESKTOP
                return client.DetectFaces(request);
                #elif CORECLR
                return client.DetectFacesAsync(request).GetAwaiter().GetResult();
                #else
                #error "Unknown build edition"
                #endif
            }
            catch (AmazonServiceException exc)
            {
                var webException = exc.InnerException as System.Net.WebException;
                if (webException != null)
                {
                    throw new Exception(Utils.Common.FormatNameResolutionFailureMessage(client.Config, webException.Message), webException);
                }
                throw;
            }
        }
        
        #endregion
        
        internal partial class CmdletContext : ExecutorContext
        {
            public List<System.String> Attribute { get; set; }
            public byte[] ImageContent { get; set; }
            public System.String ImageBucket { get; set; }
            public System.String ImageName { get; set; }
            public System.String ImageVersion { get; set; }
            public System.Func<Amazon.Rekognition.Model.DetectFacesResponse, FindREKFaceCmdlet, object> Select { get; set; } =
                (response, cmdlet) => response;
        }
        
    }
}