/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>

/**
 * <p>Provides information about a single type of inappropriate, unwanted, or
 * offensive content found in an image or video. Each type of moderated content has
 * a label within a hierarchical taxonomy. For more information, see Content
 * moderation in the Amazon Rekognition Developer Guide.</p><p><h3>See Also:</h3>
 * <a href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/ModerationLabel">AWS
 * API Reference</a></p>
 */
/**
 * <p>Specifies the confidence that Amazon Rekognition has that the label has been
 * correctly identified.</p> <p>If you don't specify the <code>MinConfidence</code>
 * parameter in the call to <code>DetectModerationLabels</code>, the operation
 * returns labels with a confidence value greater than or equal to 50 percent.</p>
 */
// NOTE(review): this doc comment appeared four times in the recovered text —
// matching the usual Get/HasBeenSet/Set/With accessor quartet for a numeric
// member (Confidence) — but the accessor definitions themselves were lost in
// extraction. Restore them from the generated SDK source before use.
/**
 * <p>The label name for the type of unsafe content detected in the image.</p>
 */
inline const Aws::String& GetName() const { return m_name; }

/**
 * <p>The label name for the type of unsafe content detected in the image.</p>
 */
inline bool NameHasBeenSet() const { return m_nameHasBeenSet; }

/**
 * <p>The label name for the type of unsafe content detected in the image.</p>
 */
inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; }

/**
 * <p>The label name for the type of unsafe content detected in the image.</p>
 */
inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); }

/**
 * <p>The label name for the type of unsafe content detected in the image.</p>
 */
inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); }

/**
 * <p>The label name for the type of unsafe content detected in the image.</p>
 */
inline ModerationLabel& WithName(const Aws::String& value) { SetName(value); return *this; }

/**
 * <p>The label name for the type of unsafe content detected in the image.</p>
 */
inline ModerationLabel& WithName(Aws::String&& value) { SetName(std::move(value)); return *this; }

/**
 * <p>The label name for the type of unsafe content detected in the image.</p>
 */
inline ModerationLabel& WithName(const char* value) { SetName(value); return *this; }
/**
 * <p>The name for the parent label. Labels at the top level of the hierarchy have
 * the parent label <code>""</code>.</p>
 */
// NOTE(review): this doc comment appeared eight times in the recovered text —
// matching the usual Get/HasBeenSet/3xSet/3xWith accessor octet for a string
// member (ParentName, mirroring the Name accessors above) — but the accessor
// definitions themselves were lost in extraction. Restore them from the
// generated SDK source before use.