/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/glue/Glue_EXPORTS.h>
#include <aws/glue/model/ConfusionMatrix.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/glue/model/ColumnImportance.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace Glue
{
namespace Model
{

  /**
   * The evaluation metrics for the find matches algorithm. The quality of your
   * machine learning transform is measured by getting your transform to predict
   * some matches and comparing the results to known matches from the same dataset.
   * The quality metrics are based on a subset of your data, so they are not
   * precise.
   *
   * See Also: AWS API Reference
   */
  class AWS_GLUE_API FindMatchesMetrics
  {
  public:

    FindMatchesMetrics();
    FindMatchesMetrics(Aws::Utils::Json::JsonView jsonValue);
    FindMatchesMetrics& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;

    /**
     * The area under the precision/recall curve (AUPRC) is a single number
     * measuring the overall quality of the transform, that is independent of the
     * choice made for precision vs. recall. Higher values indicate that you have a
     * more attractive precision vs. recall tradeoff.
     *
     * For more information, see Precision and recall in Wikipedia.
     */
    inline double GetAreaUnderPRCurve() const { return m_areaUnderPRCurve; }
    inline bool AreaUnderPRCurveHasBeenSet() const { return m_areaUnderPRCurveHasBeenSet; }
    inline void SetAreaUnderPRCurve(double value) { m_areaUnderPRCurveHasBeenSet = true; m_areaUnderPRCurve = value; }
    inline FindMatchesMetrics& WithAreaUnderPRCurve(double value) { SetAreaUnderPRCurve(value); return *this; }
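
    // Illustrative note (not part of the service response): AUPRC summarizes the
    // precision/recall tradeoff across every possible match-confidence threshold,
    // so it is a threshold-independent score in [0, 1], where 1.0 is perfect.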

    /**
     * The precision metric indicates how often your transform is correct when it
     * predicts a match. Specifically, it measures how well the transform finds true
     * positives from the total true positives possible.
     *
     * For more information, see Precision and recall in Wikipedia.
     */
    inline double GetPrecision() const { return m_precision; }
    inline bool PrecisionHasBeenSet() const { return m_precisionHasBeenSet; }
    inline void SetPrecision(double value) { m_precisionHasBeenSet = true; m_precision = value; }
    inline FindMatchesMetrics& WithPrecision(double value) { SetPrecision(value); return *this; }
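
    // Illustrative note (standard definition, not specific to this SDK):
    // precision = truePositives / (truePositives + falsePositives), i.e. the
    // fraction of predicted matches that are actual matches.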

    /**
     * The recall metric indicates how often, for an actual match, your transform
     * predicts the match. Specifically, it measures how well the transform finds
     * true positives from the total records in the source data.
     *
     * For more information, see Precision and recall in Wikipedia.
     */
    inline double GetRecall() const { return m_recall; }
    inline bool RecallHasBeenSet() const { return m_recallHasBeenSet; }
    inline void SetRecall(double value) { m_recallHasBeenSet = true; m_recall = value; }
    inline FindMatchesMetrics& WithRecall(double value) { SetRecall(value); return *this; }
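
    // Illustrative note (standard definition, not specific to this SDK):
    // recall = truePositives / (truePositives + falseNegatives), i.e. the
    // fraction of actual matches that the transform successfully predicts.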

    /**
     * The maximum F1 metric indicates the transform's accuracy between 0 and 1,
     * where 1 is the best accuracy.
     *
     * For more information, see F1 score in Wikipedia.
     */
    inline double GetF1() const { return m_f1; }
    inline bool F1HasBeenSet() const { return m_f1HasBeenSet; }
    inline void SetF1(double value) { m_f1HasBeenSet = true; m_f1 = value; }
    inline FindMatchesMetrics& WithF1(double value) { SetF1(value); return *this; }
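
    // Illustrative note (standard definition): F1 is the harmonic mean of
    // precision and recall, F1 = 2 * precision * recall / (precision + recall);
    // "maximum F1" suggests the best such value over the evaluated operating points.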

    /**
     * The confusion matrix shows you what your transform is predicting accurately
     * and what types of errors it is making.
     *
     * For more information, see Confusion matrix in Wikipedia.
     */
    inline const ConfusionMatrix& GetConfusionMatrix() const { return m_confusionMatrix; }
    inline bool ConfusionMatrixHasBeenSet() const { return m_confusionMatrixHasBeenSet; }
    inline void SetConfusionMatrix(const ConfusionMatrix& value) { m_confusionMatrixHasBeenSet = true; m_confusionMatrix = value; }
    inline void SetConfusionMatrix(ConfusionMatrix&& value) { m_confusionMatrixHasBeenSet = true; m_confusionMatrix = std::move(value); }
    inline FindMatchesMetrics& WithConfusionMatrix(const ConfusionMatrix& value) { SetConfusionMatrix(value); return *this; }
    inline FindMatchesMetrics& WithConfusionMatrix(ConfusionMatrix&& value) { SetConfusionMatrix(std::move(value)); return *this; }
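
    // Illustrative sketch (assumes the companion ConfusionMatrix model exposes
    // NumTruePositives/NumFalsePositives/NumFalseNegatives counts; verify the
    // accessor names against your SDK version):
    //
    //   const auto& cm = metrics.GetConfusionMatrix();
    //   const double tp = static_cast<double>(cm.GetNumTruePositives());
    //   const double fp = static_cast<double>(cm.GetNumFalsePositives());
    //   const double fn = static_cast<double>(cm.GetNumFalseNegatives());
    //   const double precisionFromCounts = tp / (tp + fp); // should track GetPrecision()
    //   const double recallFromCounts    = tp / (tp + fn); // should track GetRecall()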

    /**
     * A list of ColumnImportance structures containing column importance metrics,
     * sorted in order of descending importance.
     */
    inline const Aws::Vector<ColumnImportance>& GetColumnImportances() const { return m_columnImportances; }
    inline bool ColumnImportancesHasBeenSet() const { return m_columnImportancesHasBeenSet; }
    inline void SetColumnImportances(const Aws::Vector<ColumnImportance>& value) { m_columnImportancesHasBeenSet = true; m_columnImportances = value; }
    inline void SetColumnImportances(Aws::Vector<ColumnImportance>&& value) { m_columnImportancesHasBeenSet = true; m_columnImportances = std::move(value); }
    inline FindMatchesMetrics& WithColumnImportances(const Aws::Vector<ColumnImportance>& value) { SetColumnImportances(value); return *this; }
    inline FindMatchesMetrics& WithColumnImportances(Aws::Vector<ColumnImportance>&& value) { SetColumnImportances(std::move(value)); return *this; }
    inline FindMatchesMetrics& AddColumnImportances(const ColumnImportance& value) { m_columnImportancesHasBeenSet = true; m_columnImportances.push_back(value); return *this; }
    inline FindMatchesMetrics& AddColumnImportances(ColumnImportance&& value) { m_columnImportancesHasBeenSet = true; m_columnImportances.push_back(std::move(value)); return *this; }

  private:

    double m_areaUnderPRCurve;
    bool m_areaUnderPRCurveHasBeenSet;

    double m_precision;
    bool m_precisionHasBeenSet;

    double m_recall;
    bool m_recallHasBeenSet;

    double m_f1;
    bool m_f1HasBeenSet;

    ConfusionMatrix m_confusionMatrix;
    bool m_confusionMatrixHasBeenSet;

    Aws::Vector<ColumnImportance> m_columnImportances;
    bool m_columnImportancesHasBeenSet;
  };

} // namespace Model
} // namespace Glue
} // namespace Aws
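
/*
 * Illustrative usage sketch (not part of this header). It assumes the usual
 * AWS SDK for C++ Glue types and accessors -- GlueClient, GetMLTransformRequest,
 * EvaluationMetrics::GetFindMatchesMetrics, ColumnImportance::GetColumnName /
 * GetImportance -- and an initialized SDK (Aws::InitAPI). Verify the names
 * against your SDK version; the transform ID below is hypothetical.
 *
 *   Aws::Glue::GlueClient client;
 *   Aws::Glue::Model::GetMLTransformRequest request;
 *   request.SetTransformId("tfm-1234567890abcdef");  // hypothetical ID
 *
 *   auto outcome = client.GetMLTransform(request);
 *   if (outcome.IsSuccess())
 *   {
 *     const auto& metrics =
 *         outcome.GetResult().GetEvaluationMetrics().GetFindMatchesMetrics();
 *
 *     double f1    = metrics.GetF1();
 *     double auprc = metrics.GetAreaUnderPRCurve();
 *
 *     // Column importances arrive sorted in descending order of importance.
 *     for (const auto& col : metrics.GetColumnImportances())
 *     {
 *       // e.g. log col.GetColumnName() and col.GetImportance()
 *     }
 *   }
 */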