/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
// NOTE(review): the original #include target was lost when this header was
// mangled (everything between angle brackets was stripped). Generated SageMaker
// model headers include the export-macro header here; it is guarded with
// __has_include because none of the surviving members use AWS_SAGEMAKER_API,
// so the header also parses where the SDK tree is absent. TODO confirm against
// the upstream generated file.
#if __has_include(<aws/sagemaker/SageMaker_EXPORTS.h>)
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#endif

namespace Aws
{
namespace SageMaker
{
namespace Model
{

  /**
   * <p>The metrics for an existing endpoint compared in an Inference Recommender
   * job.</p><p><h3>See Also:</h3>   <a
   * href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/InferenceMetrics">AWS
   * API Reference</a></p>
   */
  class InferenceMetrics
  {
  public:

    /**
     * <p>The expected maximum number of requests per minute for the instance.</p>
     */
    inline int GetMaxInvocations() const { return m_maxInvocations; }

    /**
     * <p>The expected maximum number of requests per minute for the instance.</p>
     */
    inline bool MaxInvocationsHasBeenSet() const { return m_maxInvocationsHasBeenSet; }

    /**
     * <p>The expected maximum number of requests per minute for the instance.</p>
     */
    inline void SetMaxInvocations(int value) { m_maxInvocationsHasBeenSet = true; m_maxInvocations = value; }

    /**
     * <p>The expected maximum number of requests per minute for the instance.</p>
     */
    inline InferenceMetrics& WithMaxInvocations(int value) { SetMaxInvocations(value); return *this; }

    /**
     * <p>The expected model latency at maximum invocations per minute for the
     * instance.</p>
     */
    inline int GetModelLatency() const { return m_modelLatency; }

    /**
     * <p>The expected model latency at maximum invocations per minute for the
     * instance.</p>
     */
    inline bool ModelLatencyHasBeenSet() const { return m_modelLatencyHasBeenSet; }

    /**
     * <p>The expected model latency at maximum invocations per minute for the
     * instance.</p>
     */
    inline void SetModelLatency(int value) { m_modelLatencyHasBeenSet = true; m_modelLatency = value; }

    /**
     * <p>The expected model latency at maximum invocations per minute for the
     * instance.</p>
     */
    inline InferenceMetrics& WithModelLatency(int value) { SetModelLatency(value); return *this; }

  private:

    // Brace-initialized (was uninitialized in the dump): reading a getter
    // before the corresponding setter is otherwise undefined behavior. The
    // *HasBeenSet flags still gate whether a value was explicitly provided.
    int m_maxInvocations{0};
    bool m_maxInvocationsHasBeenSet = false;

    int m_modelLatency{0};
    bool m_modelLatencyHasBeenSet = false;
  };

} // namespace Model
} // namespace SageMaker
} // namespace Aws