/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#pragma once
#include <aws/compute-optimizer/ComputeOptimizer_EXPORTS.h>
#include <aws/compute-optimizer/model/AutoScalingGroupConfiguration.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/compute-optimizer/model/UtilizationMetric.h>
#include <aws/compute-optimizer/model/SavingsOpportunity.h>
#include <aws/compute-optimizer/model/MigrationEffort.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace ComputeOptimizer
{
namespace Model
{

  /**
   * Describes a recommendation option for an Auto Scaling group.
   *
   * See Also: AWS API Reference
   */
  class AutoScalingGroupRecommendationOption
  {
  public:
    AWS_COMPUTEOPTIMIZER_API AutoScalingGroupRecommendationOption();
    AWS_COMPUTEOPTIMIZER_API AutoScalingGroupRecommendationOption(Aws::Utils::Json::JsonView jsonValue);
    AWS_COMPUTEOPTIMIZER_API AutoScalingGroupRecommendationOption& operator=(Aws::Utils::Json::JsonView jsonValue);
    AWS_COMPUTEOPTIMIZER_API Aws::Utils::Json::JsonValue Jsonize() const;
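
    /*
     * Illustrative sketch (not part of the generated SDK): round-tripping this
     * model through JSON using only the members declared above plus the core
     * JSON utilities. The JSON payload and its field names are hypothetical.
     *
     *   #include <aws/core/utils/json/JsonSerializer.h>
     *
     *   Aws::Utils::Json::JsonValue doc(R"({"rank": 1, "performanceRisk": 1.0})");
     *   AutoScalingGroupRecommendationOption option(doc.View());
     *
     *   // Serialize the model back to JSON text.
     *   Aws::String serialized = option.Jsonize().View().WriteReadable();
     */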

    ///@{
    /**
     * An object that describes an Auto Scaling group configuration.
     */
    inline const AutoScalingGroupConfiguration& GetConfiguration() const { return m_configuration; }
    inline bool ConfigurationHasBeenSet() const { return m_configurationHasBeenSet; }
    inline void SetConfiguration(const AutoScalingGroupConfiguration& value) { m_configurationHasBeenSet = true; m_configuration = value; }
    inline void SetConfiguration(AutoScalingGroupConfiguration&& value) { m_configurationHasBeenSet = true; m_configuration = std::move(value); }
    inline AutoScalingGroupRecommendationOption& WithConfiguration(const AutoScalingGroupConfiguration& value) { SetConfiguration(value); return *this; }
    inline AutoScalingGroupRecommendationOption& WithConfiguration(AutoScalingGroupConfiguration&& value) { SetConfiguration(std::move(value)); return *this; }
    ///@}
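
    /*
     * Illustrative sketch (not part of the generated SDK): guarding access to the
     * configuration with its HasBeenSet flag before reading it; "option" is a
     * hypothetical AutoScalingGroupRecommendationOption instance.
     *
     *   if (option.ConfigurationHasBeenSet())
     *   {
     *     const AutoScalingGroupConfiguration& config = option.GetConfiguration();
     *     // Inspect the recommended configuration through
     *     // AutoScalingGroupConfiguration's own accessors (declared in its header).
     *   }
     */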

    ///@{
    /**
     * An array of objects that describe the projected utilization metrics of the
     * Auto Scaling group recommendation option.
     *
     * The Cpu and Memory metrics are the only projected utilization metrics
     * returned. Additionally, the Memory metric is returned only for resources
     * that have the unified CloudWatch agent installed on them. For more
     * information, see Enabling Memory Utilization with the CloudWatch Agent.
     */
    inline const Aws::Vector<UtilizationMetric>& GetProjectedUtilizationMetrics() const { return m_projectedUtilizationMetrics; }
    inline bool ProjectedUtilizationMetricsHasBeenSet() const { return m_projectedUtilizationMetricsHasBeenSet; }
    inline void SetProjectedUtilizationMetrics(const Aws::Vector<UtilizationMetric>& value) { m_projectedUtilizationMetricsHasBeenSet = true; m_projectedUtilizationMetrics = value; }
    inline void SetProjectedUtilizationMetrics(Aws::Vector<UtilizationMetric>&& value) { m_projectedUtilizationMetricsHasBeenSet = true; m_projectedUtilizationMetrics = std::move(value); }
    inline AutoScalingGroupRecommendationOption& WithProjectedUtilizationMetrics(const Aws::Vector<UtilizationMetric>& value) { SetProjectedUtilizationMetrics(value); return *this; }
    inline AutoScalingGroupRecommendationOption& WithProjectedUtilizationMetrics(Aws::Vector<UtilizationMetric>&& value) { SetProjectedUtilizationMetrics(std::move(value)); return *this; }
    inline AutoScalingGroupRecommendationOption& AddProjectedUtilizationMetrics(const UtilizationMetric& value) { m_projectedUtilizationMetricsHasBeenSet = true; m_projectedUtilizationMetrics.push_back(value); return *this; }
    inline AutoScalingGroupRecommendationOption& AddProjectedUtilizationMetrics(UtilizationMetric&& value) { m_projectedUtilizationMetricsHasBeenSet = true; m_projectedUtilizationMetrics.push_back(std::move(value)); return *this; }
    ///@}
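
    /*
     * Illustrative sketch (not part of the generated SDK): scanning the projected
     * utilization metrics of a hypothetical "option" instance. The accessors used
     * on UtilizationMetric are assumed from the SDK's usual Get<Member> naming and
     * are declared in UtilizationMetric.h, not here.
     *
     *   for (const UtilizationMetric& metric : option.GetProjectedUtilizationMetrics())
     *   {
     *     // Only Cpu and Memory metrics are projected, and Memory requires the
     *     // unified CloudWatch agent on the underlying instances.
     *     HandleProjectedMetric(metric.GetName(), metric.GetValue());  // hypothetical helper
     *   }
     */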

    ///@{
    /**
     * The performance risk of the Auto Scaling group configuration
     * recommendation.
     *
     * Performance risk indicates the likelihood of the recommended instance type
     * not meeting the resource needs of your workload. Compute Optimizer
     * calculates an individual performance risk score for each specification of
     * the recommended instance, including CPU, memory, EBS throughput, EBS IOPS,
     * disk throughput, disk IOPS, network throughput, and network PPS. The
     * performance risk of the recommended instance is calculated as the maximum
     * performance risk score across the analyzed resource specifications.
     *
     * The value ranges from 0 to 4, with 0 meaning that the recommended resource
     * is predicted to always provide enough hardware capability. The higher the
     * performance risk is, the more likely you should validate whether the
     * recommendation will meet the performance requirements of your workload
     * before migrating your resource.
     */
    inline double GetPerformanceRisk() const { return m_performanceRisk; }
    inline bool PerformanceRiskHasBeenSet() const { return m_performanceRiskHasBeenSet; }
    inline void SetPerformanceRisk(double value) { m_performanceRiskHasBeenSet = true; m_performanceRisk = value; }
    inline AutoScalingGroupRecommendationOption& WithPerformanceRisk(double value) { SetPerformanceRisk(value); return *this; }
    ///@}
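
    /*
     * Illustrative sketch (not part of the generated SDK): using the 0-4 score to
     * flag options that warrant extra validation. The 1.0 threshold is an
     * arbitrary example, not a service or SDK default.
     *
     *   bool needsValidation = option.PerformanceRiskHasBeenSet()
     *                          && option.GetPerformanceRisk() > 1.0;
     */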

    ///@{
    /**
     * The rank of the Auto Scaling group recommendation option. The top
     * recommendation option is ranked as 1.
     */
    inline int GetRank() const { return m_rank; }
    inline bool RankHasBeenSet() const { return m_rankHasBeenSet; }
    inline void SetRank(int value) { m_rankHasBeenSet = true; m_rank = value; }
    inline AutoScalingGroupRecommendationOption& WithRank(int value) { SetRank(value); return *this; }
    ///@}
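
    /*
     * Illustrative sketch (not part of the generated SDK): picking the top-ranked
     * option (rank 1) from a hypothetical list of recommendation options.
     *
     *   const AutoScalingGroupRecommendationOption* top = nullptr;
     *   for (const AutoScalingGroupRecommendationOption& candidate : options)
     *   {
     *     if (candidate.GetRank() == 1) { top = &candidate; break; }
     *   }
     */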

    ///@{
    /**
     * An object that describes the savings opportunity for the Auto Scaling group
     * recommendation option. Savings opportunity includes the estimated monthly
     * savings amount and percentage.
     */
    inline const SavingsOpportunity& GetSavingsOpportunity() const { return m_savingsOpportunity; }
    inline bool SavingsOpportunityHasBeenSet() const { return m_savingsOpportunityHasBeenSet; }
    inline void SetSavingsOpportunity(const SavingsOpportunity& value) { m_savingsOpportunityHasBeenSet = true; m_savingsOpportunity = value; }
    inline void SetSavingsOpportunity(SavingsOpportunity&& value) { m_savingsOpportunityHasBeenSet = true; m_savingsOpportunity = std::move(value); }
    inline AutoScalingGroupRecommendationOption& WithSavingsOpportunity(const SavingsOpportunity& value) { SetSavingsOpportunity(value); return *this; }
    inline AutoScalingGroupRecommendationOption& WithSavingsOpportunity(SavingsOpportunity&& value) { SetSavingsOpportunity(std::move(value)); return *this; }
    ///@}
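
    /*
     * Illustrative sketch (not part of the generated SDK): reading the estimated
     * savings for a hypothetical "option" instance. The accessor used on
     * SavingsOpportunity is assumed from the SDK's usual Get<Member> naming and is
     * declared in SavingsOpportunity.h, not here.
     *
     *   if (option.SavingsOpportunityHasBeenSet())
     *   {
     *     const SavingsOpportunity& savings = option.GetSavingsOpportunity();
     *     double pct = savings.GetSavingsOpportunityPercentage();  // assumed accessor
     *   }
     */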

    ///@{
    /**
     * The level of effort required to migrate from the current instance type to
     * the recommended instance type.
     *
     * For example, the migration effort is Low if Amazon EMR is the inferred
     * workload type and an Amazon Web Services Graviton instance type is
     * recommended. The migration effort is Medium if a workload type couldn't be
     * inferred but an Amazon Web Services Graviton instance type is recommended.
     * The migration effort is VeryLow if both the current and recommended
     * instance types are of the same CPU architecture.
     */
    inline const MigrationEffort& GetMigrationEffort() const { return m_migrationEffort; }
    inline bool MigrationEffortHasBeenSet() const { return m_migrationEffortHasBeenSet; }
    inline void SetMigrationEffort(const MigrationEffort& value) { m_migrationEffortHasBeenSet = true; m_migrationEffort = value; }
    inline void SetMigrationEffort(MigrationEffort&& value) { m_migrationEffortHasBeenSet = true; m_migrationEffort = std::move(value); }
    inline AutoScalingGroupRecommendationOption& WithMigrationEffort(const MigrationEffort& value) { SetMigrationEffort(value); return *this; }
    inline AutoScalingGroupRecommendationOption& WithMigrationEffort(MigrationEffort&& value) { SetMigrationEffort(std::move(value)); return *this; }
    ///@}
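
    /*
     * Illustrative sketch (not part of the generated SDK): branching on the
     * migration effort of a hypothetical "option" instance. MigrationEffort::VeryLow
     * is assumed from the values named in the comment above (VeryLow, Low, Medium).
     *
     *   if (option.MigrationEffortHasBeenSet()
     *       && option.GetMigrationEffort() == MigrationEffort::VeryLow)
     *   {
     *     // Same CPU architecture; migration is expected to be straightforward.
     *   }
     */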

  private:

    AutoScalingGroupConfiguration m_configuration;
    bool m_configurationHasBeenSet = false;

    Aws::Vector<UtilizationMetric> m_projectedUtilizationMetrics;
    bool m_projectedUtilizationMetricsHasBeenSet = false;

    double m_performanceRisk = 0.0;
    bool m_performanceRiskHasBeenSet = false;

    int m_rank = 0;
    bool m_rankHasBeenSet = false;

    SavingsOpportunity m_savingsOpportunity;
    bool m_savingsOpportunityHasBeenSet = false;

    MigrationEffort m_migrationEffort = MigrationEffort::NOT_SET;
    bool m_migrationEffortHasBeenSet = false;
  };

} // namespace Model
} // namespace ComputeOptimizer
} // namespace Aws