/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/ec2/EC2_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/ec2/model/InferenceDeviceInfo.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Xml
{
  class XmlNode;
} // namespace Xml
} // namespace Utils
namespace EC2
{
namespace Model
{

  /**
   * Describes the Inference accelerators for the instance type.
   *
   * See Also: AWS API Reference
   */
  class InferenceAcceleratorInfo
  {
  public:
    AWS_EC2_API InferenceAcceleratorInfo();
    AWS_EC2_API InferenceAcceleratorInfo(const Aws::Utils::Xml::XmlNode& xmlNode);
    AWS_EC2_API InferenceAcceleratorInfo& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);

    AWS_EC2_API void OutputToStream(Aws::OStream& ostream, const char* location, unsigned index, const char* locationValue) const;
    AWS_EC2_API void OutputToStream(Aws::OStream& oStream, const char* location) const;

    /**
     * Describes the Inference accelerators for the instance type.
     */
    inline const Aws::Vector<InferenceDeviceInfo>& GetAccelerators() const{ return m_accelerators; }

    /**
     * Describes the Inference accelerators for the instance type.
     */
    inline bool AcceleratorsHasBeenSet() const { return m_acceleratorsHasBeenSet; }

    /**
     * Describes the Inference accelerators for the instance type.
     */
    inline void SetAccelerators(const Aws::Vector<InferenceDeviceInfo>& value) { m_acceleratorsHasBeenSet = true; m_accelerators = value; }

    /**
     * Describes the Inference accelerators for the instance type.
     */
    inline void SetAccelerators(Aws::Vector<InferenceDeviceInfo>&& value) { m_acceleratorsHasBeenSet = true; m_accelerators = std::move(value); }

    /**
     * Describes the Inference accelerators for the instance type.
     */
    inline InferenceAcceleratorInfo& WithAccelerators(const Aws::Vector<InferenceDeviceInfo>& value) { SetAccelerators(value); return *this;}

    /**
     * Describes the Inference accelerators for the instance type.
     */
    inline InferenceAcceleratorInfo& WithAccelerators(Aws::Vector<InferenceDeviceInfo>&& value) { SetAccelerators(std::move(value)); return *this;}

    /**
     * Describes the Inference accelerators for the instance type.
     */
    inline InferenceAcceleratorInfo& AddAccelerators(const InferenceDeviceInfo& value) { m_acceleratorsHasBeenSet = true; m_accelerators.push_back(value); return *this; }

    /**
     * Describes the Inference accelerators for the instance type.
     */
    inline InferenceAcceleratorInfo& AddAccelerators(InferenceDeviceInfo&& value) { m_acceleratorsHasBeenSet = true; m_accelerators.push_back(std::move(value)); return *this; }


    /**
     * The total size of the memory for the inference accelerators for the instance
     * type, in MiB.
     */
    inline int GetTotalInferenceMemoryInMiB() const{ return m_totalInferenceMemoryInMiB; }

    /**
     * The total size of the memory for the inference accelerators for the instance
     * type, in MiB.
     */
    inline bool TotalInferenceMemoryInMiBHasBeenSet() const { return m_totalInferenceMemoryInMiBHasBeenSet; }

    /**
     * The total size of the memory for the inference accelerators for the instance
     * type, in MiB.
     */
    inline void SetTotalInferenceMemoryInMiB(int value) { m_totalInferenceMemoryInMiBHasBeenSet = true; m_totalInferenceMemoryInMiB = value; }

    /**
     * The total size of the memory for the inference accelerators for the instance
     * type, in MiB.
     */
    inline InferenceAcceleratorInfo& WithTotalInferenceMemoryInMiB(int value) { SetTotalInferenceMemoryInMiB(value); return *this;}

  private:

    Aws::Vector<InferenceDeviceInfo> m_accelerators;
    bool m_acceleratorsHasBeenSet = false;

    int m_totalInferenceMemoryInMiB;
    bool m_totalInferenceMemoryInMiBHasBeenSet = false;
  };

} // namespace Model
} // namespace EC2
} // namespace Aws
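/*
 * Usage sketch (illustrative only, not part of the generated model): the
 * accessors declared above are typically read from an already-populated
 * InferenceAcceleratorInfo value; the variable `acceleratorInfo` below is
 * assumed to come from a describe-instance-types style response.
 *
 *   const Aws::EC2::Model::InferenceAcceleratorInfo& acceleratorInfo = ...;
 *   if (acceleratorInfo.AcceleratorsHasBeenSet())
 *   {
 *     // Each element is an InferenceDeviceInfo describing one accelerator.
 *     for (const auto& device : acceleratorInfo.GetAccelerators())
 *     {
 *       (void)device;
 *     }
 *   }
 *   // Total accelerator memory across the instance type, in MiB.
 *   const int totalMiB = acceleratorInfo.GetTotalInferenceMemoryInMiB();
 */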