/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/lookoutvision/LookoutforVision_EXPORTS.h>
#include <aws/lookoutvision/LookoutforVisionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/UUID.h>
#include <utility>

namespace Aws
{
namespace LookoutforVision
{
namespace Model
{

  /**
   */
  class StartModelRequest : public LookoutforVisionRequest
  {
  public:
    AWS_LOOKOUTFORVISION_API StartModelRequest();

    // The service request name is the operation name that will send this request
    // out. Each operation should have a unique request name, so that the
    // operation's name can be recovered from its request. Note: this is not true
    // for responses; multiple operations may have the same response name, so an
    // operation's name cannot be recovered from its response.
    inline virtual const char* GetServiceRequestName() const override { return "StartModel"; }

    AWS_LOOKOUTFORVISION_API Aws::String SerializePayload() const override;

    AWS_LOOKOUTFORVISION_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;


    /**
     * <p>The name of the project that contains the model that you want to start.</p>
     */
    inline const Aws::String& GetProjectName() const{ return m_projectName; }

    /**
     * <p>The name of the project that contains the model that you want to start.</p>
     */
    inline bool ProjectNameHasBeenSet() const { return m_projectNameHasBeenSet; }

    /**
     * <p>The name of the project that contains the model that you want to start.</p>
     */
    inline void SetProjectName(const Aws::String& value) { m_projectNameHasBeenSet = true; m_projectName = value; }

    /**
     * <p>The name of the project that contains the model that you want to start.</p>
     */
    inline void SetProjectName(Aws::String&& value) { m_projectNameHasBeenSet = true; m_projectName = std::move(value); }

    /**
     * <p>The name of the project that contains the model that you want to start.</p>
     */
    inline void SetProjectName(const char* value) { m_projectNameHasBeenSet = true; m_projectName.assign(value); }

    /**
     * <p>The name of the project that contains the model that you want to start.</p>
     */
    inline StartModelRequest& WithProjectName(const Aws::String& value) { SetProjectName(value); return *this;}

    /**
     * <p>The name of the project that contains the model that you want to start.</p>
     */
    inline StartModelRequest& WithProjectName(Aws::String&& value) { SetProjectName(std::move(value)); return *this;}

    /**
     * <p>The name of the project that contains the model that you want to start.</p>
     */
    inline StartModelRequest& WithProjectName(const char* value) { SetProjectName(value); return *this;}


    /**
     * <p>The version of the model that you want to start.</p>
     */
    inline const Aws::String& GetModelVersion() const{ return m_modelVersion; }

    /**
     * <p>The version of the model that you want to start.</p>
     */
    inline bool ModelVersionHasBeenSet() const { return m_modelVersionHasBeenSet; }

    /**
     * <p>The version of the model that you want to start.</p>
     */
    inline void SetModelVersion(const Aws::String& value) { m_modelVersionHasBeenSet = true; m_modelVersion = value; }

    /**
     * <p>The version of the model that you want to start.</p>
     */
    inline void SetModelVersion(Aws::String&& value) { m_modelVersionHasBeenSet = true; m_modelVersion = std::move(value); }

    /**
     * <p>The version of the model that you want to start.</p>
     */
    inline void SetModelVersion(const char* value) { m_modelVersionHasBeenSet = true; m_modelVersion.assign(value); }

    /**
     * <p>The version of the model that you want to start.</p>
     */
    inline StartModelRequest& WithModelVersion(const Aws::String& value) { SetModelVersion(value); return *this;}

    /**
     * <p>The version of the model that you want to start.</p>
     */
    inline StartModelRequest& WithModelVersion(Aws::String&& value) { SetModelVersion(std::move(value)); return *this;}

    /**
     * <p>The version of the model that you want to start.</p>
     */
    inline StartModelRequest& WithModelVersion(const char* value) { SetModelVersion(value); return *this;}


    /**
     * <p>The minimum number of inference units to use. A single inference unit
     * represents 1 hour of processing. Use a higher number to increase the TPS
     * throughput of your model. You are charged for the number of inference units
     * that you use.</p>
     */
    inline int GetMinInferenceUnits() const{ return m_minInferenceUnits; }

    /**
     * <p>The minimum number of inference units to use. A single inference unit
     * represents 1 hour of processing. Use a higher number to increase the TPS
     * throughput of your model. You are charged for the number of inference units
     * that you use.</p>
     */
    inline bool MinInferenceUnitsHasBeenSet() const { return m_minInferenceUnitsHasBeenSet; }

    /**
     * <p>The minimum number of inference units to use. A single inference unit
     * represents 1 hour of processing. Use a higher number to increase the TPS
     * throughput of your model. You are charged for the number of inference units
     * that you use.</p>
     */
    inline void SetMinInferenceUnits(int value) { m_minInferenceUnitsHasBeenSet = true; m_minInferenceUnits = value; }

    /**
     * <p>The minimum number of inference units to use. A single inference unit
     * represents 1 hour of processing. Use a higher number to increase the TPS
     * throughput of your model. You are charged for the number of inference units
     * that you use.</p>
     */
    inline StartModelRequest& WithMinInferenceUnits(int value) { SetMinInferenceUnits(value); return *this;}
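
    // Capacity sketch (illustrative only, not part of the generated API): the
    // fluent setters above can be chained when building a request. The project
    // name, model version, and unit count below are hypothetical example values.
    // One inference unit represents one hour of processing capacity; a higher
    // count raises TPS throughput and also the hourly charge.
    //
    //   StartModelRequest request;
    //   request.WithProjectName("my-project")   // hypothetical project name
    //          .WithModelVersion("1")           // hypothetical model version
    //          .WithMinInferenceUnits(1);       // start with one inference unit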

    /**
     * <p>ClientToken is an idempotency token that ensures a call to
     * <code>StartModel</code> completes only once. You choose the value to pass.
     * For example, an issue might prevent you from getting a response from
     * <code>StartModel</code>. In this case, safely retry your call to
     * <code>StartModel</code> by using the same <code>ClientToken</code> parameter
     * value.</p> <p>If you don't supply a value for <code>ClientToken</code>, the
     * AWS SDK you are using inserts a value for you. This prevents retries after a
     * network error from making multiple start requests. You'll need to provide
     * your own value for other use cases.</p> <p>An error occurs if the other
     * input parameters are not the same as in the first request. Using a different
     * value for <code>ClientToken</code> is considered a new call to
     * <code>StartModel</code>. An idempotency token is active for 8 hours.</p>
     */
    inline const Aws::String& GetClientToken() const{ return m_clientToken; }

    /**
     * <p>ClientToken is an idempotency token that ensures a call to
     * <code>StartModel</code> completes only once. You choose the value to pass.
     * For example, an issue might prevent you from getting a response from
     * <code>StartModel</code>. In this case, safely retry your call to
     * <code>StartModel</code> by using the same <code>ClientToken</code> parameter
     * value.</p> <p>If you don't supply a value for <code>ClientToken</code>, the
     * AWS SDK you are using inserts a value for you. This prevents retries after a
     * network error from making multiple start requests. You'll need to provide
     * your own value for other use cases.</p> <p>An error occurs if the other
     * input parameters are not the same as in the first request. Using a different
     * value for <code>ClientToken</code> is considered a new call to
     * <code>StartModel</code>. An idempotency token is active for 8 hours.</p>
     */
    inline bool ClientTokenHasBeenSet() const { return m_clientTokenHasBeenSet; }

    /**
     * <p>ClientToken is an idempotency token that ensures a call to
     * <code>StartModel</code> completes only once. You choose the value to pass.
     * For example, an issue might prevent you from getting a response from
     * <code>StartModel</code>. In this case, safely retry your call to
     * <code>StartModel</code> by using the same <code>ClientToken</code> parameter
     * value.</p> <p>If you don't supply a value for <code>ClientToken</code>, the
     * AWS SDK you are using inserts a value for you. This prevents retries after a
     * network error from making multiple start requests. You'll need to provide
     * your own value for other use cases.</p> <p>An error occurs if the other
     * input parameters are not the same as in the first request. Using a different
     * value for <code>ClientToken</code> is considered a new call to
     * <code>StartModel</code>. An idempotency token is active for 8 hours.</p>
     */
    inline void SetClientToken(const Aws::String& value) { m_clientTokenHasBeenSet = true; m_clientToken = value; }

    /**
     * <p>ClientToken is an idempotency token that ensures a call to
     * <code>StartModel</code> completes only once. You choose the value to pass.
     * For example, an issue might prevent you from getting a response from
     * <code>StartModel</code>. In this case, safely retry your call to
     * <code>StartModel</code> by using the same <code>ClientToken</code> parameter
     * value.</p> <p>If you don't supply a value for <code>ClientToken</code>, the
     * AWS SDK you are using inserts a value for you. This prevents retries after a
     * network error from making multiple start requests. You'll need to provide
     * your own value for other use cases.</p> <p>An error occurs if the other
     * input parameters are not the same as in the first request. Using a different
     * value for <code>ClientToken</code> is considered a new call to
     * <code>StartModel</code>. An idempotency token is active for 8 hours.</p>
     */
    inline void SetClientToken(Aws::String&& value) { m_clientTokenHasBeenSet = true; m_clientToken = std::move(value); }

    /**
     * <p>ClientToken is an idempotency token that ensures a call to
     * <code>StartModel</code> completes only once. You choose the value to pass.
     * For example, an issue might prevent you from getting a response from
     * <code>StartModel</code>. In this case, safely retry your call to
     * <code>StartModel</code> by using the same <code>ClientToken</code> parameter
     * value.</p> <p>If you don't supply a value for <code>ClientToken</code>, the
     * AWS SDK you are using inserts a value for you. This prevents retries after a
     * network error from making multiple start requests. You'll need to provide
     * your own value for other use cases.</p> <p>An error occurs if the other
     * input parameters are not the same as in the first request. Using a different
     * value for <code>ClientToken</code> is considered a new call to
     * <code>StartModel</code>. An idempotency token is active for 8 hours.</p>
     */
    inline void SetClientToken(const char* value) { m_clientTokenHasBeenSet = true; m_clientToken.assign(value); }

    /**
     * <p>ClientToken is an idempotency token that ensures a call to
     * <code>StartModel</code> completes only once. You choose the value to pass.
     * For example, an issue might prevent you from getting a response from
     * <code>StartModel</code>. In this case, safely retry your call to
     * <code>StartModel</code> by using the same <code>ClientToken</code> parameter
     * value.</p> <p>If you don't supply a value for <code>ClientToken</code>, the
     * AWS SDK you are using inserts a value for you. This prevents retries after a
     * network error from making multiple start requests. You'll need to provide
     * your own value for other use cases.</p> <p>An error occurs if the other
     * input parameters are not the same as in the first request. Using a different
     * value for <code>ClientToken</code> is considered a new call to
     * <code>StartModel</code>. An idempotency token is active for 8 hours.</p>
     */
    inline StartModelRequest& WithClientToken(const Aws::String& value) { SetClientToken(value); return *this;}

    /**
     * <p>ClientToken is an idempotency token that ensures a call to
     * <code>StartModel</code> completes only once. You choose the value to pass.
     * For example, an issue might prevent you from getting a response from
     * <code>StartModel</code>. In this case, safely retry your call to
     * <code>StartModel</code> by using the same <code>ClientToken</code> parameter
     * value.</p> <p>If you don't supply a value for <code>ClientToken</code>, the
     * AWS SDK you are using inserts a value for you. This prevents retries after a
     * network error from making multiple start requests. You'll need to provide
     * your own value for other use cases.</p> <p>An error occurs if the other
     * input parameters are not the same as in the first request. Using a different
     * value for <code>ClientToken</code> is considered a new call to
     * <code>StartModel</code>. An idempotency token is active for 8 hours.</p>
     */
    inline StartModelRequest& WithClientToken(Aws::String&& value) { SetClientToken(std::move(value)); return *this;}

    /**
     * <p>ClientToken is an idempotency token that ensures a call to
     * <code>StartModel</code> completes only once. You choose the value to pass.
     * For example, an issue might prevent you from getting a response from
     * <code>StartModel</code>. In this case, safely retry your call to
     * <code>StartModel</code> by using the same <code>ClientToken</code> parameter
     * value.</p> <p>If you don't supply a value for <code>ClientToken</code>, the
     * AWS SDK you are using inserts a value for you. This prevents retries after a
     * network error from making multiple start requests. You'll need to provide
     * your own value for other use cases.</p> <p>An error occurs if the other
     * input parameters are not the same as in the first request. Using a different
     * value for <code>ClientToken</code> is considered a new call to
     * <code>StartModel</code>. An idempotency token is active for 8 hours.</p>
     */
    inline StartModelRequest& WithClientToken(const char* value) { SetClientToken(value); return *this;}
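
    // Retry sketch (illustrative only; names and values are hypothetical):
    // supplying your own ClientToken lets an application retry the same logical
    // start safely. If the token is omitted, the SDK fills one in, which only
    // protects automatic retries after network errors.
    //
    //   Aws::String token = Aws::Utils::UUID::RandomUUID();  // from aws/core/utils/UUID.h
    //   StartModelRequest request;
    //   request.WithClientToken(token);  // reuse the same `token` on any manual retry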

    /**
     * <p>The maximum number of inference units to use for auto-scaling the model.
     * If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale
     * the model.</p>
     */
    inline int GetMaxInferenceUnits() const{ return m_maxInferenceUnits; }

    /**
     * <p>The maximum number of inference units to use for auto-scaling the model.
     * If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale
     * the model.</p>
     */
    inline bool MaxInferenceUnitsHasBeenSet() const { return m_maxInferenceUnitsHasBeenSet; }

    /**
     * <p>The maximum number of inference units to use for auto-scaling the model.
     * If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale
     * the model.</p>
     */
    inline void SetMaxInferenceUnits(int value) { m_maxInferenceUnitsHasBeenSet = true; m_maxInferenceUnits = value; }

    /**
     * <p>The maximum number of inference units to use for auto-scaling the model.
     * If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale
     * the model.</p>
     */
    inline StartModelRequest& WithMaxInferenceUnits(int value) { SetMaxInferenceUnits(value); return *this;}

  private:

    Aws::String m_projectName;
    bool m_projectNameHasBeenSet = false;

    Aws::String m_modelVersion;
    bool m_modelVersionHasBeenSet = false;

    int m_minInferenceUnits;
    bool m_minInferenceUnitsHasBeenSet = false;

    Aws::String m_clientToken;
    bool m_clientTokenHasBeenSet = false;

    int m_maxInferenceUnits;
    bool m_maxInferenceUnitsHasBeenSet = false;
  };

} // namespace Model
} // namespace LookoutforVision
} // namespace Aws
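
// End-to-end usage sketch (illustrative only, not part of this header): a caller
// typically builds the request with the fluent setters and passes it to the
// service client's StartModel operation. The project name, model version, and
// unit counts are hypothetical example values; setting MaxInferenceUnits is
// optional and enables auto-scaling up to that ceiling. Assumes the SDK has been
// initialized with Aws::InitAPI.
//
//   #include <aws/lookoutvision/LookoutforVisionClient.h>
//
//   Aws::LookoutforVision::LookoutforVisionClient client;
//   Aws::LookoutforVision::Model::StartModelRequest request;
//   request.WithProjectName("my-project")
//          .WithModelVersion("1")
//          .WithMinInferenceUnits(1)
//          .WithMaxInferenceUnits(2);
//   auto outcome = client.StartModel(request);
//   if (!outcome.IsSuccess()) {
//     // Inspect outcome.GetError(); retry with the same ClientToken if appropriate.
//   }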