/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/autoscaling/AutoScaling_EXPORTS.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/client/AWSClient.h>
#include <aws/core/client/AWSClientAsyncCRTP.h>
#include <aws/core/AmazonSerializableWebServiceRequest.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/autoscaling/AutoScalingServiceClientModel.h>

namespace Aws
{
namespace AutoScaling
{
  /**
   * Amazon EC2 Auto Scaling

   * Amazon EC2 Auto Scaling is designed to automatically launch and terminate EC2 instances
   * based on user-defined scaling policies, scheduled actions, and health checks.

   * For more information, see the Amazon EC2 Auto Scaling User Guide and the
   * Amazon EC2 Auto Scaling API Reference.
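   *
   * A minimal usage sketch (an illustration, not generated documentation; the
   * Region value is a placeholder):
   * @code
   * #include <aws/core/Aws.h>
   * #include <aws/autoscaling/AutoScalingClient.h>
   *
   * int main()
   * {
   *   Aws::SDKOptions options;
   *   Aws::InitAPI(options);                        // once per process
   *   {
   *     Aws::AutoScaling::AutoScalingClientConfiguration config;
   *     config.region = "us-east-1";                // placeholder Region
   *     Aws::AutoScaling::AutoScalingClient client(config);
   *     // ... call client operations here ...
   *   }
   *   Aws::ShutdownAPI(options);                    // after all clients are destroyed
   *   return 0;
   * }
   * @endcode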

   */
  class AWS_AUTOSCALING_API AutoScalingClient : public Aws::Client::AWSXMLClient,
                                                public Aws::Client::ClientWithAsyncTemplateMethods<AutoScalingClient>
  {
  public:
    typedef Aws::Client::AWSXMLClient BASECLASS;
    static const char* SERVICE_NAME;
    static const char* ALLOCATION_TAG;

    typedef AutoScalingClientConfiguration ClientConfigurationType;
    typedef AutoScalingEndpointProvider EndpointProviderType;

    /**
     * Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config.
     * If client config is not specified, it will be initialized to default values.
     */
    AutoScalingClient(const Aws::AutoScaling::AutoScalingClientConfiguration& clientConfiguration = Aws::AutoScaling::AutoScalingClientConfiguration(),
                      std::shared_ptr<AutoScalingEndpointProviderBase> endpointProvider = Aws::MakeShared<AutoScalingEndpointProvider>(ALLOCATION_TAG));

    /**
     * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config.
     * If client config is not specified, it will be initialized to default values.
     */
    AutoScalingClient(const Aws::Auth::AWSCredentials& credentials,
                      std::shared_ptr<AutoScalingEndpointProviderBase> endpointProvider = Aws::MakeShared<AutoScalingEndpointProvider>(ALLOCATION_TAG),
                      const Aws::AutoScaling::AutoScalingClientConfiguration& clientConfiguration = Aws::AutoScaling::AutoScalingClientConfiguration());

    /**
     * Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied,
     * the default http client factory will be used.
     */
    AutoScalingClient(const std::shared_ptr<Aws::Auth::AWSCredentialsProvider>& credentialsProvider,
                      std::shared_ptr<AutoScalingEndpointProviderBase> endpointProvider = Aws::MakeShared<AutoScalingEndpointProvider>(ALLOCATION_TAG),
                      const Aws::AutoScaling::AutoScalingClientConfiguration& clientConfiguration = Aws::AutoScaling::AutoScalingClientConfiguration());

    /* Legacy constructors due to deprecation */
    /**
     * Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config.
     * If client config is not specified, it will be initialized to default values.
     */
    AutoScalingClient(const Aws::Client::ClientConfiguration& clientConfiguration);

    /**
     * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config.
     * If client config is not specified, it will be initialized to default values.
     */
    AutoScalingClient(const Aws::Auth::AWSCredentials& credentials,
                      const Aws::Client::ClientConfiguration& clientConfiguration);

    /**
     * Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied,
     * the default http client factory will be used.
     */
    AutoScalingClient(const std::shared_ptr<Aws::Auth::AWSCredentialsProvider>& credentialsProvider,
                      const Aws::Client::ClientConfiguration& clientConfiguration);

    /* End of legacy constructors due to deprecation */
    virtual ~AutoScalingClient();

    /**
     * Converts any request object to a presigned URL with the GET method, using region for the signer and a timeout of 15 minutes.
     */
    Aws::String ConvertRequestToPresignedUrl(const Aws::AmazonSerializableWebServiceRequest& requestToConvert, const char* region) const;

    /**

Attaches one or more EC2 instances to the specified Auto Scaling group.

*

When you attach instances, Amazon EC2 Auto Scaling increases the desired * capacity of the group by the number of instances being attached. If the number * of instances being attached plus the desired capacity of the group exceeds the * maximum size of the group, the operation fails.

If there is a Classic * Load Balancer attached to your Auto Scaling group, the instances are also * registered with the load balancer. If there are target groups attached to your * Auto Scaling group, the instances are also registered with the target * groups.

For more information, see Attach * EC2 instances to your Auto Scaling group in the Amazon EC2 Auto Scaling * User Guide.

See Also:

AWS * API Reference
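     *
     * Example (a minimal sketch; assumes an already-constructed AutoScalingClient
     * named client, with placeholder group and instance IDs):
     * @code
     * Aws::AutoScaling::Model::AttachInstancesRequest request;
     * request.SetAutoScalingGroupName("my-asg");
     * request.AddInstanceIds("i-0123456789abcdef0");
     * auto outcome = client.AttachInstances(request);
     * if (!outcome.IsSuccess()) {
     *   // outcome.GetError() describes the failure, e.g. the attach would exceed MaxSize
     * }
     * @endcode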

     */
    virtual Model::AttachInstancesOutcome AttachInstances(const Model::AttachInstancesRequest& request) const;

    /**
     * A Callable wrapper for AttachInstances that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename AttachInstancesRequestT = Model::AttachInstancesRequest>
    Model::AttachInstancesOutcomeCallable AttachInstancesCallable(const AttachInstancesRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::AttachInstances, request);
    }

    /**
     * An Async wrapper for AttachInstances that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename AttachInstancesRequestT = Model::AttachInstancesRequest>
    void AttachInstancesAsync(const AttachInstancesRequestT& request, const AttachInstancesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::AttachInstances, request, handler, context);
    }

    /**

This API operation is superseded by AttachTrafficSources, which can attach multiple traffic source types. We recommend using AttachTrafficSources to simplify how you manage traffic sources. However, we continue to support AttachLoadBalancerTargetGroups. You can use both the original AttachLoadBalancerTargetGroups API operation and AttachTrafficSources on the same Auto Scaling group.

Attaches one or more target groups to the specified Auto * Scaling group.

This operation is used with the following load balancer * types:

  • Application Load Balancer - Operates at the application layer (layer 7) and supports HTTP and HTTPS.

  • Network Load Balancer - Operates at the transport layer (layer 4) and supports TCP, TLS, and UDP.

  • Gateway Load Balancer - Operates at the network layer (layer 3).

To describe the target groups for an Auto Scaling * group, call the DescribeLoadBalancerTargetGroups API. To detach the * target group from the Auto Scaling group, call the * DetachLoadBalancerTargetGroups API.

This operation is additive and * does not detach existing target groups or Classic Load Balancers from the Auto * Scaling group.

For more information, see Use * Elastic Load Balancing to distribute traffic across the instances in your Auto * Scaling group in the Amazon EC2 Auto Scaling User Guide. *

See Also:

AWS * API Reference
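     *
     * Example (a minimal sketch; the client and the target group ARN are
     * placeholder assumptions):
     * @code
     * Aws::AutoScaling::Model::AttachLoadBalancerTargetGroupsRequest request;
     * request.SetAutoScalingGroupName("my-asg");
     * request.AddTargetGroupARNs("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-targets/0123456789abcdef");
     * auto outcome = client.AttachLoadBalancerTargetGroups(request);
     * @endcode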

     */
    virtual Model::AttachLoadBalancerTargetGroupsOutcome AttachLoadBalancerTargetGroups(const Model::AttachLoadBalancerTargetGroupsRequest& request) const;

    /**
     * A Callable wrapper for AttachLoadBalancerTargetGroups that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename AttachLoadBalancerTargetGroupsRequestT = Model::AttachLoadBalancerTargetGroupsRequest>
    Model::AttachLoadBalancerTargetGroupsOutcomeCallable AttachLoadBalancerTargetGroupsCallable(const AttachLoadBalancerTargetGroupsRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::AttachLoadBalancerTargetGroups, request);
    }

    /**
     * An Async wrapper for AttachLoadBalancerTargetGroups that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename AttachLoadBalancerTargetGroupsRequestT = Model::AttachLoadBalancerTargetGroupsRequest>
    void AttachLoadBalancerTargetGroupsAsync(const AttachLoadBalancerTargetGroupsRequestT& request, const AttachLoadBalancerTargetGroupsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::AttachLoadBalancerTargetGroups, request, handler, context);
    }

    /**

This API operation is superseded by AttachTrafficSources, which can attach multiple traffic source types. We recommend using AttachTrafficSources to simplify how you manage traffic sources. However, we continue to support AttachLoadBalancers. You can use both the original AttachLoadBalancers API operation and AttachTrafficSources on the same Auto Scaling group.

*

Attaches one or more Classic Load Balancers to the specified Auto Scaling * group. Amazon EC2 Auto Scaling registers the running instances with these * Classic Load Balancers.

To describe the load balancers for an Auto * Scaling group, call the DescribeLoadBalancers API. To detach a load * balancer from the Auto Scaling group, call the DetachLoadBalancers * API.

This operation is additive and does not detach existing Classic Load * Balancers or target groups from the Auto Scaling group.

For more * information, see Use * Elastic Load Balancing to distribute traffic across the instances in your Auto * Scaling group in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference

     */
    virtual Model::AttachLoadBalancersOutcome AttachLoadBalancers(const Model::AttachLoadBalancersRequest& request) const;

    /**
     * A Callable wrapper for AttachLoadBalancers that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename AttachLoadBalancersRequestT = Model::AttachLoadBalancersRequest>
    Model::AttachLoadBalancersOutcomeCallable AttachLoadBalancersCallable(const AttachLoadBalancersRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::AttachLoadBalancers, request);
    }

    /**
     * An Async wrapper for AttachLoadBalancers that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename AttachLoadBalancersRequestT = Model::AttachLoadBalancersRequest>
    void AttachLoadBalancersAsync(const AttachLoadBalancersRequestT& request, const AttachLoadBalancersResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::AttachLoadBalancers, request, handler, context);
    }

    /**

Attaches one or more traffic sources to the specified Auto Scaling group.

*

You can use any of the following as traffic sources for an Auto Scaling * group:

  • Application Load Balancer

  • Classic Load Balancer

  • Gateway Load Balancer

  • Network Load Balancer

  • VPC Lattice

This operation is * additive and does not detach existing traffic sources from the Auto Scaling * group.

After the operation completes, use the * DescribeTrafficSources API to return details about the state of the * attachments between traffic sources and your Auto Scaling group. To detach a * traffic source from the Auto Scaling group, call the DetachTrafficSources * API.

See Also:

AWS * API Reference
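     *
     * Example (a minimal sketch; the ARN is a placeholder, and "elbv2" is the
     * traffic source type used for Elastic Load Balancing target groups):
     * @code
     * Aws::AutoScaling::Model::TrafficSourceIdentifier source;
     * source.SetIdentifier("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-targets/0123456789abcdef");
     * source.SetType("elbv2");
     * Aws::AutoScaling::Model::AttachTrafficSourcesRequest request;
     * request.SetAutoScalingGroupName("my-asg");
     * request.AddTrafficSources(source);
     * auto outcome = client.AttachTrafficSources(request);
     * @endcode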

     */
    virtual Model::AttachTrafficSourcesOutcome AttachTrafficSources(const Model::AttachTrafficSourcesRequest& request) const;

    /**
     * A Callable wrapper for AttachTrafficSources that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename AttachTrafficSourcesRequestT = Model::AttachTrafficSourcesRequest>
    Model::AttachTrafficSourcesOutcomeCallable AttachTrafficSourcesCallable(const AttachTrafficSourcesRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::AttachTrafficSources, request);
    }

    /**
     * An Async wrapper for AttachTrafficSources that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename AttachTrafficSourcesRequestT = Model::AttachTrafficSourcesRequest>
    void AttachTrafficSourcesAsync(const AttachTrafficSourcesRequestT& request, const AttachTrafficSourcesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::AttachTrafficSources, request, handler, context);
    }

    /**

Deletes one or more scheduled actions for the specified Auto Scaling * group.

See Also:

AWS * API Reference

     */
    virtual Model::BatchDeleteScheduledActionOutcome BatchDeleteScheduledAction(const Model::BatchDeleteScheduledActionRequest& request) const;

    /**
     * A Callable wrapper for BatchDeleteScheduledAction that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename BatchDeleteScheduledActionRequestT = Model::BatchDeleteScheduledActionRequest>
    Model::BatchDeleteScheduledActionOutcomeCallable BatchDeleteScheduledActionCallable(const BatchDeleteScheduledActionRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::BatchDeleteScheduledAction, request);
    }

    /**
     * An Async wrapper for BatchDeleteScheduledAction that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename BatchDeleteScheduledActionRequestT = Model::BatchDeleteScheduledActionRequest>
    void BatchDeleteScheduledActionAsync(const BatchDeleteScheduledActionRequestT& request, const BatchDeleteScheduledActionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::BatchDeleteScheduledAction, request, handler, context);
    }

    /**

Creates or updates one or more scheduled scaling actions for an Auto Scaling * group.

See Also:

AWS * API Reference
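     *
     * Example (a minimal sketch; the action name and cron recurrence are
     * placeholder values):
     * @code
     * Aws::AutoScaling::Model::ScheduledUpdateGroupActionRequest action;
     * action.SetScheduledActionName("scale-up-weekday-mornings");
     * action.SetRecurrence("0 9 * * 1-5");  // cron expression, in UTC by default
     * action.SetDesiredCapacity(4);
     * Aws::AutoScaling::Model::BatchPutScheduledUpdateGroupActionRequest request;
     * request.SetAutoScalingGroupName("my-asg");
     * request.AddScheduledUpdateGroupActions(action);
     * auto outcome = client.BatchPutScheduledUpdateGroupAction(request);
     * @endcode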

     */
    virtual Model::BatchPutScheduledUpdateGroupActionOutcome BatchPutScheduledUpdateGroupAction(const Model::BatchPutScheduledUpdateGroupActionRequest& request) const;

    /**
     * A Callable wrapper for BatchPutScheduledUpdateGroupAction that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename BatchPutScheduledUpdateGroupActionRequestT = Model::BatchPutScheduledUpdateGroupActionRequest>
    Model::BatchPutScheduledUpdateGroupActionOutcomeCallable BatchPutScheduledUpdateGroupActionCallable(const BatchPutScheduledUpdateGroupActionRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::BatchPutScheduledUpdateGroupAction, request);
    }

    /**
     * An Async wrapper for BatchPutScheduledUpdateGroupAction that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename BatchPutScheduledUpdateGroupActionRequestT = Model::BatchPutScheduledUpdateGroupActionRequest>
    void BatchPutScheduledUpdateGroupActionAsync(const BatchPutScheduledUpdateGroupActionRequestT& request, const BatchPutScheduledUpdateGroupActionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::BatchPutScheduledUpdateGroupAction, request, handler, context);
    }

    /**

Cancels an instance refresh or rollback that is in progress. If an instance * refresh or rollback is not in progress, an * ActiveInstanceRefreshNotFound error occurs.

This operation * is part of the instance * refresh feature in Amazon EC2 Auto Scaling, which helps you update instances * in your Auto Scaling group after you make configuration changes.

When you * cancel an instance refresh, this does not roll back any changes that it made. * Use the RollbackInstanceRefresh API to roll back instead.

See * Also:

AWS * API Reference
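     *
     * Example (a minimal sketch; the group name is a placeholder):
     * @code
     * Aws::AutoScaling::Model::CancelInstanceRefreshRequest request;
     * request.SetAutoScalingGroupName("my-asg");
     * auto outcome = client.CancelInstanceRefresh(request);
     * if (outcome.IsSuccess()) {
     *   // ID of the instance refresh that is being cancelled
     *   const Aws::String& refreshId = outcome.GetResult().GetInstanceRefreshId();
     * }
     * @endcode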

     */
    virtual Model::CancelInstanceRefreshOutcome CancelInstanceRefresh(const Model::CancelInstanceRefreshRequest& request) const;

    /**
     * A Callable wrapper for CancelInstanceRefresh that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename CancelInstanceRefreshRequestT = Model::CancelInstanceRefreshRequest>
    Model::CancelInstanceRefreshOutcomeCallable CancelInstanceRefreshCallable(const CancelInstanceRefreshRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::CancelInstanceRefresh, request);
    }

    /**
     * An Async wrapper for CancelInstanceRefresh that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename CancelInstanceRefreshRequestT = Model::CancelInstanceRefreshRequest>
    void CancelInstanceRefreshAsync(const CancelInstanceRefreshRequestT& request, const CancelInstanceRefreshResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::CancelInstanceRefresh, request, handler, context);
    }

    /**

Completes the lifecycle action for the specified token or instance with the * specified result.

This step is a part of the procedure for adding a * lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a launch template or launch configuration with a user data script that runs while an instance is in a wait state due to a lifecycle hook.

  2. (Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when an instance is put into a wait state due to a lifecycle hook.

  3. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  4. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  5. If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state.

  6. If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call, as in the example below.

For more * information, see Complete * a lifecycle action in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference
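     *
     * Example of the step 6 callback (a minimal sketch; the group, hook, and
     * instance names are placeholders):
     * @code
     * Aws::AutoScaling::Model::CompleteLifecycleActionRequest request;
     * request.SetAutoScalingGroupName("my-asg");
     * request.SetLifecycleHookName("my-launch-hook");
     * request.SetInstanceId("i-0123456789abcdef0");
     * request.SetLifecycleActionResult("CONTINUE");  // or "ABANDON"
     * auto outcome = client.CompleteLifecycleAction(request);
     * @endcode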

     */
    virtual Model::CompleteLifecycleActionOutcome CompleteLifecycleAction(const Model::CompleteLifecycleActionRequest& request) const;

    /**
     * A Callable wrapper for CompleteLifecycleAction that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename CompleteLifecycleActionRequestT = Model::CompleteLifecycleActionRequest>
    Model::CompleteLifecycleActionOutcomeCallable CompleteLifecycleActionCallable(const CompleteLifecycleActionRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::CompleteLifecycleAction, request);
    }

    /**
     * An Async wrapper for CompleteLifecycleAction that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename CompleteLifecycleActionRequestT = Model::CompleteLifecycleActionRequest>
    void CompleteLifecycleActionAsync(const CompleteLifecycleActionRequestT& request, const CompleteLifecycleActionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::CompleteLifecycleAction, request, handler, context);
    }

    /**

We strongly recommend using a launch template when calling this operation * to ensure full functionality for Amazon EC2 Auto Scaling and Amazon EC2. *

Creates an Auto Scaling group with the specified name and attributes. *

If you exceed your maximum limit of Auto Scaling groups, the call fails. * To query this limit, call the DescribeAccountLimits API. For information * about updating this limit, see Quotas * for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User * Guide.

For introductory exercises for creating an Auto Scaling group, * see Getting * started with Amazon EC2 Auto Scaling and Tutorial: * Set up a scaled and load-balanced application in the Amazon EC2 Auto * Scaling User Guide. For more information, see Auto * Scaling groups in the Amazon EC2 Auto Scaling User Guide.

*

Every Auto Scaling group has three size properties * (DesiredCapacity, MaxSize, and MinSize). * Usually, you set these sizes based on a specific number of instances. However, * if you configure a mixed instances policy that defines weights for the instance * types, you must specify these sizes with the same units that you use for * weighting instances.

See Also:

AWS * API Reference
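     *
     * Example (a minimal sketch using a launch template; all names and IDs are
     * placeholders):
     * @code
     * Aws::AutoScaling::Model::LaunchTemplateSpecification launchTemplate;
     * launchTemplate.SetLaunchTemplateName("my-template");
     * launchTemplate.SetVersion("$Default");
     * Aws::AutoScaling::Model::CreateAutoScalingGroupRequest request;
     * request.SetAutoScalingGroupName("my-asg");
     * request.SetLaunchTemplate(launchTemplate);
     * request.SetMinSize(1);
     * request.SetMaxSize(4);
     * request.SetDesiredCapacity(2);
     * request.SetVPCZoneIdentifier("subnet-0123456789abcdef0");  // comma-separated subnet IDs
     * auto outcome = client.CreateAutoScalingGroup(request);
     * @endcode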

     */
    virtual Model::CreateAutoScalingGroupOutcome CreateAutoScalingGroup(const Model::CreateAutoScalingGroupRequest& request) const;

    /**
     * A Callable wrapper for CreateAutoScalingGroup that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename CreateAutoScalingGroupRequestT = Model::CreateAutoScalingGroupRequest>
    Model::CreateAutoScalingGroupOutcomeCallable CreateAutoScalingGroupCallable(const CreateAutoScalingGroupRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::CreateAutoScalingGroup, request);
    }

    /**
     * An Async wrapper for CreateAutoScalingGroup that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename CreateAutoScalingGroupRequestT = Model::CreateAutoScalingGroupRequest>
    void CreateAutoScalingGroupAsync(const CreateAutoScalingGroupRequestT& request, const CreateAutoScalingGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::CreateAutoScalingGroup, request, handler, context);
    }

    /**

Creates a launch configuration.

If you exceed your maximum limit of * launch configurations, the call fails. To query this limit, call the * DescribeAccountLimits API. For information about updating this limit, see * Quotas * for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User * Guide.

For more information, see Launch * configurations in the Amazon EC2 Auto Scaling User Guide.

*

Amazon EC2 Auto Scaling configures instances launched as part of an Auto * Scaling group using either a launch template or a launch configuration. We * strongly recommend that you do not use launch configurations. They do not * provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. For * information about using launch templates, see Launch * templates in the Amazon EC2 Auto Scaling User Guide.

*

See Also:

AWS * API Reference

     */
    virtual Model::CreateLaunchConfigurationOutcome CreateLaunchConfiguration(const Model::CreateLaunchConfigurationRequest& request) const;

    /**
     * A Callable wrapper for CreateLaunchConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename CreateLaunchConfigurationRequestT = Model::CreateLaunchConfigurationRequest>
    Model::CreateLaunchConfigurationOutcomeCallable CreateLaunchConfigurationCallable(const CreateLaunchConfigurationRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::CreateLaunchConfiguration, request);
    }

    /**
     * An Async wrapper for CreateLaunchConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename CreateLaunchConfigurationRequestT = Model::CreateLaunchConfigurationRequest>
    void CreateLaunchConfigurationAsync(const CreateLaunchConfigurationRequestT& request, const CreateLaunchConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::CreateLaunchConfiguration, request, handler, context);
    }

    /**

Creates or updates tags for the specified Auto Scaling group.

When you * specify a tag with a key that already exists, the operation overwrites the * previous tag definition, and you do not get an error message.

For more * information, see Tag * Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference
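     *
     * Example (a minimal sketch; the group name, key, and value are placeholders):
     * @code
     * Aws::AutoScaling::Model::Tag tag;
     * tag.SetResourceId("my-asg");
     * tag.SetResourceType("auto-scaling-group");
     * tag.SetKey("environment");
     * tag.SetValue("test");
     * tag.SetPropagateAtLaunch(true);  // propagate the tag to instances the group launches
     * Aws::AutoScaling::Model::CreateOrUpdateTagsRequest request;
     * request.AddTags(tag);
     * auto outcome = client.CreateOrUpdateTags(request);
     * @endcode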

     */
    virtual Model::CreateOrUpdateTagsOutcome CreateOrUpdateTags(const Model::CreateOrUpdateTagsRequest& request) const;

    /**
     * A Callable wrapper for CreateOrUpdateTags that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename CreateOrUpdateTagsRequestT = Model::CreateOrUpdateTagsRequest>
    Model::CreateOrUpdateTagsOutcomeCallable CreateOrUpdateTagsCallable(const CreateOrUpdateTagsRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::CreateOrUpdateTags, request);
    }

    /**
     * An Async wrapper for CreateOrUpdateTags that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename CreateOrUpdateTagsRequestT = Model::CreateOrUpdateTagsRequest>
    void CreateOrUpdateTagsAsync(const CreateOrUpdateTagsRequestT& request, const CreateOrUpdateTagsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::CreateOrUpdateTags, request, handler, context);
    }

    /**

Deletes the specified Auto Scaling group.

If the group has instances * or scaling activities in progress, you must specify the option to force the * deletion in order for it to succeed. The force delete operation will also * terminate the EC2 instances. If the group has a warm pool, the force delete * option also deletes the warm pool.

To remove instances from the Auto * Scaling group before deleting it, call the DetachInstances API with the * list of instances and the option to decrement the desired capacity. This ensures * that Amazon EC2 Auto Scaling does not launch replacement instances.

To * terminate all instances before deleting the Auto Scaling group, call the * UpdateAutoScalingGroup API and set the minimum size and desired capacity * of the Auto Scaling group to zero.

If the group has scaling policies, * deleting the group deletes the policies, the underlying alarm actions, and any * alarm that no longer has an associated action.

For more information, see * Delete * your Auto Scaling infrastructure in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference
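     *
     * Example (a minimal sketch; ForceDelete also terminates the group's
     * remaining instances):
     * @code
     * Aws::AutoScaling::Model::DeleteAutoScalingGroupRequest request;
     * request.SetAutoScalingGroupName("my-asg");  // placeholder name
     * request.SetForceDelete(true);
     * auto outcome = client.DeleteAutoScalingGroup(request);
     * @endcode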

     */
    virtual Model::DeleteAutoScalingGroupOutcome DeleteAutoScalingGroup(const Model::DeleteAutoScalingGroupRequest& request) const;

    /**
     * A Callable wrapper for DeleteAutoScalingGroup that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DeleteAutoScalingGroupRequestT = Model::DeleteAutoScalingGroupRequest>
    Model::DeleteAutoScalingGroupOutcomeCallable DeleteAutoScalingGroupCallable(const DeleteAutoScalingGroupRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DeleteAutoScalingGroup, request);
    }

    /**
     * An Async wrapper for DeleteAutoScalingGroup that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DeleteAutoScalingGroupRequestT = Model::DeleteAutoScalingGroupRequest>
    void DeleteAutoScalingGroupAsync(const DeleteAutoScalingGroupRequestT& request, const DeleteAutoScalingGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DeleteAutoScalingGroup, request, handler, context);
    }

    /**

Deletes the specified launch configuration.

The launch configuration * must not be attached to an Auto Scaling group. When this call completes, the * launch configuration is no longer available for use.

See Also:

* AWS * API Reference

     */
    virtual Model::DeleteLaunchConfigurationOutcome DeleteLaunchConfiguration(const Model::DeleteLaunchConfigurationRequest& request) const;

    /**
     * A Callable wrapper for DeleteLaunchConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DeleteLaunchConfigurationRequestT = Model::DeleteLaunchConfigurationRequest>
    Model::DeleteLaunchConfigurationOutcomeCallable DeleteLaunchConfigurationCallable(const DeleteLaunchConfigurationRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DeleteLaunchConfiguration, request);
    }

    /**
     * An Async wrapper for DeleteLaunchConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DeleteLaunchConfigurationRequestT = Model::DeleteLaunchConfigurationRequest>
    void DeleteLaunchConfigurationAsync(const DeleteLaunchConfigurationRequestT& request, const DeleteLaunchConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DeleteLaunchConfiguration, request, handler, context);
    }

    /**

Deletes the specified lifecycle hook.

If there are any outstanding * lifecycle actions, they are completed first (ABANDON for launching * instances, CONTINUE for terminating instances).

See * Also:

AWS * API Reference

     */
    virtual Model::DeleteLifecycleHookOutcome DeleteLifecycleHook(const Model::DeleteLifecycleHookRequest& request) const;

    /**
     * A Callable wrapper for DeleteLifecycleHook that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DeleteLifecycleHookRequestT = Model::DeleteLifecycleHookRequest>
    Model::DeleteLifecycleHookOutcomeCallable DeleteLifecycleHookCallable(const DeleteLifecycleHookRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DeleteLifecycleHook, request);
    }

    /**
     * An Async wrapper for DeleteLifecycleHook that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DeleteLifecycleHookRequestT = Model::DeleteLifecycleHookRequest>
    void DeleteLifecycleHookAsync(const DeleteLifecycleHookRequestT& request, const DeleteLifecycleHookResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DeleteLifecycleHook, request, handler, context);
    }

    /**

Deletes the specified notification.

See Also:

AWS * API Reference

     */
    virtual Model::DeleteNotificationConfigurationOutcome DeleteNotificationConfiguration(const Model::DeleteNotificationConfigurationRequest& request) const;

    /**
     * A Callable wrapper for DeleteNotificationConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DeleteNotificationConfigurationRequestT = Model::DeleteNotificationConfigurationRequest>
    Model::DeleteNotificationConfigurationOutcomeCallable DeleteNotificationConfigurationCallable(const DeleteNotificationConfigurationRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DeleteNotificationConfiguration, request);
    }

    /**
     * An Async wrapper for DeleteNotificationConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DeleteNotificationConfigurationRequestT = Model::DeleteNotificationConfigurationRequest>
    void DeleteNotificationConfigurationAsync(const DeleteNotificationConfigurationRequestT& request, const DeleteNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DeleteNotificationConfiguration, request, handler, context);
    }

    /**

Deletes the specified scaling policy.

Deleting either a step scaling * policy or a simple scaling policy deletes the underlying alarm action, but does * not delete the alarm, even if it no longer has an associated action.

For * more information, see Deleting * a scaling policy in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference

     */
    virtual Model::DeletePolicyOutcome DeletePolicy(const Model::DeletePolicyRequest& request) const;

    /**
     * A Callable wrapper for DeletePolicy that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DeletePolicyRequestT = Model::DeletePolicyRequest>
    Model::DeletePolicyOutcomeCallable DeletePolicyCallable(const DeletePolicyRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DeletePolicy, request);
    }

    /**
     * An Async wrapper for DeletePolicy that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DeletePolicyRequestT = Model::DeletePolicyRequest>
    void DeletePolicyAsync(const DeletePolicyRequestT& request, const DeletePolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DeletePolicy, request, handler, context);
    }

    /**

Deletes the specified scheduled action.

See Also:

AWS * API Reference

     */
    virtual Model::DeleteScheduledActionOutcome DeleteScheduledAction(const Model::DeleteScheduledActionRequest& request) const;

    /**
     * A Callable wrapper for DeleteScheduledAction that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DeleteScheduledActionRequestT = Model::DeleteScheduledActionRequest>
    Model::DeleteScheduledActionOutcomeCallable DeleteScheduledActionCallable(const DeleteScheduledActionRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DeleteScheduledAction, request);
    }

    /**
     * An Async wrapper for DeleteScheduledAction that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DeleteScheduledActionRequestT = Model::DeleteScheduledActionRequest>
    void DeleteScheduledActionAsync(const DeleteScheduledActionRequestT& request, const DeleteScheduledActionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DeleteScheduledAction, request, handler, context);
    }

    /**

Deletes the specified tags.

See Also:

AWS * API Reference

     */
    virtual Model::DeleteTagsOutcome DeleteTags(const Model::DeleteTagsRequest& request) const;

    /**
     * A Callable wrapper for DeleteTags that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DeleteTagsRequestT = Model::DeleteTagsRequest>
    Model::DeleteTagsOutcomeCallable DeleteTagsCallable(const DeleteTagsRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DeleteTags, request);
    }

    /**
     * An Async wrapper for DeleteTags that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DeleteTagsRequestT = Model::DeleteTagsRequest>
    void DeleteTagsAsync(const DeleteTagsRequestT& request, const DeleteTagsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DeleteTags, request, handler, context);
    }

    /**

Deletes the warm pool for the specified Auto Scaling group.

For more * information, see Warm * pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference

     */
    virtual Model::DeleteWarmPoolOutcome DeleteWarmPool(const Model::DeleteWarmPoolRequest& request) const;

    /**
     * A Callable wrapper for DeleteWarmPool that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DeleteWarmPoolRequestT = Model::DeleteWarmPoolRequest>
    Model::DeleteWarmPoolOutcomeCallable DeleteWarmPoolCallable(const DeleteWarmPoolRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DeleteWarmPool, request);
    }

    /**
     * An Async wrapper for DeleteWarmPool that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DeleteWarmPoolRequestT = Model::DeleteWarmPoolRequest>
    void DeleteWarmPoolAsync(const DeleteWarmPoolRequestT& request, const DeleteWarmPoolResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DeleteWarmPool, request, handler, context);
    }

    /**

Describes the current Amazon EC2 Auto Scaling resource quotas for your * account.

When you establish an Amazon Web Services account, the account * has initial quotas on the maximum number of Auto Scaling groups and launch * configurations that you can create in a given Region. For more information, see * Quotas * for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference

     */
    virtual Model::DescribeAccountLimitsOutcome DescribeAccountLimits(const Model::DescribeAccountLimitsRequest& request) const;

    /**
     * A Callable wrapper for DescribeAccountLimits that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeAccountLimitsRequestT = Model::DescribeAccountLimitsRequest>
    Model::DescribeAccountLimitsOutcomeCallable DescribeAccountLimitsCallable(const DescribeAccountLimitsRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeAccountLimits, request);
    }

    /**
     * An Async wrapper for DescribeAccountLimits that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeAccountLimitsRequestT = Model::DescribeAccountLimitsRequest>
    void DescribeAccountLimitsAsync(const DescribeAccountLimitsRequestT& request, const DescribeAccountLimitsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeAccountLimits, request, handler, context);
    }

    /**

Describes the available adjustment types for step scaling and simple scaling * policies.

The following adjustment types are supported:

  • ChangeInCapacity

  • ExactCapacity

  • PercentChangeInCapacity

See Also:

AWS * API Reference

     */
    virtual Model::DescribeAdjustmentTypesOutcome DescribeAdjustmentTypes(const Model::DescribeAdjustmentTypesRequest& request) const;

    /**
     * A Callable wrapper for DescribeAdjustmentTypes that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeAdjustmentTypesRequestT = Model::DescribeAdjustmentTypesRequest>
    Model::DescribeAdjustmentTypesOutcomeCallable DescribeAdjustmentTypesCallable(const DescribeAdjustmentTypesRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeAdjustmentTypes, request);
    }

    /**
     * An Async wrapper for DescribeAdjustmentTypes that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeAdjustmentTypesRequestT = Model::DescribeAdjustmentTypesRequest>
    void DescribeAdjustmentTypesAsync(const DescribeAdjustmentTypesRequestT& request, const DescribeAdjustmentTypesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeAdjustmentTypes, request, handler, context);
    }

    /**

Gets information about the Auto Scaling groups in the account and Region.

*

If you specify Auto Scaling group names, the output includes information for * only the specified Auto Scaling groups. If you specify filters, the output * includes information for only those Auto Scaling groups that meet the filter * criteria. If you do not specify group names or filters, the output includes * information for all Auto Scaling groups.

This operation also returns * information about instances in Auto Scaling groups. To retrieve information * about the instances in a warm pool, you must call the DescribeWarmPool * API.

See Also:

AWS * API Reference
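     *
     * Example (a minimal sketch that pages through every group by passing
     * NextToken back into the request):
     * @code
     * Aws::AutoScaling::Model::DescribeAutoScalingGroupsRequest request;
     * Aws::String nextToken;
     * do {
     *   if (!nextToken.empty()) request.SetNextToken(nextToken);
     *   auto outcome = client.DescribeAutoScalingGroups(request);
     *   if (!outcome.IsSuccess()) break;
     *   for (const auto& group : outcome.GetResult().GetAutoScalingGroups()) {
     *     // group.GetAutoScalingGroupName(), group.GetDesiredCapacity(), ...
     *   }
     *   nextToken = outcome.GetResult().GetNextToken();
     * } while (!nextToken.empty());
     * @endcode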

     */
    virtual Model::DescribeAutoScalingGroupsOutcome DescribeAutoScalingGroups(const Model::DescribeAutoScalingGroupsRequest& request) const;

    /**
     * A Callable wrapper for DescribeAutoScalingGroups that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeAutoScalingGroupsRequestT = Model::DescribeAutoScalingGroupsRequest>
    Model::DescribeAutoScalingGroupsOutcomeCallable DescribeAutoScalingGroupsCallable(const DescribeAutoScalingGroupsRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeAutoScalingGroups, request);
    }

    /**
     * An Async wrapper for DescribeAutoScalingGroups that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeAutoScalingGroupsRequestT = Model::DescribeAutoScalingGroupsRequest>
    void DescribeAutoScalingGroupsAsync(const DescribeAutoScalingGroupsRequestT& request, const DescribeAutoScalingGroupsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeAutoScalingGroups, request, handler, context);
    }

    /**

Gets information about the Auto Scaling instances in the account and * Region.

See Also:

AWS * API Reference

     */
    virtual Model::DescribeAutoScalingInstancesOutcome DescribeAutoScalingInstances(const Model::DescribeAutoScalingInstancesRequest& request) const;

    /**
     * A Callable wrapper for DescribeAutoScalingInstances that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeAutoScalingInstancesRequestT = Model::DescribeAutoScalingInstancesRequest>
    Model::DescribeAutoScalingInstancesOutcomeCallable DescribeAutoScalingInstancesCallable(const DescribeAutoScalingInstancesRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeAutoScalingInstances, request);
    }

    /**
     * An Async wrapper for DescribeAutoScalingInstances that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeAutoScalingInstancesRequestT = Model::DescribeAutoScalingInstancesRequest>
    void DescribeAutoScalingInstancesAsync(const DescribeAutoScalingInstancesRequestT& request, const DescribeAutoScalingInstancesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeAutoScalingInstances, request, handler, context);
    }

    /**

Describes the notification types that are supported by Amazon EC2 Auto * Scaling.

See Also:

AWS * API Reference

     */
    virtual Model::DescribeAutoScalingNotificationTypesOutcome DescribeAutoScalingNotificationTypes(const Model::DescribeAutoScalingNotificationTypesRequest& request) const;

    /**
     * A Callable wrapper for DescribeAutoScalingNotificationTypes that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeAutoScalingNotificationTypesRequestT = Model::DescribeAutoScalingNotificationTypesRequest>
    Model::DescribeAutoScalingNotificationTypesOutcomeCallable DescribeAutoScalingNotificationTypesCallable(const DescribeAutoScalingNotificationTypesRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeAutoScalingNotificationTypes, request);
    }

    /**
     * An Async wrapper for DescribeAutoScalingNotificationTypes that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeAutoScalingNotificationTypesRequestT = Model::DescribeAutoScalingNotificationTypesRequest>
    void DescribeAutoScalingNotificationTypesAsync(const DescribeAutoScalingNotificationTypesRequestT& request, const DescribeAutoScalingNotificationTypesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeAutoScalingNotificationTypes, request, handler, context);
    }

    /**

Gets information about the instance refreshes for the specified Auto Scaling * group.

This operation is part of the instance * refresh feature in Amazon EC2 Auto Scaling, which helps you update instances * in your Auto Scaling group after you make configuration changes.

To help * you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns * information about the instance refreshes you previously initiated, including * their status, start time, end time, the percentage of the instance refresh that * is complete, and the number of instances remaining to update before the instance * refresh is complete. If a rollback is initiated while an instance refresh is in * progress, Amazon EC2 Auto Scaling also returns information about the rollback of * the instance refresh.

See Also:

AWS * API Reference

     */
    virtual Model::DescribeInstanceRefreshesOutcome DescribeInstanceRefreshes(const Model::DescribeInstanceRefreshesRequest& request) const;

    /**
     * A Callable wrapper for DescribeInstanceRefreshes that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeInstanceRefreshesRequestT = Model::DescribeInstanceRefreshesRequest>
    Model::DescribeInstanceRefreshesOutcomeCallable DescribeInstanceRefreshesCallable(const DescribeInstanceRefreshesRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeInstanceRefreshes, request);
    }

    /**
     * An Async wrapper for DescribeInstanceRefreshes that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeInstanceRefreshesRequestT = Model::DescribeInstanceRefreshesRequest>
    void DescribeInstanceRefreshesAsync(const DescribeInstanceRefreshesRequestT& request, const DescribeInstanceRefreshesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeInstanceRefreshes, request, handler, context);
    }

    /**

Gets information about the launch configurations in the account and * Region.

See Also:

AWS * API Reference

     */
    virtual Model::DescribeLaunchConfigurationsOutcome DescribeLaunchConfigurations(const Model::DescribeLaunchConfigurationsRequest& request) const;

    /**
     * A Callable wrapper for DescribeLaunchConfigurations that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeLaunchConfigurationsRequestT = Model::DescribeLaunchConfigurationsRequest>
    Model::DescribeLaunchConfigurationsOutcomeCallable DescribeLaunchConfigurationsCallable(const DescribeLaunchConfigurationsRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeLaunchConfigurations, request);
    }

    /**
     * An Async wrapper for DescribeLaunchConfigurations that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeLaunchConfigurationsRequestT = Model::DescribeLaunchConfigurationsRequest>
    void DescribeLaunchConfigurationsAsync(const DescribeLaunchConfigurationsRequestT& request, const DescribeLaunchConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeLaunchConfigurations, request, handler, context);
    }

    /**

Describes the available types of lifecycle hooks.

The following hook * types are supported:

  • autoscaling:EC2_INSTANCE_LAUNCHING

  • autoscaling:EC2_INSTANCE_TERMINATING

See * Also:

AWS * API Reference

     */
    virtual Model::DescribeLifecycleHookTypesOutcome DescribeLifecycleHookTypes(const Model::DescribeLifecycleHookTypesRequest& request) const;

    /**
     * A Callable wrapper for DescribeLifecycleHookTypes that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeLifecycleHookTypesRequestT = Model::DescribeLifecycleHookTypesRequest>
    Model::DescribeLifecycleHookTypesOutcomeCallable DescribeLifecycleHookTypesCallable(const DescribeLifecycleHookTypesRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeLifecycleHookTypes, request);
    }

    /**
     * An Async wrapper for DescribeLifecycleHookTypes that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeLifecycleHookTypesRequestT = Model::DescribeLifecycleHookTypesRequest>
    void DescribeLifecycleHookTypesAsync(const DescribeLifecycleHookTypesRequestT& request, const DescribeLifecycleHookTypesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeLifecycleHookTypes, request, handler, context);
    }

    /**

Gets information about the lifecycle hooks for the specified Auto Scaling * group.

See Also:

AWS * API Reference

     */
    virtual Model::DescribeLifecycleHooksOutcome DescribeLifecycleHooks(const Model::DescribeLifecycleHooksRequest& request) const;

    /**
     * A Callable wrapper for DescribeLifecycleHooks that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeLifecycleHooksRequestT = Model::DescribeLifecycleHooksRequest>
    Model::DescribeLifecycleHooksOutcomeCallable DescribeLifecycleHooksCallable(const DescribeLifecycleHooksRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeLifecycleHooks, request);
    }

    /**
     * An Async wrapper for DescribeLifecycleHooks that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeLifecycleHooksRequestT = Model::DescribeLifecycleHooksRequest>
    void DescribeLifecycleHooksAsync(const DescribeLifecycleHooksRequestT& request, const DescribeLifecycleHooksResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeLifecycleHooks, request, handler, context);
    }

    /**

This API operation is superseded by DescribeTrafficSources, which can describe multiple traffic source types. We recommend using DescribeTrafficSources to simplify how you manage traffic sources. However, we continue to support DescribeLoadBalancerTargetGroups. You can use both the original DescribeLoadBalancerTargetGroups API operation and DescribeTrafficSources on the same Auto Scaling group.

Gets information about the Elastic Load Balancing target * groups for the specified Auto Scaling group.

To determine the attachment * status of the target group, use the State element in the response. * When you attach a target group to an Auto Scaling group, the initial * State value is Adding. The state transitions to * Added after all Auto Scaling instances are registered with the * target group. If Elastic Load Balancing health checks are enabled for the Auto * Scaling group, the state transitions to InService after at least * one Auto Scaling instance passes the health check. When the target group is in * the InService state, Amazon EC2 Auto Scaling can terminate and * replace any instances that are reported as unhealthy. If no registered instances * pass the health checks, the target group doesn't enter the * InService state.

Target groups also have an * InService state if you attach them in the * CreateAutoScalingGroup API call. If your target group state is * InService, but it is not working properly, check the scaling * activities by calling DescribeScalingActivities and take any corrective * actions necessary.

For help with failed health checks, see Troubleshooting * Amazon EC2 Auto Scaling: Health checks in the Amazon EC2 Auto Scaling * User Guide. For more information, see Use * Elastic Load Balancing to distribute traffic across the instances in your Auto * Scaling group in the Amazon EC2 Auto Scaling User Guide.

*

You can use this operation to describe target groups that were attached by * using AttachLoadBalancerTargetGroups, but not for target groups that were * attached by using AttachTrafficSources.

See Also:

* AWS * API Reference

     */
    virtual Model::DescribeLoadBalancerTargetGroupsOutcome DescribeLoadBalancerTargetGroups(const Model::DescribeLoadBalancerTargetGroupsRequest& request) const;

    /**
     * A Callable wrapper for DescribeLoadBalancerTargetGroups that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeLoadBalancerTargetGroupsRequestT = Model::DescribeLoadBalancerTargetGroupsRequest>
    Model::DescribeLoadBalancerTargetGroupsOutcomeCallable DescribeLoadBalancerTargetGroupsCallable(const DescribeLoadBalancerTargetGroupsRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeLoadBalancerTargetGroups, request);
    }

    /**
     * An Async wrapper for DescribeLoadBalancerTargetGroups that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeLoadBalancerTargetGroupsRequestT = Model::DescribeLoadBalancerTargetGroupsRequest>
    void DescribeLoadBalancerTargetGroupsAsync(const DescribeLoadBalancerTargetGroupsRequestT& request, const DescribeLoadBalancerTargetGroupsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeLoadBalancerTargetGroups, request, handler, context);
    }

    /**

This API operation is superseded by DescribeTrafficSources, which can describe multiple traffic source types. We recommend using DescribeTrafficSources to simplify how you manage traffic sources. However, we continue to support DescribeLoadBalancers. You can use both the original DescribeLoadBalancers API operation and DescribeTrafficSources on the same Auto Scaling group.

*

Gets information about the load balancers for the specified Auto Scaling * group.

This operation describes only Classic Load Balancers. If you have * Application Load Balancers, Network Load Balancers, or Gateway Load Balancers, * use the DescribeLoadBalancerTargetGroups API instead.

To determine * the attachment status of the load balancer, use the State element * in the response. When you attach a load balancer to an Auto Scaling group, the * initial State value is Adding. The state transitions * to Added after all Auto Scaling instances are registered with the * load balancer. If Elastic Load Balancing health checks are enabled for the Auto * Scaling group, the state transitions to InService after at least * one Auto Scaling instance passes the health check. When the load balancer is in * the InService state, Amazon EC2 Auto Scaling can terminate and * replace any instances that are reported as unhealthy. If no registered instances * pass the health checks, the load balancer doesn't enter the * InService state.

Load balancers also have an * InService state if you attach them in the * CreateAutoScalingGroup API call. If your load balancer state is * InService, but it is not working properly, check the scaling * activities by calling DescribeScalingActivities and take any corrective * actions necessary.

For help with failed health checks, see Troubleshooting * Amazon EC2 Auto Scaling: Health checks in the Amazon EC2 Auto Scaling * User Guide. For more information, see Use * Elastic Load Balancing to distribute traffic across the instances in your Auto * Scaling group in the Amazon EC2 Auto Scaling User Guide. *

See Also:

AWS * API Reference

     */
    virtual Model::DescribeLoadBalancersOutcome DescribeLoadBalancers(const Model::DescribeLoadBalancersRequest& request) const;

    /**
     * A Callable wrapper for DescribeLoadBalancers that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeLoadBalancersRequestT = Model::DescribeLoadBalancersRequest>
    Model::DescribeLoadBalancersOutcomeCallable DescribeLoadBalancersCallable(const DescribeLoadBalancersRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeLoadBalancers, request);
    }

    /**
     * An Async wrapper for DescribeLoadBalancers that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeLoadBalancersRequestT = Model::DescribeLoadBalancersRequest>
    void DescribeLoadBalancersAsync(const DescribeLoadBalancersRequestT& request, const DescribeLoadBalancersResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeLoadBalancers, request, handler, context);
    }

    /**

Describes the available CloudWatch metrics for Amazon EC2 Auto * Scaling.

See Also:

AWS * API Reference

     */
    virtual Model::DescribeMetricCollectionTypesOutcome DescribeMetricCollectionTypes(const Model::DescribeMetricCollectionTypesRequest& request) const;

    /**
     * A Callable wrapper for DescribeMetricCollectionTypes that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeMetricCollectionTypesRequestT = Model::DescribeMetricCollectionTypesRequest>
    Model::DescribeMetricCollectionTypesOutcomeCallable DescribeMetricCollectionTypesCallable(const DescribeMetricCollectionTypesRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeMetricCollectionTypes, request);
    }

    /**
     * An Async wrapper for DescribeMetricCollectionTypes that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeMetricCollectionTypesRequestT = Model::DescribeMetricCollectionTypesRequest>
    void DescribeMetricCollectionTypesAsync(const DescribeMetricCollectionTypesRequestT& request, const DescribeMetricCollectionTypesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeMetricCollectionTypes, request, handler, context);
    }

    /**

Gets information about the Amazon SNS notifications that are configured for * one or more Auto Scaling groups.

See Also:

AWS * API Reference

     */
    virtual Model::DescribeNotificationConfigurationsOutcome DescribeNotificationConfigurations(const Model::DescribeNotificationConfigurationsRequest& request) const;

    /**
     * A Callable wrapper for DescribeNotificationConfigurations that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeNotificationConfigurationsRequestT = Model::DescribeNotificationConfigurationsRequest>
    Model::DescribeNotificationConfigurationsOutcomeCallable DescribeNotificationConfigurationsCallable(const DescribeNotificationConfigurationsRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeNotificationConfigurations, request);
    }

    /**
     * An Async wrapper for DescribeNotificationConfigurations that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeNotificationConfigurationsRequestT = Model::DescribeNotificationConfigurationsRequest>
    void DescribeNotificationConfigurationsAsync(const DescribeNotificationConfigurationsRequestT& request, const DescribeNotificationConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeNotificationConfigurations, request, handler, context);
    }

    /**

Gets information about the scaling policies in the account and * Region.

See Also:

AWS * API Reference

     */
    virtual Model::DescribePoliciesOutcome DescribePolicies(const Model::DescribePoliciesRequest& request) const;

    /**
     * A Callable wrapper for DescribePolicies that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribePoliciesRequestT = Model::DescribePoliciesRequest>
    Model::DescribePoliciesOutcomeCallable DescribePoliciesCallable(const DescribePoliciesRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribePolicies, request);
    }

    /**
     * An Async wrapper for DescribePolicies that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribePoliciesRequestT = Model::DescribePoliciesRequest>
    void DescribePoliciesAsync(const DescribePoliciesRequestT& request, const DescribePoliciesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribePolicies, request, handler, context);
    }

    /**

Gets information about the scaling activities in the account and Region.

*

When scaling events occur, you see a record of the scaling activity in the * scaling activities. For more information, see Verifying * a scaling activity for an Auto Scaling group in the Amazon EC2 Auto * Scaling User Guide.

If the scaling event succeeds, the value of the * StatusCode element in the response is Successful. If * an attempt to launch instances failed, the StatusCode value is * Failed or Cancelled and the StatusMessage * element in the response indicates the cause of the failure. For help * interpreting the StatusMessage, see Troubleshooting * Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. *

See Also:

AWS * API Reference
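     *
     * Example (a minimal sketch that checks each activity's status; the group
     * name is a placeholder):
     * @code
     * Aws::AutoScaling::Model::DescribeScalingActivitiesRequest request;
     * request.SetAutoScalingGroupName("my-asg");
     * auto outcome = client.DescribeScalingActivities(request);
     * if (outcome.IsSuccess()) {
     *   for (const auto& activity : outcome.GetResult().GetActivities()) {
     *     if (activity.GetStatusCode() != Aws::AutoScaling::Model::ScalingActivityStatusCode::Successful) {
     *       // activity.GetStatusMessage() indicates the cause of the failure
     *     }
     *   }
     * }
     * @endcode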

     */
    virtual Model::DescribeScalingActivitiesOutcome DescribeScalingActivities(const Model::DescribeScalingActivitiesRequest& request) const;

    /**
     * A Callable wrapper for DescribeScalingActivities that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeScalingActivitiesRequestT = Model::DescribeScalingActivitiesRequest>
    Model::DescribeScalingActivitiesOutcomeCallable DescribeScalingActivitiesCallable(const DescribeScalingActivitiesRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeScalingActivities, request);
    }

    /**
     * An Async wrapper for DescribeScalingActivities that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeScalingActivitiesRequestT = Model::DescribeScalingActivitiesRequest>
    void DescribeScalingActivitiesAsync(const DescribeScalingActivitiesRequestT& request, const DescribeScalingActivitiesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeScalingActivities, request, handler, context);
    }

    /**

Describes the scaling process types for use with the ResumeProcesses * and SuspendProcesses APIs.

See Also:

AWS * API Reference

     */
    virtual Model::DescribeScalingProcessTypesOutcome DescribeScalingProcessTypes(const Model::DescribeScalingProcessTypesRequest& request) const;

    /**
     * A Callable wrapper for DescribeScalingProcessTypes that returns a future to the operation so that it can be executed in parallel to other requests.
     */
    template<typename DescribeScalingProcessTypesRequestT = Model::DescribeScalingProcessTypesRequest>
    Model::DescribeScalingProcessTypesOutcomeCallable DescribeScalingProcessTypesCallable(const DescribeScalingProcessTypesRequestT& request) const
    {
      return SubmitCallable(&AutoScalingClient::DescribeScalingProcessTypes, request);
    }

    /**
     * An Async wrapper for DescribeScalingProcessTypes that queues the request into a thread executor and triggers associated callback when operation has finished.
     */
    template<typename DescribeScalingProcessTypesRequestT = Model::DescribeScalingProcessTypesRequest>
    void DescribeScalingProcessTypesAsync(const DescribeScalingProcessTypesRequestT& request, const DescribeScalingProcessTypesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
    {
      return SubmitAsync(&AutoScalingClient::DescribeScalingProcessTypes, request, handler, context);
    }

    /**

Gets information about the scheduled actions that haven't run or that have * not reached their end time.

To describe the scaling activities for * scheduled actions that have already run, call the * DescribeScalingActivities API.

See Also:

AWS * API Reference

*/ virtual Model::DescribeScheduledActionsOutcome DescribeScheduledActions(const Model::DescribeScheduledActionsRequest& request) const; /** * A Callable wrapper for DescribeScheduledActions that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeScheduledActionsOutcomeCallable DescribeScheduledActionsCallable(const DescribeScheduledActionsRequestT& request) const { return SubmitCallable(&AutoScalingClient::DescribeScheduledActions, request); } /** * An Async wrapper for DescribeScheduledActions that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeScheduledActionsAsync(const DescribeScheduledActionsRequestT& request, const DescribeScheduledActionsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::DescribeScheduledActions, request, handler, context); } /** *

Describes the specified tags.

You can use filters to limit the * results. For example, you can query for the tags for a specific Auto Scaling * group. You can specify multiple values for a filter. A tag must match at least * one of the specified values for it to be included in the results.

You can * also specify multiple filters. The result includes information for a particular * tag only if it matches all the filters. If there's no match, no special message * is returned.

For more information, see Tag * Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference
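
For illustration, a minimal sketch of filtering tags by group (not part of the generated header; assumes an initialized SDK and client, and a placeholder group name):

@code{.cpp}
#include <aws/autoscaling/model/DescribeTagsRequest.h>
#include <aws/autoscaling/model/Filter.h>
#include <iostream>

void PrintGroupTags(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::Filter filter;
    filter.SetName("auto-scaling-group"); // filter on the group name
    filter.SetValues({"my-asg"});         // placeholder group name

    Aws::AutoScaling::Model::DescribeTagsRequest request;
    request.SetFilters({filter});

    auto outcome = client.DescribeTags(request);
    if (outcome.IsSuccess())
    {
        for (const auto& tag : outcome.GetResult().GetTags())
        {
            std::cout << tag.GetKey() << "=" << tag.GetValue() << std::endl;
        }
    }
}
@endcode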

*/ virtual Model::DescribeTagsOutcome DescribeTags(const Model::DescribeTagsRequest& request) const; /** * A Callable wrapper for DescribeTags that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeTagsOutcomeCallable DescribeTagsCallable(const DescribeTagsRequestT& request) const { return SubmitCallable(&AutoScalingClient::DescribeTags, request); } /** * An Async wrapper for DescribeTags that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeTagsAsync(const DescribeTagsRequestT& request, const DescribeTagsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::DescribeTags, request, handler, context); } /** *

Describes the termination policies supported by Amazon EC2 Auto Scaling.


For more information, see Work * with Amazon EC2 Auto Scaling termination policies in the Amazon EC2 Auto * Scaling User Guide.

See Also:

AWS * API Reference

*/ virtual Model::DescribeTerminationPolicyTypesOutcome DescribeTerminationPolicyTypes(const Model::DescribeTerminationPolicyTypesRequest& request) const; /** * A Callable wrapper for DescribeTerminationPolicyTypes that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeTerminationPolicyTypesOutcomeCallable DescribeTerminationPolicyTypesCallable(const DescribeTerminationPolicyTypesRequestT& request) const { return SubmitCallable(&AutoScalingClient::DescribeTerminationPolicyTypes, request); } /** * An Async wrapper for DescribeTerminationPolicyTypes that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeTerminationPolicyTypesAsync(const DescribeTerminationPolicyTypesRequestT& request, const DescribeTerminationPolicyTypesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::DescribeTerminationPolicyTypes, request, handler, context); } /** *

Gets information about the traffic sources for the specified Auto Scaling * group.

You can optionally provide a traffic source type. If you provide a * traffic source type, then the results only include that traffic source type.


If you do not provide a traffic source type, then the results include all the * traffic sources for the specified Auto Scaling group.

See Also:

* AWS * API Reference

*/ virtual Model::DescribeTrafficSourcesOutcome DescribeTrafficSources(const Model::DescribeTrafficSourcesRequest& request) const; /** * A Callable wrapper for DescribeTrafficSources that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeTrafficSourcesOutcomeCallable DescribeTrafficSourcesCallable(const DescribeTrafficSourcesRequestT& request) const { return SubmitCallable(&AutoScalingClient::DescribeTrafficSources, request); } /** * An Async wrapper for DescribeTrafficSources that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeTrafficSourcesAsync(const DescribeTrafficSourcesRequestT& request, const DescribeTrafficSourcesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::DescribeTrafficSources, request, handler, context); } /** *

Gets information about a warm pool and its instances.

For more * information, see Warm * pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference

*/ virtual Model::DescribeWarmPoolOutcome DescribeWarmPool(const Model::DescribeWarmPoolRequest& request) const; /** * A Callable wrapper for DescribeWarmPool that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeWarmPoolOutcomeCallable DescribeWarmPoolCallable(const DescribeWarmPoolRequestT& request) const { return SubmitCallable(&AutoScalingClient::DescribeWarmPool, request); } /** * An Async wrapper for DescribeWarmPool that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeWarmPoolAsync(const DescribeWarmPoolRequestT& request, const DescribeWarmPoolResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::DescribeWarmPool, request, handler, context); } /** *

Removes one or more instances from the specified Auto Scaling group.


After the instances are detached, you can manage them independent of the Auto * Scaling group.

If you do not specify the option to decrement the desired * capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that * are detached.

If there is a Classic Load Balancer attached to the Auto * Scaling group, the instances are deregistered from the load balancer. If there * are target groups attached to the Auto Scaling group, the instances are * deregistered from the target groups.

For more information, see Detach * EC2 instances from your Auto Scaling group in the Amazon EC2 Auto Scaling * User Guide.

See Also:

AWS * API Reference
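
For illustration, a minimal sketch of detaching an instance while shrinking the group (not part of the generated header; the names and IDs are placeholders; assumes an initialized SDK and client):

@code{.cpp}
#include <aws/autoscaling/model/DetachInstancesRequest.h>

void DetachOneInstance(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::DetachInstancesRequest request;
    request.SetAutoScalingGroupName("my-asg");        // placeholder group name
    request.SetInstanceIds({"i-0123456789abcdef0"});  // placeholder instance ID
    request.SetShouldDecrementDesiredCapacity(true);  // don't launch a replacement

    auto outcome = client.DetachInstances(request);
    if (!outcome.IsSuccess())
    {
        // Inspect outcome.GetError() for the failure cause.
    }
}
@endcode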

*/ virtual Model::DetachInstancesOutcome DetachInstances(const Model::DetachInstancesRequest& request) const; /** * A Callable wrapper for DetachInstances that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DetachInstancesOutcomeCallable DetachInstancesCallable(const DetachInstancesRequestT& request) const { return SubmitCallable(&AutoScalingClient::DetachInstances, request); } /** * An Async wrapper for DetachInstances that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DetachInstancesAsync(const DetachInstancesRequestT& request, const DetachInstancesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::DetachInstances, request, handler, context); } /** *

This API operation is superseded by DetachTrafficSources, which can detach multiple traffic source types. We recommend using DetachTrafficSources to simplify how you manage traffic sources. However, we continue to support DetachLoadBalancerTargetGroups. You can use both the original DetachLoadBalancerTargetGroups API operation and DetachTrafficSources on the same Auto Scaling group.

Detaches one or more target groups from the specified Auto * Scaling group.

When you detach a target group, it enters the * Removing state while deregistering the instances in the group. When * all instances are deregistered, then you can no longer describe the target group * using the DescribeLoadBalancerTargetGroups API call. The instances remain * running.

You can use this operation to detach target groups that * were attached by using AttachLoadBalancerTargetGroups, but not for target * groups that were attached by using AttachTrafficSources.


See Also:

AWS * API Reference

*/ virtual Model::DetachLoadBalancerTargetGroupsOutcome DetachLoadBalancerTargetGroups(const Model::DetachLoadBalancerTargetGroupsRequest& request) const; /** * A Callable wrapper for DetachLoadBalancerTargetGroups that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DetachLoadBalancerTargetGroupsOutcomeCallable DetachLoadBalancerTargetGroupsCallable(const DetachLoadBalancerTargetGroupsRequestT& request) const { return SubmitCallable(&AutoScalingClient::DetachLoadBalancerTargetGroups, request); } /** * An Async wrapper for DetachLoadBalancerTargetGroups that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DetachLoadBalancerTargetGroupsAsync(const DetachLoadBalancerTargetGroupsRequestT& request, const DetachLoadBalancerTargetGroupsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::DetachLoadBalancerTargetGroups, request, handler, context); } /** *

This API operation is superseded by DetachTrafficSources, which can detach multiple traffic source types. We recommend using DetachTrafficSources to simplify how you manage traffic sources. However, we continue to support DetachLoadBalancers. You can use both the original DetachLoadBalancers API operation and DetachTrafficSources on the same Auto Scaling group.


Detaches one or more Classic Load Balancers from the specified Auto Scaling * group.

This operation detaches only Classic Load Balancers. If you have * Application Load Balancers, Network Load Balancers, or Gateway Load Balancers, * use the DetachLoadBalancerTargetGroups API instead.

When you * detach a load balancer, it enters the Removing state while * deregistering the instances in the group. When all instances are deregistered, * then you can no longer describe the load balancer using the * DescribeLoadBalancers API call. The instances remain * running.

See Also:

AWS * API Reference

*/ virtual Model::DetachLoadBalancersOutcome DetachLoadBalancers(const Model::DetachLoadBalancersRequest& request) const; /** * A Callable wrapper for DetachLoadBalancers that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DetachLoadBalancersOutcomeCallable DetachLoadBalancersCallable(const DetachLoadBalancersRequestT& request) const { return SubmitCallable(&AutoScalingClient::DetachLoadBalancers, request); } /** * An Async wrapper for DetachLoadBalancers that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DetachLoadBalancersAsync(const DetachLoadBalancersRequestT& request, const DetachLoadBalancersResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::DetachLoadBalancers, request, handler, context); } /** *

Detaches one or more traffic sources from the specified Auto Scaling * group.

When you detach a traffic source, it enters the * Removing state while deregistering the instances in the group. When * all instances are deregistered, then you can no longer describe the traffic * source using the DescribeTrafficSources API call. The instances continue * to run.

See Also:

AWS * API Reference

*/ virtual Model::DetachTrafficSourcesOutcome DetachTrafficSources(const Model::DetachTrafficSourcesRequest& request) const; /** * A Callable wrapper for DetachTrafficSources that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DetachTrafficSourcesOutcomeCallable DetachTrafficSourcesCallable(const DetachTrafficSourcesRequestT& request) const { return SubmitCallable(&AutoScalingClient::DetachTrafficSources, request); } /** * An Async wrapper for DetachTrafficSources that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DetachTrafficSourcesAsync(const DetachTrafficSourcesRequestT& request, const DetachTrafficSourcesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::DetachTrafficSources, request, handler, context); } /** *

Disables group metrics collection for the specified Auto Scaling * group.

See Also:

AWS * API Reference

*/ virtual Model::DisableMetricsCollectionOutcome DisableMetricsCollection(const Model::DisableMetricsCollectionRequest& request) const; /** * A Callable wrapper for DisableMetricsCollection that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DisableMetricsCollectionOutcomeCallable DisableMetricsCollectionCallable(const DisableMetricsCollectionRequestT& request) const { return SubmitCallable(&AutoScalingClient::DisableMetricsCollection, request); } /** * An Async wrapper for DisableMetricsCollection that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DisableMetricsCollectionAsync(const DisableMetricsCollectionRequestT& request, const DisableMetricsCollectionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::DisableMetricsCollection, request, handler, context); } /** *

Enables group metrics collection for the specified Auto Scaling group.


You can use these metrics to track changes in an Auto Scaling group and to * set alarms on threshold values. You can view group metrics using the Amazon EC2 * Auto Scaling console or the CloudWatch console. For more information, see Monitor * CloudWatch metrics for your Auto Scaling groups and instances in the * Amazon EC2 Auto Scaling User Guide.

See Also:

AWS * API Reference
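
For illustration, a minimal sketch of enabling all group metrics (not part of the generated header; assumes an initialized SDK and client, and a placeholder group name):

@code{.cpp}
#include <aws/autoscaling/model/EnableMetricsCollectionRequest.h>

void EnableGroupMetrics(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::EnableMetricsCollectionRequest request;
    request.SetAutoScalingGroupName("my-asg"); // placeholder group name
    request.SetGranularity("1Minute");         // granularity expected by the service
    // Omitting SetMetrics enables collection for all group metrics.

    auto outcome = client.EnableMetricsCollection(request);
}
@endcode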

*/ virtual Model::EnableMetricsCollectionOutcome EnableMetricsCollection(const Model::EnableMetricsCollectionRequest& request) const; /** * A Callable wrapper for EnableMetricsCollection that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::EnableMetricsCollectionOutcomeCallable EnableMetricsCollectionCallable(const EnableMetricsCollectionRequestT& request) const { return SubmitCallable(&AutoScalingClient::EnableMetricsCollection, request); } /** * An Async wrapper for EnableMetricsCollection that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void EnableMetricsCollectionAsync(const EnableMetricsCollectionRequestT& request, const EnableMetricsCollectionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::EnableMetricsCollection, request, handler, context); } /** *

Moves the specified instances into the standby state.

If you choose to * decrement the desired capacity of the Auto Scaling group, the instances can * enter standby as long as the desired capacity of the Auto Scaling group after * the instances are placed into standby is equal to or greater than the minimum * capacity of the group.

If you choose not to decrement the desired * capacity of the Auto Scaling group, the Auto Scaling group launches new * instances to replace the instances on standby.

For more information, see * Temporarily * removing instances from your Auto Scaling group in the Amazon EC2 Auto * Scaling User Guide.

See Also:

AWS * API Reference
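
For illustration, a minimal sketch of placing an instance on standby while decrementing the desired capacity (not part of the generated header; the names and IDs are placeholders):

@code{.cpp}
#include <aws/autoscaling/model/EnterStandbyRequest.h>

void MoveInstanceToStandby(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::EnterStandbyRequest request;
    request.SetAutoScalingGroupName("my-asg");        // placeholder group name
    request.SetInstanceIds({"i-0123456789abcdef0"});  // placeholder instance ID
    request.SetShouldDecrementDesiredCapacity(true);  // no replacement instance is launched

    auto outcome = client.EnterStandby(request);
}
@endcode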

*/ virtual Model::EnterStandbyOutcome EnterStandby(const Model::EnterStandbyRequest& request) const; /** * A Callable wrapper for EnterStandby that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::EnterStandbyOutcomeCallable EnterStandbyCallable(const EnterStandbyRequestT& request) const { return SubmitCallable(&AutoScalingClient::EnterStandby, request); } /** * An Async wrapper for EnterStandby that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void EnterStandbyAsync(const EnterStandbyRequestT& request, const EnterStandbyResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::EnterStandby, request, handler, context); } /** *

Executes the specified policy. This can be useful for testing the design of * your scaling policy.

See Also:

AWS * API Reference

*/ virtual Model::ExecutePolicyOutcome ExecutePolicy(const Model::ExecutePolicyRequest& request) const; /** * A Callable wrapper for ExecutePolicy that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ExecutePolicyOutcomeCallable ExecutePolicyCallable(const ExecutePolicyRequestT& request) const { return SubmitCallable(&AutoScalingClient::ExecutePolicy, request); } /** * An Async wrapper for ExecutePolicy that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ExecutePolicyAsync(const ExecutePolicyRequestT& request, const ExecutePolicyResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::ExecutePolicy, request, handler, context); } /** *

Moves the specified instances out of the standby state.

After you put * the instances back in service, the desired capacity is incremented.

For * more information, see Temporarily * removing instances from your Auto Scaling group in the Amazon EC2 Auto * Scaling User Guide.

See Also:

AWS * API Reference

*/ virtual Model::ExitStandbyOutcome ExitStandby(const Model::ExitStandbyRequest& request) const; /** * A Callable wrapper for ExitStandby that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ExitStandbyOutcomeCallable ExitStandbyCallable(const ExitStandbyRequestT& request) const { return SubmitCallable(&AutoScalingClient::ExitStandby, request); } /** * An Async wrapper for ExitStandby that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ExitStandbyAsync(const ExitStandbyRequestT& request, const ExitStandbyResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::ExitStandby, request, handler, context); } /** *

Retrieves the forecast data for a predictive scaling policy.

Load * forecasts are predictions of the hourly load values using historical load data * from CloudWatch and an analysis of historical trends. Capacity forecasts are * represented as predicted values for the minimum capacity that is needed on an * hourly basis, based on the hourly load forecast.

A minimum of 24 hours of * data is required to create the initial forecasts. However, having a full 14 days * of historical data results in more accurate forecasts.

For more * information, see Predictive * scaling for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference

*/ virtual Model::GetPredictiveScalingForecastOutcome GetPredictiveScalingForecast(const Model::GetPredictiveScalingForecastRequest& request) const; /** * A Callable wrapper for GetPredictiveScalingForecast that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::GetPredictiveScalingForecastOutcomeCallable GetPredictiveScalingForecastCallable(const GetPredictiveScalingForecastRequestT& request) const { return SubmitCallable(&AutoScalingClient::GetPredictiveScalingForecast, request); } /** * An Async wrapper for GetPredictiveScalingForecast that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void GetPredictiveScalingForecastAsync(const GetPredictiveScalingForecastRequestT& request, const GetPredictiveScalingForecastResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::GetPredictiveScalingForecast, request, handler, context); } /** *

Creates or updates a lifecycle hook for the specified Auto Scaling group.


Lifecycle hooks let you create solutions that are aware of events in the Auto * Scaling instance lifecycle, and then perform a custom action on instances when * the corresponding lifecycle event occurs.

This step is a part of the * procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a launch template or launch configuration with a user data script that runs while an instance is in a wait state due to a lifecycle hook.

  2. (Optional) Create a Lambda function and a rule that * allows Amazon EventBridge to invoke your Lambda function when an instance is put * into a wait state due to a lifecycle hook.

  3. (Optional) Create a * notification target and an IAM role. The target can be either an Amazon SQS * queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish * lifecycle notifications to the target.

  4. Create the * lifecycle hook. Specify whether the hook is used when the instances launch or * terminate.

  5. If you need more time, record the lifecycle * action heartbeat to keep the instance in a wait state using the * RecordLifecycleActionHeartbeat API call.

  6. If you finish * before the timeout period ends, send a callback by using the * CompleteLifecycleAction API call.

For more * information, see Amazon * EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User * Guide.

If you exceed your maximum limit of lifecycle hooks, which by * default is 50 per Auto Scaling group, the call fails.

You can view the * lifecycle hooks for an Auto Scaling group using the * DescribeLifecycleHooks API call. If you are no longer using a lifecycle * hook, you can delete it by calling the DeleteLifecycleHook * API.

See Also:

AWS * API Reference
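
For illustration, a minimal sketch of step 4 of the procedure above, creating a launch hook (not part of the generated header; the names are placeholders):

@code{.cpp}
#include <aws/autoscaling/model/PutLifecycleHookRequest.h>

void AddLaunchHook(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::PutLifecycleHookRequest request;
    request.SetAutoScalingGroupName("my-asg");       // placeholder group name
    request.SetLifecycleHookName("my-launch-hook");  // placeholder hook name
    request.SetLifecycleTransition("autoscaling:EC2_INSTANCE_LAUNCHING");
    request.SetHeartbeatTimeout(300);      // seconds to wait for a callback or heartbeat
    request.SetDefaultResult("CONTINUE");  // action taken if the timeout elapses

    auto outcome = client.PutLifecycleHook(request);
}
@endcode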

*/ virtual Model::PutLifecycleHookOutcome PutLifecycleHook(const Model::PutLifecycleHookRequest& request) const; /** * A Callable wrapper for PutLifecycleHook that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::PutLifecycleHookOutcomeCallable PutLifecycleHookCallable(const PutLifecycleHookRequestT& request) const { return SubmitCallable(&AutoScalingClient::PutLifecycleHook, request); } /** * An Async wrapper for PutLifecycleHook that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void PutLifecycleHookAsync(const PutLifecycleHookRequestT& request, const PutLifecycleHookResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::PutLifecycleHook, request, handler, context); } /** *

Configures an Auto Scaling group to send notifications when specified events * take place. Subscribers to the specified topic can have messages delivered to an * endpoint such as a web server or an email address.

This configuration * overwrites any existing configuration.

For more information, see Getting * Amazon SNS notifications when your Auto Scaling group scales in the * Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum * limit of SNS topics, which is 10 per Auto Scaling group, the call * fails.

See Also:

AWS * API Reference
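
For illustration, a minimal sketch of subscribing a topic to launch and terminate events (not part of the generated header; the ARN and names are placeholders):

@code{.cpp}
#include <aws/autoscaling/model/PutNotificationConfigurationRequest.h>

void NotifyOnLaunchAndTerminate(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::PutNotificationConfigurationRequest request;
    request.SetAutoScalingGroupName("my-asg");                           // placeholder
    request.SetTopicARN("arn:aws:sns:us-east-1:123456789012:my-topic");  // placeholder ARN
    request.SetNotificationTypes({"autoscaling:EC2_INSTANCE_LAUNCH",
                                  "autoscaling:EC2_INSTANCE_TERMINATE"});

    auto outcome = client.PutNotificationConfiguration(request);
}
@endcode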

*/ virtual Model::PutNotificationConfigurationOutcome PutNotificationConfiguration(const Model::PutNotificationConfigurationRequest& request) const; /** * A Callable wrapper for PutNotificationConfiguration that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::PutNotificationConfigurationOutcomeCallable PutNotificationConfigurationCallable(const PutNotificationConfigurationRequestT& request) const { return SubmitCallable(&AutoScalingClient::PutNotificationConfiguration, request); } /** * An Async wrapper for PutNotificationConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void PutNotificationConfigurationAsync(const PutNotificationConfigurationRequestT& request, const PutNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::PutNotificationConfiguration, request, handler, context); } /** *

Creates or updates a scaling policy for an Auto Scaling group. Scaling * policies are used to scale an Auto Scaling group based on configurable metrics. * If no policies are defined, the dynamic scaling and predictive scaling features * are not used.

For more information about using dynamic scaling, see Target * tracking scaling policies and Step * and simple scaling policies in the Amazon EC2 Auto Scaling User * Guide.

For more information about using predictive scaling, see Predictive * scaling for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User * Guide.

You can view the scaling policies for an Auto Scaling group * using the DescribePolicies API call. If you are no longer using a scaling * policy, you can delete it by calling the DeletePolicy API.

See * Also:

AWS * API Reference
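
For illustration, a minimal sketch of a target tracking policy that holds average CPU near 50% (not part of the generated header; the names are placeholders):

@code{.cpp}
#include <aws/autoscaling/model/PutScalingPolicyRequest.h>
#include <aws/autoscaling/model/TargetTrackingConfiguration.h>
#include <aws/autoscaling/model/PredefinedMetricSpecification.h>

void CreateCpuTargetTrackingPolicy(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::PredefinedMetricSpecification metric;
    metric.SetPredefinedMetricType(
        Aws::AutoScaling::Model::MetricType::ASGAverageCPUUtilization);

    Aws::AutoScaling::Model::TargetTrackingConfiguration tracking;
    tracking.SetPredefinedMetricSpecification(metric);
    tracking.SetTargetValue(50.0); // target average CPU utilization, in percent

    Aws::AutoScaling::Model::PutScalingPolicyRequest request;
    request.SetAutoScalingGroupName("my-asg"); // placeholder group name
    request.SetPolicyName("cpu-50");           // placeholder policy name
    request.SetPolicyType("TargetTrackingScaling");
    request.SetTargetTrackingConfiguration(tracking);

    auto outcome = client.PutScalingPolicy(request);
}
@endcode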

*/ virtual Model::PutScalingPolicyOutcome PutScalingPolicy(const Model::PutScalingPolicyRequest& request) const; /** * A Callable wrapper for PutScalingPolicy that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::PutScalingPolicyOutcomeCallable PutScalingPolicyCallable(const PutScalingPolicyRequestT& request) const { return SubmitCallable(&AutoScalingClient::PutScalingPolicy, request); } /** * An Async wrapper for PutScalingPolicy that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void PutScalingPolicyAsync(const PutScalingPolicyRequestT& request, const PutScalingPolicyResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::PutScalingPolicy, request, handler, context); } /** *

Creates or updates a scheduled scaling action for an Auto Scaling group.


For more information, see Scheduled * scaling in the Amazon EC2 Auto Scaling User Guide.

You can * view the scheduled actions for an Auto Scaling group using the * DescribeScheduledActions API call. If you are no longer using a scheduled * action, you can delete it by calling the DeleteScheduledAction API.


If you try to schedule your action in the past, Amazon EC2 Auto Scaling * returns an error message.

See Also:

AWS * API Reference
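
For illustration, a minimal sketch of a recurring weekday scale-up (not part of the generated header; the names and sizes are placeholders):

@code{.cpp}
#include <aws/autoscaling/model/PutScheduledUpdateGroupActionRequest.h>

void ScaleUpOnWeekdayMornings(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::PutScheduledUpdateGroupActionRequest request;
    request.SetAutoScalingGroupName("my-asg");           // placeholder group name
    request.SetScheduledActionName("weekday-scale-up");  // placeholder action name
    request.SetRecurrence("0 9 * * 1-5");                // cron: 09:00 UTC, Monday-Friday
    request.SetMinSize(2);
    request.SetMaxSize(10);
    request.SetDesiredCapacity(4);

    auto outcome = client.PutScheduledUpdateGroupAction(request);
}
@endcode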

*/ virtual Model::PutScheduledUpdateGroupActionOutcome PutScheduledUpdateGroupAction(const Model::PutScheduledUpdateGroupActionRequest& request) const; /** * A Callable wrapper for PutScheduledUpdateGroupAction that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::PutScheduledUpdateGroupActionOutcomeCallable PutScheduledUpdateGroupActionCallable(const PutScheduledUpdateGroupActionRequestT& request) const { return SubmitCallable(&AutoScalingClient::PutScheduledUpdateGroupAction, request); } /** * An Async wrapper for PutScheduledUpdateGroupAction that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void PutScheduledUpdateGroupActionAsync(const PutScheduledUpdateGroupActionRequestT& request, const PutScheduledUpdateGroupActionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::PutScheduledUpdateGroupAction, request, handler, context); } /** *

Creates or updates a warm pool for the specified Auto Scaling group. A warm * pool is a pool of pre-initialized EC2 instances that sits alongside the Auto * Scaling group. Whenever your application needs to scale out, the Auto Scaling * group can draw on the warm pool to meet its new desired capacity. For more * information and example configurations, see Warm * pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User * Guide.

This operation must be called from the Region in which the * Auto Scaling group was created. This operation cannot be called on an Auto * Scaling group that has a mixed instances policy or a launch template or launch * configuration that requests Spot Instances.

You can view the instances in * the warm pool using the DescribeWarmPool API call. If you are no longer * using a warm pool, you can delete it by calling the DeleteWarmPool * API.

See Also:

AWS * API Reference
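
For illustration, a minimal sketch of a warm pool of stopped instances (not part of the generated header; the group name is a placeholder):

@code{.cpp}
#include <aws/autoscaling/model/PutWarmPoolRequest.h>

void CreateStoppedWarmPool(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::PutWarmPoolRequest request;
    request.SetAutoScalingGroupName("my-asg"); // placeholder group name
    request.SetMinSize(2);                     // keep at least two pre-initialized instances
    request.SetPoolState(Aws::AutoScaling::Model::WarmPoolState::Stopped); // park them stopped

    auto outcome = client.PutWarmPool(request);
}
@endcode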

*/ virtual Model::PutWarmPoolOutcome PutWarmPool(const Model::PutWarmPoolRequest& request) const; /** * A Callable wrapper for PutWarmPool that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::PutWarmPoolOutcomeCallable PutWarmPoolCallable(const PutWarmPoolRequestT& request) const { return SubmitCallable(&AutoScalingClient::PutWarmPool, request); } /** * An Async wrapper for PutWarmPool that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void PutWarmPoolAsync(const PutWarmPoolRequestT& request, const PutWarmPoolResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::PutWarmPool, request, handler, context); } /** *

Records a heartbeat for the lifecycle action associated with the specified * token or instance. This extends the timeout by the length of time defined using * the PutLifecycleHook API call.

This step is a part of the * procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a launch template or launch configuration with a user data script that runs while an instance is in a wait state due to a lifecycle hook.

  2. (Optional) Create a Lambda function and a rule that * allows Amazon EventBridge to invoke your Lambda function when an instance is put * into a wait state due to a lifecycle hook.

  3. (Optional) Create a * notification target and an IAM role. The target can be either an Amazon SQS * queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish * lifecycle notifications to the target.

  4. Create the lifecycle * hook. Specify whether the hook is used when the instances launch or * terminate.

  5. If you need more time, record the lifecycle * action heartbeat to keep the instance in a wait state.

  6. If * you finish before the timeout period ends, send a callback by using the * CompleteLifecycleAction API call.

For more * information, see Amazon * EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference

*/ virtual Model::RecordLifecycleActionHeartbeatOutcome RecordLifecycleActionHeartbeat(const Model::RecordLifecycleActionHeartbeatRequest& request) const; /** * A Callable wrapper for RecordLifecycleActionHeartbeat that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::RecordLifecycleActionHeartbeatOutcomeCallable RecordLifecycleActionHeartbeatCallable(const RecordLifecycleActionHeartbeatRequestT& request) const { return SubmitCallable(&AutoScalingClient::RecordLifecycleActionHeartbeat, request); } /** * An Async wrapper for RecordLifecycleActionHeartbeat that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void RecordLifecycleActionHeartbeatAsync(const RecordLifecycleActionHeartbeatRequestT& request, const RecordLifecycleActionHeartbeatResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::RecordLifecycleActionHeartbeat, request, handler, context); } /** *

Resumes the specified suspended auto scaling processes, or all suspended processes, for the specified Auto Scaling group.

For more information, see * Suspending * and resuming scaling processes in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference

*/ virtual Model::ResumeProcessesOutcome ResumeProcesses(const Model::ResumeProcessesRequest& request) const; /** * A Callable wrapper for ResumeProcesses that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ResumeProcessesOutcomeCallable ResumeProcessesCallable(const ResumeProcessesRequestT& request) const { return SubmitCallable(&AutoScalingClient::ResumeProcesses, request); } /** * An Async wrapper for ResumeProcesses that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ResumeProcessesAsync(const ResumeProcessesRequestT& request, const ResumeProcessesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::ResumeProcesses, request, handler, context); } /** *

Cancels an instance refresh that is in progress and rolls back any changes * that it made. Amazon EC2 Auto Scaling replaces any instances that were replaced * during the instance refresh. This restores your Auto Scaling group to the * configuration that it was using before the start of the instance refresh.


This operation is part of the instance * refresh feature in Amazon EC2 Auto Scaling, which helps you update instances * in your Auto Scaling group after you make configuration changes.

A * rollback is not supported in the following situations:

  • There * is no desired configuration specified for the instance refresh.

  • The Auto Scaling group has a launch template that uses an Amazon Web Services Systems Manager parameter instead of an AMI ID for the ImageId property.

  • The Auto Scaling group uses the launch template's * $Latest or $Default version.

When * you receive a successful response from this operation, Amazon EC2 Auto Scaling * immediately begins replacing instances. You can check the status of this * operation through the DescribeInstanceRefreshes API operation. *

See Also:

AWS * API Reference

*/ virtual Model::RollbackInstanceRefreshOutcome RollbackInstanceRefresh(const Model::RollbackInstanceRefreshRequest& request) const; /** * A Callable wrapper for RollbackInstanceRefresh that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::RollbackInstanceRefreshOutcomeCallable RollbackInstanceRefreshCallable(const RollbackInstanceRefreshRequestT& request) const { return SubmitCallable(&AutoScalingClient::RollbackInstanceRefresh, request); } /** * An Async wrapper for RollbackInstanceRefresh that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void RollbackInstanceRefreshAsync(const RollbackInstanceRefreshRequestT& request, const RollbackInstanceRefreshResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::RollbackInstanceRefresh, request, handler, context); } /** *

Sets the size of the specified Auto Scaling group.

If a scale-in * activity occurs as a result of a new DesiredCapacity value that is * lower than the current size of the group, the Auto Scaling group uses its * termination policy to determine which instances to terminate.

For more * information, see Manual * scaling in the Amazon EC2 Auto Scaling User Guide.

See * Also:

AWS * API Reference
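
For illustration, a minimal sketch of a manual resize (not part of the generated header; the group name is a placeholder):

@code{.cpp}
#include <aws/autoscaling/model/SetDesiredCapacityRequest.h>

void ResizeGroup(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::SetDesiredCapacityRequest request;
    request.SetAutoScalingGroupName("my-asg"); // placeholder group name
    request.SetDesiredCapacity(5);
    request.SetHonorCooldown(true);            // wait for any cooldown period to complete

    auto outcome = client.SetDesiredCapacity(request);
}
@endcode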

*/ virtual Model::SetDesiredCapacityOutcome SetDesiredCapacity(const Model::SetDesiredCapacityRequest& request) const; /** * A Callable wrapper for SetDesiredCapacity that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::SetDesiredCapacityOutcomeCallable SetDesiredCapacityCallable(const SetDesiredCapacityRequestT& request) const { return SubmitCallable(&AutoScalingClient::SetDesiredCapacity, request); } /** * An Async wrapper for SetDesiredCapacity that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void SetDesiredCapacityAsync(const SetDesiredCapacityRequestT& request, const SetDesiredCapacityResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::SetDesiredCapacity, request, handler, context); } /** *

Sets the health status of the specified instance.

For more * information, see Health * checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User * Guide.

See Also:

AWS * API Reference

*/ virtual Model::SetInstanceHealthOutcome SetInstanceHealth(const Model::SetInstanceHealthRequest& request) const; /** * A Callable wrapper for SetInstanceHealth that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::SetInstanceHealthOutcomeCallable SetInstanceHealthCallable(const SetInstanceHealthRequestT& request) const { return SubmitCallable(&AutoScalingClient::SetInstanceHealth, request); } /** * An Async wrapper for SetInstanceHealth that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void SetInstanceHealthAsync(const SetInstanceHealthRequestT& request, const SetInstanceHealthResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::SetInstanceHealth, request, handler, context); } /** *

Updates the instance protection settings of the specified instances. This * operation cannot be called on instances in a warm pool.

For more * information about preventing instances that are part of an Auto Scaling group * from terminating on scale in, see Using * instance scale-in protection in the Amazon EC2 Auto Scaling User * Guide.

If you exceed your maximum limit of instance IDs, which is 50 * per Auto Scaling group, the call fails.

See Also:

AWS * API Reference
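
For illustration, a minimal sketch of protecting one instance from scale in (not part of the generated header; the names and IDs are placeholders):

@code{.cpp}
#include <aws/autoscaling/model/SetInstanceProtectionRequest.h>

void ProtectInstance(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::SetInstanceProtectionRequest request;
    request.SetAutoScalingGroupName("my-asg");        // placeholder group name
    request.SetInstanceIds({"i-0123456789abcdef0"});  // placeholder instance ID
    request.SetProtectedFromScaleIn(true);            // exempt from scale-in termination

    auto outcome = client.SetInstanceProtection(request);
}
@endcode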

*/ virtual Model::SetInstanceProtectionOutcome SetInstanceProtection(const Model::SetInstanceProtectionRequest& request) const; /** * A Callable wrapper for SetInstanceProtection that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::SetInstanceProtectionOutcomeCallable SetInstanceProtectionCallable(const SetInstanceProtectionRequestT& request) const { return SubmitCallable(&AutoScalingClient::SetInstanceProtection, request); } /** * An Async wrapper for SetInstanceProtection that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void SetInstanceProtectionAsync(const SetInstanceProtectionRequestT& request, const SetInstanceProtectionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::SetInstanceProtection, request, handler, context); } /** *

Starts an instance refresh. During an instance refresh, Amazon EC2 Auto * Scaling performs a rolling update of instances in an Auto Scaling group. * Instances are terminated first and then replaced, which temporarily reduces the * capacity available within your Auto Scaling group.

This operation is part * of the instance * refresh feature in Amazon EC2 Auto Scaling, which helps you update instances * in your Auto Scaling group. This feature is helpful, for example, when you have * a new AMI or a new user data script. You just need to create a new launch * template that specifies the new AMI or user data script. Then start an instance * refresh to immediately begin the process of updating instances in the group. *

If successful, the request's response contains a unique ID that you can use to track the progress of the instance refresh. To query its status or to describe instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh that is in progress, use the CancelInstanceRefresh API.

An instance refresh might fail for several reasons, such as EC2 launch failures or misconfigured health checks, or because the refresh is not configured to either ignore or allow the termination of instances that are in Standby state or protected from scale in. You can monitor for failed EC2 launches using the scaling activities. To find the scaling activities, call the DescribeScalingActivities API.

If you enable auto rollback, your * Auto Scaling group will be rolled back automatically when the instance refresh * fails. You can enable this feature before starting an instance refresh by * specifying the AutoRollback property in the instance refresh * preferences. Otherwise, to roll back an instance refresh before it finishes, use * the RollbackInstanceRefresh API.

See Also:

AWS * API Reference
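
For illustration, a minimal sketch of starting a refresh with auto rollback enabled (not part of the generated header; the names are placeholders):

@code{.cpp}
#include <aws/autoscaling/model/StartInstanceRefreshRequest.h>
#include <aws/autoscaling/model/RefreshPreferences.h>
#include <iostream>

void RefreshGroup(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::RefreshPreferences preferences;
    preferences.SetMinHealthyPercentage(90); // keep 90% of capacity in service
    preferences.SetAutoRollback(true);       // roll back automatically if the refresh fails

    Aws::AutoScaling::Model::StartInstanceRefreshRequest request;
    request.SetAutoScalingGroupName("my-asg"); // placeholder group name
    request.SetPreferences(preferences);

    auto outcome = client.StartInstanceRefresh(request);
    if (outcome.IsSuccess())
    {
        // Use this ID with DescribeInstanceRefreshes to track progress.
        std::cout << outcome.GetResult().GetInstanceRefreshId() << std::endl;
    }
}
@endcode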

*/ virtual Model::StartInstanceRefreshOutcome StartInstanceRefresh(const Model::StartInstanceRefreshRequest& request) const; /** * A Callable wrapper for StartInstanceRefresh that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::StartInstanceRefreshOutcomeCallable StartInstanceRefreshCallable(const StartInstanceRefreshRequestT& request) const { return SubmitCallable(&AutoScalingClient::StartInstanceRefresh, request); } /** * An Async wrapper for StartInstanceRefresh that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void StartInstanceRefreshAsync(const StartInstanceRefreshRequestT& request, const StartInstanceRefreshResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::StartInstanceRefresh, request, handler, context); } /** *

Suspends the specified auto scaling processes, or all processes, for the * specified Auto Scaling group.

If you suspend either the * Launch or Terminate process types, it can prevent * other process types from functioning properly. For more information, see Suspending * and resuming scaling processes in the Amazon EC2 Auto Scaling User * Guide.

To resume processes that have been suspended, call the * ResumeProcesses API.

See Also:

AWS * API Reference
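
For illustration, a minimal sketch of suspending only alarm-driven scaling (not part of the generated header; the group name is a placeholder):

@code{.cpp}
#include <aws/autoscaling/model/SuspendProcessesRequest.h>

void SuspendAlarmDrivenScaling(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::SuspendProcessesRequest request;
    request.SetAutoScalingGroupName("my-asg");           // placeholder group name
    request.SetScalingProcesses({"AlarmNotification"});  // leaves Launch and Terminate active

    auto outcome = client.SuspendProcesses(request);
}
@endcode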

*/ virtual Model::SuspendProcessesOutcome SuspendProcesses(const Model::SuspendProcessesRequest& request) const; /** * A Callable wrapper for SuspendProcesses that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::SuspendProcessesOutcomeCallable SuspendProcessesCallable(const SuspendProcessesRequestT& request) const { return SubmitCallable(&AutoScalingClient::SuspendProcesses, request); } /** * An Async wrapper for SuspendProcesses that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void SuspendProcessesAsync(const SuspendProcessesRequestT& request, const SuspendProcessesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::SuspendProcesses, request, handler, context); } /** *

Terminates the specified instance and optionally adjusts the desired group * size. This operation cannot be called on instances in a warm pool.

This * call simply makes a termination request. The instance is not terminated * immediately. When an instance is terminated, the instance status changes to * terminated. You can't connect to or start an instance after you've * terminated it.

If you do not specify the option to decrement the desired * capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that * are terminated.

By default, Amazon EC2 Auto Scaling balances instances * across all Availability Zones. If you decrement the desired capacity, your Auto * Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto * Scaling tries to rebalance the group, and rebalancing might terminate instances * in other zones. For more information, see Rebalancing * activities in the Amazon EC2 Auto Scaling User Guide.

See * Also:

AWS * API Reference

*/ virtual Model::TerminateInstanceInAutoScalingGroupOutcome TerminateInstanceInAutoScalingGroup(const Model::TerminateInstanceInAutoScalingGroupRequest& request) const; /** * A Callable wrapper for TerminateInstanceInAutoScalingGroup that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::TerminateInstanceInAutoScalingGroupOutcomeCallable TerminateInstanceInAutoScalingGroupCallable(const TerminateInstanceInAutoScalingGroupRequestT& request) const { return SubmitCallable(&AutoScalingClient::TerminateInstanceInAutoScalingGroup, request); } /** * An Async wrapper for TerminateInstanceInAutoScalingGroup that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void TerminateInstanceInAutoScalingGroupAsync(const TerminateInstanceInAutoScalingGroupRequestT& request, const TerminateInstanceInAutoScalingGroupResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::TerminateInstanceInAutoScalingGroup, request, handler, context); } /** *

We strongly recommend that all Auto Scaling groups use launch templates * to ensure full functionality for Amazon EC2 Auto Scaling and Amazon EC2. *

Updates the configuration for the specified Auto Scaling group.


To update an Auto Scaling group, specify the name of the group and the * property that you want to change. Any properties that you don't specify are not * changed by this update request. The new settings take effect on any scaling * activities after this call returns.

If you associate a new launch * configuration or template with an Auto Scaling group, all new instances will get * the updated configuration. Existing instances continue to run with the * configuration that they were originally launched with. When you update a group * to specify a mixed instances policy instead of a launch configuration or * template, existing instances may be replaced to match the new purchasing options * that you specified in the policy. For example, if the group currently has 100% * On-Demand capacity and the policy specifies 50% Spot capacity, this means that * half of your instances will be gradually terminated and relaunched as Spot * Instances. When replacing instances, Amazon EC2 Auto Scaling launches new * instances before terminating the old ones, so that updating your group does not * compromise the performance or availability of your application.

Note the * following about changing DesiredCapacity, MaxSize, or * MinSize:

  • If a scale-in activity occurs as a result * of a new DesiredCapacity value that is lower than the current size * of the group, the Auto Scaling group uses its termination policy to determine * which instances to terminate.

  • If you specify a new value for * MinSize without specifying a value for * DesiredCapacity, and the new MinSize is larger than * the current size of the group, this sets the group's * DesiredCapacity to the new MinSize value.

  • If you specify a new value for MaxSize without specifying a value for DesiredCapacity, and the new MaxSize is smaller than the current size of the group, this sets the group's DesiredCapacity to the new MaxSize value.


To see which properties have been set, call the * DescribeAutoScalingGroups API. To view the scaling policies for an Auto * Scaling group, call the DescribePolicies API. If the group has scaling * policies, you can update them by calling the PutScalingPolicy * API.

See Also:

AWS * API Reference
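
For illustration, a minimal sketch of raising a group's size bounds (not part of the generated header; the names and sizes are placeholders):

@code{.cpp}
#include <aws/autoscaling/model/UpdateAutoScalingGroupRequest.h>

void RaiseGroupBounds(const Aws::AutoScaling::AutoScalingClient& client)
{
    Aws::AutoScaling::Model::UpdateAutoScalingGroupRequest request;
    request.SetAutoScalingGroupName("my-asg"); // placeholder group name
    request.SetMinSize(2);  // may also raise DesiredCapacity, per the note above
    request.SetMaxSize(20);

    auto outcome = client.UpdateAutoScalingGroup(request);
}
@endcode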

*/ virtual Model::UpdateAutoScalingGroupOutcome UpdateAutoScalingGroup(const Model::UpdateAutoScalingGroupRequest& request) const; /** * A Callable wrapper for UpdateAutoScalingGroup that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::UpdateAutoScalingGroupOutcomeCallable UpdateAutoScalingGroupCallable(const UpdateAutoScalingGroupRequestT& request) const { return SubmitCallable(&AutoScalingClient::UpdateAutoScalingGroup, request); } /** * An Async wrapper for UpdateAutoScalingGroup that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void UpdateAutoScalingGroupAsync(const UpdateAutoScalingGroupRequestT& request, const UpdateAutoScalingGroupResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&AutoScalingClient::UpdateAutoScalingGroup, request, handler, context); } void OverrideEndpoint(const Aws::String& endpoint); std::shared_ptr& accessEndpointProvider(); private: friend class Aws::Client::ClientWithAsyncTemplateMethods; void init(const AutoScalingClientConfiguration& clientConfiguration); AutoScalingClientConfiguration m_clientConfiguration; std::shared_ptr m_executor; std::shared_ptr m_endpointProvider; }; } // namespace AutoScaling } // namespace Aws