/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/lookoutequipment/LookoutEquipment_EXPORTS.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/client/AWSClient.h>
#include <aws/core/client/AWSClientAsyncCRTP.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/lookoutequipment/LookoutEquipmentServiceClientModel.h>

namespace Aws
{
namespace LookoutEquipment
{
  /**
   * Amazon Lookout for Equipment is a machine learning service that uses advanced
   * analytics to identify anomalies in machines from sensor data for use in
   * predictive maintenance.
   */
  class AWS_LOOKOUTEQUIPMENT_API LookoutEquipmentClient : public Aws::Client::AWSJsonClient, public Aws::Client::ClientWithAsyncTemplateMethods<LookoutEquipmentClient>
  {
    public:
      typedef Aws::Client::AWSJsonClient BASECLASS;
      static const char* SERVICE_NAME;
      static const char* ALLOCATION_TAG;

      typedef LookoutEquipmentClientConfiguration ClientConfigurationType;
      typedef LookoutEquipmentEndpointProvider EndpointProviderType;

      /**
       * Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config.
       * If client config is not specified, it will be initialized to default values.
       */
      LookoutEquipmentClient(const Aws::LookoutEquipment::LookoutEquipmentClientConfiguration& clientConfiguration = Aws::LookoutEquipment::LookoutEquipmentClientConfiguration(),
                             std::shared_ptr<LookoutEquipmentEndpointProviderBase> endpointProvider = Aws::MakeShared<LookoutEquipmentEndpointProvider>(ALLOCATION_TAG));

      /**
       * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config.
       * If client config is not specified, it will be initialized to default values.
       */
      LookoutEquipmentClient(const Aws::Auth::AWSCredentials& credentials,
                             std::shared_ptr<LookoutEquipmentEndpointProviderBase> endpointProvider = Aws::MakeShared<LookoutEquipmentEndpointProvider>(ALLOCATION_TAG),
                             const Aws::LookoutEquipment::LookoutEquipmentClientConfiguration& clientConfiguration = Aws::LookoutEquipment::LookoutEquipmentClientConfiguration());

      /**
       * Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied,
       * the default http client factory will be used.
       */
      LookoutEquipmentClient(const std::shared_ptr<Aws::Auth::AWSCredentialsProvider>& credentialsProvider,
                             std::shared_ptr<LookoutEquipmentEndpointProviderBase> endpointProvider = Aws::MakeShared<LookoutEquipmentEndpointProvider>(ALLOCATION_TAG),
                             const Aws::LookoutEquipment::LookoutEquipmentClientConfiguration& clientConfiguration = Aws::LookoutEquipment::LookoutEquipmentClientConfiguration());


      /* Legacy constructors due to deprecation */
      /**
       * Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config.
       * If client config is not specified, it will be initialized to default values.
       */
      LookoutEquipmentClient(const Aws::Client::ClientConfiguration& clientConfiguration);

      /**
       * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config.
       * If client config is not specified, it will be initialized to default values.
       */
      LookoutEquipmentClient(const Aws::Auth::AWSCredentials& credentials,
                             const Aws::Client::ClientConfiguration& clientConfiguration);

      /**
       * Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied,
       * the default http client factory will be used.
       */
      LookoutEquipmentClient(const std::shared_ptr<Aws::Auth::AWSCredentialsProvider>& credentialsProvider,
                             const Aws::Client::ClientConfiguration& clientConfiguration);

      /* End of legacy constructors due to deprecation */
      virtual ~LookoutEquipmentClient();
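      /**
       * Usage sketch (illustrative, not part of the generated API documentation):
       * constructing a client from an explicit client configuration. The region value
       * below is an assumption for the example.
       * @code
       * Aws::SDKOptions options;
       * Aws::InitAPI(options);                       // required before creating any client
       * {
       *   Aws::LookoutEquipment::LookoutEquipmentClientConfiguration config;
       *   config.region = "us-east-1";               // example region, adjust as needed
       *   Aws::LookoutEquipment::LookoutEquipmentClient client(config);
       *   // ... use the client ...
       * }
       * Aws::ShutdownAPI(options);                   // tear down the SDK when finished
       * @endcode
       */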

      /**
       * Creates a container for a collection of data being ingested for analysis. The
       * dataset contains the metadata describing where the data is and what the data
       * actually looks like. In other words, it contains the location of the data
       * source, the data schema, and other information. A dataset also contains any
       * tags associated with the ingested data.
       *
       * See Also: AWS API Reference
       */
      virtual Model::CreateDatasetOutcome CreateDataset(const Model::CreateDatasetRequest& request) const;

      /**
       * A Callable wrapper for CreateDataset that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename CreateDatasetRequestT = Model::CreateDatasetRequest>
      Model::CreateDatasetOutcomeCallable CreateDatasetCallable(const CreateDatasetRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::CreateDataset, request);
      }

      /**
       * An Async wrapper for CreateDataset that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename CreateDatasetRequestT = Model::CreateDatasetRequest>
      void CreateDatasetAsync(const CreateDatasetRequestT& request, const CreateDatasetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::CreateDataset, request, handler, context);
      }
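      /**
       * Usage sketch (illustrative): calling CreateDataset synchronously and checking
       * the outcome. The setter name SetDatasetName is an assumption that mirrors the
       * DatasetName field of the CreateDataset API; the dataset name is hypothetical.
       * @code
       * Model::CreateDatasetRequest request;
       * request.SetDatasetName("my-dataset");          // hypothetical dataset name
       * auto outcome = client.CreateDataset(request);
       * if (outcome.IsSuccess())
       * {
       *   // outcome.GetResult() carries the service response
       * }
       * else
       * {
       *   // outcome.GetError().GetMessage() describes the failure
       * }
       * @endcode
       */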

      /**
       * Creates a scheduled inference. Scheduling an inference is setting up a
       * continuous real-time inference plan to analyze new measurement data. When
       * setting up the schedule, you provide an S3 bucket location for the input data,
       * assign it a delimiter between separate entries in the data, set an offset delay
       * if desired, and set the frequency of inferencing. You must also provide an S3
       * bucket location for the output data.
       *
       * See Also: AWS API Reference
       */
      virtual Model::CreateInferenceSchedulerOutcome CreateInferenceScheduler(const Model::CreateInferenceSchedulerRequest& request) const;

      /**
       * A Callable wrapper for CreateInferenceScheduler that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename CreateInferenceSchedulerRequestT = Model::CreateInferenceSchedulerRequest>
      Model::CreateInferenceSchedulerOutcomeCallable CreateInferenceSchedulerCallable(const CreateInferenceSchedulerRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::CreateInferenceScheduler, request);
      }

      /**
       * An Async wrapper for CreateInferenceScheduler that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename CreateInferenceSchedulerRequestT = Model::CreateInferenceSchedulerRequest>
      void CreateInferenceSchedulerAsync(const CreateInferenceSchedulerRequestT& request, const CreateInferenceSchedulerResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::CreateInferenceScheduler, request, handler, context);
      }
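      /**
       * Usage sketch (illustrative): submitting CreateInferenceScheduler asynchronously
       * with a completion handler. The lambda signature follows the
       * CreateInferenceSchedulerResponseReceivedHandler typedef used by the Async
       * wrapper above; request population is elided.
       * @code
       * Model::CreateInferenceSchedulerRequest request;
       * // ... populate the request ...
       * client.CreateInferenceSchedulerAsync(request,
       *     [](const LookoutEquipmentClient*,
       *        const Model::CreateInferenceSchedulerRequest&,
       *        const Model::CreateInferenceSchedulerOutcome& outcome,
       *        const std::shared_ptr<const Aws::Client::AsyncCallerContext>&)
       *     {
       *       if (!outcome.IsSuccess())
       *       {
       *         // handle the error, e.g. log outcome.GetError().GetMessage()
       *       }
       *     });
       * @endcode
       */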

      /**
       * Creates a label for an event.
       *
       * See Also: AWS API Reference
       */
      virtual Model::CreateLabelOutcome CreateLabel(const Model::CreateLabelRequest& request) const;

      /**
       * A Callable wrapper for CreateLabel that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename CreateLabelRequestT = Model::CreateLabelRequest>
      Model::CreateLabelOutcomeCallable CreateLabelCallable(const CreateLabelRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::CreateLabel, request);
      }

      /**
       * An Async wrapper for CreateLabel that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename CreateLabelRequestT = Model::CreateLabelRequest>
      void CreateLabelAsync(const CreateLabelRequestT& request, const CreateLabelResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::CreateLabel, request, handler, context);
      }

      /**
       * Creates a group of labels.
       *
       * See Also: AWS API Reference
       */
      virtual Model::CreateLabelGroupOutcome CreateLabelGroup(const Model::CreateLabelGroupRequest& request) const;

      /**
       * A Callable wrapper for CreateLabelGroup that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename CreateLabelGroupRequestT = Model::CreateLabelGroupRequest>
      Model::CreateLabelGroupOutcomeCallable CreateLabelGroupCallable(const CreateLabelGroupRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::CreateLabelGroup, request);
      }

      /**
       * An Async wrapper for CreateLabelGroup that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename CreateLabelGroupRequestT = Model::CreateLabelGroupRequest>
      void CreateLabelGroupAsync(const CreateLabelGroupRequestT& request, const CreateLabelGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::CreateLabelGroup, request, handler, context);
      }

      /**
       * Creates an ML model for data inference.
       *
       * A machine learning (ML) model is a mathematical model that finds patterns in
       * your data. In Amazon Lookout for Equipment, the model learns the patterns of
       * normal behavior and detects abnormal behavior that could be potential equipment
       * failure (or maintenance events). The models are made by analyzing normal data
       * and abnormalities in machine behavior that have already occurred.
       *
       * Your model is trained using a portion of the data from your dataset and uses
       * that data to learn patterns of normal behavior and abnormal patterns that lead
       * to equipment failure. Another portion of the data is used to evaluate the
       * model's accuracy.
       *
       * See Also: AWS API Reference
       */
      virtual Model::CreateModelOutcome CreateModel(const Model::CreateModelRequest& request) const;

      /**
       * A Callable wrapper for CreateModel that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename CreateModelRequestT = Model::CreateModelRequest>
      Model::CreateModelOutcomeCallable CreateModelCallable(const CreateModelRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::CreateModel, request);
      }

      /**
       * An Async wrapper for CreateModel that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename CreateModelRequestT = Model::CreateModelRequest>
      void CreateModelAsync(const CreateModelRequestT& request, const CreateModelResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::CreateModel, request, handler, context);
      }
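      /**
       * Usage sketch (illustrative): using the Callable wrapper to run CreateModel in
       * parallel with other work and collecting the result through the returned future.
       * The setter names SetModelName and SetDatasetName are assumptions that mirror
       * the CreateModel API fields; the names used are hypothetical.
       * @code
       * Model::CreateModelRequest request;
       * request.SetModelName("my-model");       // hypothetical model name
       * request.SetDatasetName("my-dataset");   // hypothetical dataset name
       * auto futureOutcome = client.CreateModelCallable(request);
       * // ... do other work in the meantime ...
       * auto outcome = futureOutcome.get();     // blocks until the call completes
       * @endcode
       */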

      /**
       * Deletes a dataset and associated artifacts. The operation checks whether any
       * inference scheduler or data ingestion job is currently using the dataset; if
       * none is, the dataset, its metadata, and any associated data stored in S3 are
       * deleted. This does not affect any models that used this dataset for training
       * and evaluation, but does prevent the dataset from being used in the future.
       *
       * See Also: AWS API Reference
       */
      virtual Model::DeleteDatasetOutcome DeleteDataset(const Model::DeleteDatasetRequest& request) const;

      /**
       * A Callable wrapper for DeleteDataset that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename DeleteDatasetRequestT = Model::DeleteDatasetRequest>
      Model::DeleteDatasetOutcomeCallable DeleteDatasetCallable(const DeleteDatasetRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::DeleteDataset, request);
      }

      /**
       * An Async wrapper for DeleteDataset that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename DeleteDatasetRequestT = Model::DeleteDatasetRequest>
      void DeleteDatasetAsync(const DeleteDatasetRequestT& request, const DeleteDatasetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::DeleteDataset, request, handler, context);
      }

      /**
       * Deletes an inference scheduler that has been set up. Already processed output
       * results are not affected.
       *
       * See Also: AWS API Reference
       */
      virtual Model::DeleteInferenceSchedulerOutcome DeleteInferenceScheduler(const Model::DeleteInferenceSchedulerRequest& request) const;

      /**
       * A Callable wrapper for DeleteInferenceScheduler that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename DeleteInferenceSchedulerRequestT = Model::DeleteInferenceSchedulerRequest>
      Model::DeleteInferenceSchedulerOutcomeCallable DeleteInferenceSchedulerCallable(const DeleteInferenceSchedulerRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::DeleteInferenceScheduler, request);
      }

      /**
       * An Async wrapper for DeleteInferenceScheduler that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename DeleteInferenceSchedulerRequestT = Model::DeleteInferenceSchedulerRequest>
      void DeleteInferenceSchedulerAsync(const DeleteInferenceSchedulerRequestT& request, const DeleteInferenceSchedulerResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::DeleteInferenceScheduler, request, handler, context);
      }

      /**
       * Deletes a label.
       *
       * See Also: AWS API Reference
       */
      virtual Model::DeleteLabelOutcome DeleteLabel(const Model::DeleteLabelRequest& request) const;

      /**
       * A Callable wrapper for DeleteLabel that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename DeleteLabelRequestT = Model::DeleteLabelRequest>
      Model::DeleteLabelOutcomeCallable DeleteLabelCallable(const DeleteLabelRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::DeleteLabel, request);
      }

      /**
       * An Async wrapper for DeleteLabel that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename DeleteLabelRequestT = Model::DeleteLabelRequest>
      void DeleteLabelAsync(const DeleteLabelRequestT& request, const DeleteLabelResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::DeleteLabel, request, handler, context);
      }

      /**
       * Deletes a group of labels.
       *
       * See Also: AWS API Reference
       */
      virtual Model::DeleteLabelGroupOutcome DeleteLabelGroup(const Model::DeleteLabelGroupRequest& request) const;

      /**
       * A Callable wrapper for DeleteLabelGroup that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename DeleteLabelGroupRequestT = Model::DeleteLabelGroupRequest>
      Model::DeleteLabelGroupOutcomeCallable DeleteLabelGroupCallable(const DeleteLabelGroupRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::DeleteLabelGroup, request);
      }

      /**
       * An Async wrapper for DeleteLabelGroup that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename DeleteLabelGroupRequestT = Model::DeleteLabelGroupRequest>
      void DeleteLabelGroupAsync(const DeleteLabelGroupRequestT& request, const DeleteLabelGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::DeleteLabelGroup, request, handler, context);
      }

      /**
       * Deletes an ML model currently available for Amazon Lookout for Equipment. This
       * will prevent it from being used with an inference scheduler, even one that is
       * already set up.
       *
       * See Also: AWS API Reference
       */
      virtual Model::DeleteModelOutcome DeleteModel(const Model::DeleteModelRequest& request) const;

      /**
       * A Callable wrapper for DeleteModel that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename DeleteModelRequestT = Model::DeleteModelRequest>
      Model::DeleteModelOutcomeCallable DeleteModelCallable(const DeleteModelRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::DeleteModel, request);
      }

      /**
       * An Async wrapper for DeleteModel that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename DeleteModelRequestT = Model::DeleteModelRequest>
      void DeleteModelAsync(const DeleteModelRequestT& request, const DeleteModelResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::DeleteModel, request, handler, context);
      }

      /**
       * Provides information on a specific data ingestion job such as creation time,
       * dataset ARN, and status.
       *
       * See Also: AWS API Reference
       */
      virtual Model::DescribeDataIngestionJobOutcome DescribeDataIngestionJob(const Model::DescribeDataIngestionJobRequest& request) const;

      /**
       * A Callable wrapper for DescribeDataIngestionJob that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename DescribeDataIngestionJobRequestT = Model::DescribeDataIngestionJobRequest>
      Model::DescribeDataIngestionJobOutcomeCallable DescribeDataIngestionJobCallable(const DescribeDataIngestionJobRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::DescribeDataIngestionJob, request);
      }

      /**
       * An Async wrapper for DescribeDataIngestionJob that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename DescribeDataIngestionJobRequestT = Model::DescribeDataIngestionJobRequest>
      void DescribeDataIngestionJobAsync(const DescribeDataIngestionJobRequestT& request, const DescribeDataIngestionJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::DescribeDataIngestionJob, request, handler, context);
      }

      /**
       * Provides a JSON description of the data in each time series dataset, including
       * names, column names, and data types.
       *
       * See Also: AWS API Reference
       */
      virtual Model::DescribeDatasetOutcome DescribeDataset(const Model::DescribeDatasetRequest& request) const;

      /**
       * A Callable wrapper for DescribeDataset that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename DescribeDatasetRequestT = Model::DescribeDatasetRequest>
      Model::DescribeDatasetOutcomeCallable DescribeDatasetCallable(const DescribeDatasetRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::DescribeDataset, request);
      }

      /**
       * An Async wrapper for DescribeDataset that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename DescribeDatasetRequestT = Model::DescribeDatasetRequest>
      void DescribeDatasetAsync(const DescribeDatasetRequestT& request, const DescribeDatasetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::DescribeDataset, request, handler, context);
      }

      /**
       * Specifies information about the inference scheduler being used, including name,
       * model, status, and associated metadata.
       *
       * See Also: AWS API Reference
       */
      virtual Model::DescribeInferenceSchedulerOutcome DescribeInferenceScheduler(const Model::DescribeInferenceSchedulerRequest& request) const;

      /**
       * A Callable wrapper for DescribeInferenceScheduler that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename DescribeInferenceSchedulerRequestT = Model::DescribeInferenceSchedulerRequest>
      Model::DescribeInferenceSchedulerOutcomeCallable DescribeInferenceSchedulerCallable(const DescribeInferenceSchedulerRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::DescribeInferenceScheduler, request);
      }

      /**
       * An Async wrapper for DescribeInferenceScheduler that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename DescribeInferenceSchedulerRequestT = Model::DescribeInferenceSchedulerRequest>
      void DescribeInferenceSchedulerAsync(const DescribeInferenceSchedulerRequestT& request, const DescribeInferenceSchedulerResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::DescribeInferenceScheduler, request, handler, context);
      }

      /**
       * Returns the name of the label.
       *
       * See Also: AWS API Reference
       */
      virtual Model::DescribeLabelOutcome DescribeLabel(const Model::DescribeLabelRequest& request) const;

      /**
       * A Callable wrapper for DescribeLabel that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename DescribeLabelRequestT = Model::DescribeLabelRequest>
      Model::DescribeLabelOutcomeCallable DescribeLabelCallable(const DescribeLabelRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::DescribeLabel, request);
      }

      /**
       * An Async wrapper for DescribeLabel that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename DescribeLabelRequestT = Model::DescribeLabelRequest>
      void DescribeLabelAsync(const DescribeLabelRequestT& request, const DescribeLabelResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::DescribeLabel, request, handler, context);
      }

      /**
       * Returns information about the label group.
       *
       * See Also: AWS API Reference
       */
      virtual Model::DescribeLabelGroupOutcome DescribeLabelGroup(const Model::DescribeLabelGroupRequest& request) const;

      /**
       * A Callable wrapper for DescribeLabelGroup that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename DescribeLabelGroupRequestT = Model::DescribeLabelGroupRequest>
      Model::DescribeLabelGroupOutcomeCallable DescribeLabelGroupCallable(const DescribeLabelGroupRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::DescribeLabelGroup, request);
      }

      /**
       * An Async wrapper for DescribeLabelGroup that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename DescribeLabelGroupRequestT = Model::DescribeLabelGroupRequest>
      void DescribeLabelGroupAsync(const DescribeLabelGroupRequestT& request, const DescribeLabelGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::DescribeLabelGroup, request, handler, context);
      }

      /**
       * Provides a JSON containing the overall information about a specific ML model,
       * including model name and ARN, dataset, training and evaluation information,
       * status, and so on.
       *
       * See Also: AWS API Reference
       */
      virtual Model::DescribeModelOutcome DescribeModel(const Model::DescribeModelRequest& request) const;

      /**
       * A Callable wrapper for DescribeModel that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename DescribeModelRequestT = Model::DescribeModelRequest>
      Model::DescribeModelOutcomeCallable DescribeModelCallable(const DescribeModelRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::DescribeModel, request);
      }

      /**
       * An Async wrapper for DescribeModel that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename DescribeModelRequestT = Model::DescribeModelRequest>
      void DescribeModelAsync(const DescribeModelRequestT& request, const DescribeModelResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::DescribeModel, request, handler, context);
      }
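      /**
       * Usage sketch (illustrative): describing a model and reading fields from the
       * result. The setter and getter names below are assumptions that mirror the
       * DescribeModel request and response fields; the model name is hypothetical.
       * @code
       * Model::DescribeModelRequest request;
       * request.SetModelName("my-model");                 // hypothetical model name
       * auto outcome = client.DescribeModel(request);
       * if (outcome.IsSuccess())
       * {
       *   const auto& result = outcome.GetResult();
       *   // e.g. result.GetStatus() for the training status, result.GetModelArn() for the ARN
       * }
       * @endcode
       */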

      /**
       * Provides a list of all data ingestion jobs, including dataset name and ARN, S3
       * location of the input data, status, and so on.
       *
       * See Also: AWS API Reference
       */
      virtual Model::ListDataIngestionJobsOutcome ListDataIngestionJobs(const Model::ListDataIngestionJobsRequest& request) const;

      /**
       * A Callable wrapper for ListDataIngestionJobs that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename ListDataIngestionJobsRequestT = Model::ListDataIngestionJobsRequest>
      Model::ListDataIngestionJobsOutcomeCallable ListDataIngestionJobsCallable(const ListDataIngestionJobsRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::ListDataIngestionJobs, request);
      }

      /**
       * An Async wrapper for ListDataIngestionJobs that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename ListDataIngestionJobsRequestT = Model::ListDataIngestionJobsRequest>
      void ListDataIngestionJobsAsync(const ListDataIngestionJobsRequestT& request, const ListDataIngestionJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::ListDataIngestionJobs, request, handler, context);
      }

      /**
       * Lists all datasets currently available in your account, filtering on the
       * dataset name.
       *
       * See Also: AWS API Reference
       */
      virtual Model::ListDatasetsOutcome ListDatasets(const Model::ListDatasetsRequest& request) const;

      /**
       * A Callable wrapper for ListDatasets that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename ListDatasetsRequestT = Model::ListDatasetsRequest>
      Model::ListDatasetsOutcomeCallable ListDatasetsCallable(const ListDatasetsRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::ListDatasets, request);
      }

      /**
       * An Async wrapper for ListDatasets that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename ListDatasetsRequestT = Model::ListDatasetsRequest>
      void ListDatasetsAsync(const ListDatasetsRequestT& request, const ListDatasetsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::ListDatasets, request, handler, context);
      }
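      /**
       * Usage sketch (illustrative): listing datasets page by page. The
       * GetDatasetSummaries, GetNextToken, and SetNextToken accessors are assumptions
       * that mirror the ListDatasets API pagination fields.
       * @code
       * Model::ListDatasetsRequest request;
       * Model::ListDatasetsOutcome outcome = client.ListDatasets(request);
       * while (outcome.IsSuccess())
       * {
       *   for (const auto& summary : outcome.GetResult().GetDatasetSummaries())
       *   {
       *     // inspect summary, e.g. summary.GetDatasetName()
       *   }
       *   const auto& nextToken = outcome.GetResult().GetNextToken();
       *   if (nextToken.empty()) { break; }                // no more pages
       *   request.SetNextToken(nextToken);
       *   outcome = client.ListDatasets(request);
       * }
       * @endcode
       */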

      /**
       * Lists all inference events that have been found for the specified inference
       * scheduler.
       *
       * See Also: AWS API Reference
       */
      virtual Model::ListInferenceEventsOutcome ListInferenceEvents(const Model::ListInferenceEventsRequest& request) const;

      /**
       * A Callable wrapper for ListInferenceEvents that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename ListInferenceEventsRequestT = Model::ListInferenceEventsRequest>
      Model::ListInferenceEventsOutcomeCallable ListInferenceEventsCallable(const ListInferenceEventsRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::ListInferenceEvents, request);
      }

      /**
       * An Async wrapper for ListInferenceEvents that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename ListInferenceEventsRequestT = Model::ListInferenceEventsRequest>
      void ListInferenceEventsAsync(const ListInferenceEventsRequestT& request, const ListInferenceEventsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::ListInferenceEvents, request, handler, context);
      }

      /**
       * Lists all inference executions that have been performed by the specified
       * inference scheduler.
       *
       * See Also: AWS API Reference
       */
      virtual Model::ListInferenceExecutionsOutcome ListInferenceExecutions(const Model::ListInferenceExecutionsRequest& request) const;

      /**
       * A Callable wrapper for ListInferenceExecutions that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename ListInferenceExecutionsRequestT = Model::ListInferenceExecutionsRequest>
      Model::ListInferenceExecutionsOutcomeCallable ListInferenceExecutionsCallable(const ListInferenceExecutionsRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::ListInferenceExecutions, request);
      }

      /**
       * An Async wrapper for ListInferenceExecutions that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename ListInferenceExecutionsRequestT = Model::ListInferenceExecutionsRequest>
      void ListInferenceExecutionsAsync(const ListInferenceExecutionsRequestT& request, const ListInferenceExecutionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::ListInferenceExecutions, request, handler, context);
      }

      /**
       * Retrieves a list of all inference schedulers currently available for your
       * account.
       *
       * See Also: AWS API Reference
       */
      virtual Model::ListInferenceSchedulersOutcome ListInferenceSchedulers(const Model::ListInferenceSchedulersRequest& request) const;

      /**
       * A Callable wrapper for ListInferenceSchedulers that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename ListInferenceSchedulersRequestT = Model::ListInferenceSchedulersRequest>
      Model::ListInferenceSchedulersOutcomeCallable ListInferenceSchedulersCallable(const ListInferenceSchedulersRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::ListInferenceSchedulers, request);
      }

      /**
       * An Async wrapper for ListInferenceSchedulers that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename ListInferenceSchedulersRequestT = Model::ListInferenceSchedulersRequest>
      void ListInferenceSchedulersAsync(const ListInferenceSchedulersRequestT& request, const ListInferenceSchedulersResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::ListInferenceSchedulers, request, handler, context);
      }

      /**
       * Returns a list of the label groups.
       *
       * See Also: AWS API Reference
       */
      virtual Model::ListLabelGroupsOutcome ListLabelGroups(const Model::ListLabelGroupsRequest& request) const;

      /**
       * A Callable wrapper for ListLabelGroups that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename ListLabelGroupsRequestT = Model::ListLabelGroupsRequest>
      Model::ListLabelGroupsOutcomeCallable ListLabelGroupsCallable(const ListLabelGroupsRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::ListLabelGroups, request);
      }

      /**
       * An Async wrapper for ListLabelGroups that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename ListLabelGroupsRequestT = Model::ListLabelGroupsRequest>
      void ListLabelGroupsAsync(const ListLabelGroupsRequestT& request, const ListLabelGroupsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::ListLabelGroups, request, handler, context);
      }

      /**
       * Provides a list of labels.
       *
       * See Also: AWS API Reference
       */
      virtual Model::ListLabelsOutcome ListLabels(const Model::ListLabelsRequest& request) const;

      /**
       * A Callable wrapper for ListLabels that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename ListLabelsRequestT = Model::ListLabelsRequest>
      Model::ListLabelsOutcomeCallable ListLabelsCallable(const ListLabelsRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::ListLabels, request);
      }

      /**
       * An Async wrapper for ListLabels that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename ListLabelsRequestT = Model::ListLabelsRequest>
      void ListLabelsAsync(const ListLabelsRequestT& request, const ListLabelsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::ListLabels, request, handler, context);
      }

      /**
       * Generates a list of all models in the account, including model name and ARN,
       * dataset, and status.
       *
       * See Also: AWS API Reference
       */
      virtual Model::ListModelsOutcome ListModels(const Model::ListModelsRequest& request) const;

      /**
       * A Callable wrapper for ListModels that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename ListModelsRequestT = Model::ListModelsRequest>
      Model::ListModelsOutcomeCallable ListModelsCallable(const ListModelsRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::ListModels, request);
      }

      /**
       * An Async wrapper for ListModels that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename ListModelsRequestT = Model::ListModelsRequest>
      void ListModelsAsync(const ListModelsRequestT& request, const ListModelsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::ListModels, request, handler, context);
      }

      /**
       * Lists statistics about the data collected for each of the sensors that have
       * been successfully ingested in the particular dataset. Can also be used to
       * retrieve sensor statistics for a previous ingestion job.
       *
       * See Also: AWS API Reference
       */
      virtual Model::ListSensorStatisticsOutcome ListSensorStatistics(const Model::ListSensorStatisticsRequest& request) const;

      /**
       * A Callable wrapper for ListSensorStatistics that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename ListSensorStatisticsRequestT = Model::ListSensorStatisticsRequest>
      Model::ListSensorStatisticsOutcomeCallable ListSensorStatisticsCallable(const ListSensorStatisticsRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::ListSensorStatistics, request);
      }

      /**
       * An Async wrapper for ListSensorStatistics that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename ListSensorStatisticsRequestT = Model::ListSensorStatisticsRequest>
      void ListSensorStatisticsAsync(const ListSensorStatisticsRequestT& request, const ListSensorStatisticsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::ListSensorStatistics, request, handler, context);
      }

      /**
       * Lists all the tags for a specified resource, including key and value.
       *
       * See Also: AWS API Reference
       */
      virtual Model::ListTagsForResourceOutcome ListTagsForResource(const Model::ListTagsForResourceRequest& request) const;

      /**
       * A Callable wrapper for ListTagsForResource that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename ListTagsForResourceRequestT = Model::ListTagsForResourceRequest>
      Model::ListTagsForResourceOutcomeCallable ListTagsForResourceCallable(const ListTagsForResourceRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::ListTagsForResource, request);
      }

      /**
       * An Async wrapper for ListTagsForResource that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename ListTagsForResourceRequestT = Model::ListTagsForResourceRequest>
      void ListTagsForResourceAsync(const ListTagsForResourceRequestT& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::ListTagsForResource, request, handler, context);
      }

      /**
       * Starts a data ingestion job. Amazon Lookout for Equipment returns the job
       * status.
       *
       * See Also: AWS API Reference
       */
      virtual Model::StartDataIngestionJobOutcome StartDataIngestionJob(const Model::StartDataIngestionJobRequest& request) const;

      /**
       * A Callable wrapper for StartDataIngestionJob that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename StartDataIngestionJobRequestT = Model::StartDataIngestionJobRequest>
      Model::StartDataIngestionJobOutcomeCallable StartDataIngestionJobCallable(const StartDataIngestionJobRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::StartDataIngestionJob, request);
      }

      /**
       * An Async wrapper for StartDataIngestionJob that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename StartDataIngestionJobRequestT = Model::StartDataIngestionJobRequest>
      void StartDataIngestionJobAsync(const StartDataIngestionJobRequestT& request, const StartDataIngestionJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::StartDataIngestionJob, request, handler, context);
      }

      /**
       * Starts an inference scheduler.
       *
       * See Also: AWS API Reference
       */
      virtual Model::StartInferenceSchedulerOutcome StartInferenceScheduler(const Model::StartInferenceSchedulerRequest& request) const;

      /**
       * A Callable wrapper for StartInferenceScheduler that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename StartInferenceSchedulerRequestT = Model::StartInferenceSchedulerRequest>
      Model::StartInferenceSchedulerOutcomeCallable StartInferenceSchedulerCallable(const StartInferenceSchedulerRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::StartInferenceScheduler, request);
      }

      /**
       * An Async wrapper for StartInferenceScheduler that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename StartInferenceSchedulerRequestT = Model::StartInferenceSchedulerRequest>
      void StartInferenceSchedulerAsync(const StartInferenceSchedulerRequestT& request, const StartInferenceSchedulerResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::StartInferenceScheduler, request, handler, context);
      }

      /**
       * Stops an inference scheduler.
       *
       * See Also: AWS API Reference
       */
      virtual Model::StopInferenceSchedulerOutcome StopInferenceScheduler(const Model::StopInferenceSchedulerRequest& request) const;

      /**
       * A Callable wrapper for StopInferenceScheduler that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename StopInferenceSchedulerRequestT = Model::StopInferenceSchedulerRequest>
      Model::StopInferenceSchedulerOutcomeCallable StopInferenceSchedulerCallable(const StopInferenceSchedulerRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::StopInferenceScheduler, request);
      }

      /**
       * An Async wrapper for StopInferenceScheduler that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename StopInferenceSchedulerRequestT = Model::StopInferenceSchedulerRequest>
      void StopInferenceSchedulerAsync(const StopInferenceSchedulerRequestT& request, const StopInferenceSchedulerResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::StopInferenceScheduler, request, handler, context);
      }

      /**
       * Associates a given tag with a resource in your account. A tag is a key-value
       * pair that can be added to an Amazon Lookout for Equipment resource as metadata.
       * Tags can be used for organizing your resources as well as helping you to search
       * and filter by tag. Multiple tags can be added to a resource, either when you
       * create it or later. Up to 50 tags can be associated with each resource.
       *
       * See Also: AWS API Reference
       */
      virtual Model::TagResourceOutcome TagResource(const Model::TagResourceRequest& request) const;

      /**
       * A Callable wrapper for TagResource that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename TagResourceRequestT = Model::TagResourceRequest>
      Model::TagResourceOutcomeCallable TagResourceCallable(const TagResourceRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::TagResource, request);
      }

      /**
       * An Async wrapper for TagResource that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename TagResourceRequestT = Model::TagResourceRequest>
      void TagResourceAsync(const TagResourceRequestT& request, const TagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::TagResource, request, handler, context);
      }
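      /**
       * Usage sketch (illustrative): adding a tag to a resource. Model::Tag with
       * SetKey/SetValue and the SetResourceArn/AddTags setters are assumptions that
       * mirror the TagResource API fields; the ARN shown is a placeholder, not a real
       * value.
       * @code
       * Model::Tag tag;
       * tag.SetKey("team");
       * tag.SetValue("predictive-maintenance");
       * Model::TagResourceRequest request;
       * request.SetResourceArn("arn:aws:lookoutequipment:...");  // placeholder resource ARN
       * request.AddTags(tag);
       * auto outcome = client.TagResource(request);
       * @endcode
       */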

      /**
       * Removes a specific tag from a given resource. The tag is specified by its key.
       *
       * See Also: AWS API Reference
       */
      virtual Model::UntagResourceOutcome UntagResource(const Model::UntagResourceRequest& request) const;

      /**
       * A Callable wrapper for UntagResource that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename UntagResourceRequestT = Model::UntagResourceRequest>
      Model::UntagResourceOutcomeCallable UntagResourceCallable(const UntagResourceRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::UntagResource, request);
      }

      /**
       * An Async wrapper for UntagResource that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename UntagResourceRequestT = Model::UntagResourceRequest>
      void UntagResourceAsync(const UntagResourceRequestT& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::UntagResource, request, handler, context);
      }

      /**
       * Updates an inference scheduler.
       *
       * See Also: AWS API Reference
       */
      virtual Model::UpdateInferenceSchedulerOutcome UpdateInferenceScheduler(const Model::UpdateInferenceSchedulerRequest& request) const;

      /**
       * A Callable wrapper for UpdateInferenceScheduler that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename UpdateInferenceSchedulerRequestT = Model::UpdateInferenceSchedulerRequest>
      Model::UpdateInferenceSchedulerOutcomeCallable UpdateInferenceSchedulerCallable(const UpdateInferenceSchedulerRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::UpdateInferenceScheduler, request);
      }

      /**
       * An Async wrapper for UpdateInferenceScheduler that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename UpdateInferenceSchedulerRequestT = Model::UpdateInferenceSchedulerRequest>
      void UpdateInferenceSchedulerAsync(const UpdateInferenceSchedulerRequestT& request, const UpdateInferenceSchedulerResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::UpdateInferenceScheduler, request, handler, context);
      }

      /**
       * Updates the label group.
       *
       * See Also: AWS API Reference
       */
      virtual Model::UpdateLabelGroupOutcome UpdateLabelGroup(const Model::UpdateLabelGroupRequest& request) const;

      /**
       * A Callable wrapper for UpdateLabelGroup that returns a future to the operation so that it can be executed in parallel to other requests.
       */
      template<typename UpdateLabelGroupRequestT = Model::UpdateLabelGroupRequest>
      Model::UpdateLabelGroupOutcomeCallable UpdateLabelGroupCallable(const UpdateLabelGroupRequestT& request) const
      {
        return SubmitCallable(&LookoutEquipmentClient::UpdateLabelGroup, request);
      }

      /**
       * An Async wrapper for UpdateLabelGroup that queues the request into a thread executor and triggers associated callback when operation has finished.
       */
      template<typename UpdateLabelGroupRequestT = Model::UpdateLabelGroupRequest>
      void UpdateLabelGroupAsync(const UpdateLabelGroupRequestT& request, const UpdateLabelGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
      {
        return SubmitAsync(&LookoutEquipmentClient::UpdateLabelGroup, request, handler, context);
      }


      void OverrideEndpoint(const Aws::String& endpoint);
      std::shared_ptr<LookoutEquipmentEndpointProviderBase>& accessEndpointProvider();

    private:
      friend class Aws::Client::ClientWithAsyncTemplateMethods<LookoutEquipmentClient>;
      void init(const LookoutEquipmentClientConfiguration& clientConfiguration);

      LookoutEquipmentClientConfiguration m_clientConfiguration;
      std::shared_ptr<Aws::Utils::Threading::Executor> m_executor;
      std::shared_ptr<LookoutEquipmentEndpointProviderBase> m_endpointProvider;
  };

} // namespace LookoutEquipment
} // namespace Aws