/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include #include #include #include #include #include namespace Aws { namespace Personalize { /** *

Amazon Personalize is a machine learning service that makes it easy to add * individualized recommendations to customers.

*/ class AWS_PERSONALIZE_API PersonalizeClient : public Aws::Client::AWSJsonClient, public Aws::Client::ClientWithAsyncTemplateMethods { public: typedef Aws::Client::AWSJsonClient BASECLASS; static const char* SERVICE_NAME; static const char* ALLOCATION_TAG; typedef PersonalizeClientConfiguration ClientConfigurationType; typedef PersonalizeEndpointProvider EndpointProviderType; /** * Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config. If client config * is not specified, it will be initialized to default values. */ PersonalizeClient(const Aws::Personalize::PersonalizeClientConfiguration& clientConfiguration = Aws::Personalize::PersonalizeClientConfiguration(), std::shared_ptr endpointProvider = Aws::MakeShared(ALLOCATION_TAG)); /** * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config. If client config * is not specified, it will be initialized to default values. */ PersonalizeClient(const Aws::Auth::AWSCredentials& credentials, std::shared_ptr endpointProvider = Aws::MakeShared(ALLOCATION_TAG), const Aws::Personalize::PersonalizeClientConfiguration& clientConfiguration = Aws::Personalize::PersonalizeClientConfiguration()); /** * Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied, * the default http client factory will be used */ PersonalizeClient(const std::shared_ptr& credentialsProvider, std::shared_ptr endpointProvider = Aws::MakeShared(ALLOCATION_TAG), const Aws::Personalize::PersonalizeClientConfiguration& clientConfiguration = Aws::Personalize::PersonalizeClientConfiguration()); /* Legacy constructors due deprecation */ /** * Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config. If client config * is not specified, it will be initialized to default values. 
*/ PersonalizeClient(const Aws::Client::ClientConfiguration& clientConfiguration); /** * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config. If client config * is not specified, it will be initialized to default values. */ PersonalizeClient(const Aws::Auth::AWSCredentials& credentials, const Aws::Client::ClientConfiguration& clientConfiguration); /** * Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied, * the default http client factory will be used */ PersonalizeClient(const std::shared_ptr& credentialsProvider, const Aws::Client::ClientConfiguration& clientConfiguration); /* End of legacy constructors due deprecation */ virtual ~PersonalizeClient(); /** *

Creates a batch inference job. The operation can handle up to 50 million * records and the input file must be in JSON format. For more information, see Creating * a batch inference job.

See Also:

AWS * API Reference

*/ virtual Model::CreateBatchInferenceJobOutcome CreateBatchInferenceJob(const Model::CreateBatchInferenceJobRequest& request) const; /** * A Callable wrapper for CreateBatchInferenceJob that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateBatchInferenceJobOutcomeCallable CreateBatchInferenceJobCallable(const CreateBatchInferenceJobRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateBatchInferenceJob, request); } /** * An Async wrapper for CreateBatchInferenceJob that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateBatchInferenceJobAsync(const CreateBatchInferenceJobRequestT& request, const CreateBatchInferenceJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateBatchInferenceJob, request, handler, context); } /** *

Creates a batch segment job. The operation can handle up to 50 million * records and the input file must be in JSON format. For more information, see Getting * batch recommendations and user segments.

See Also:

AWS * API Reference

*/ virtual Model::CreateBatchSegmentJobOutcome CreateBatchSegmentJob(const Model::CreateBatchSegmentJobRequest& request) const; /** * A Callable wrapper for CreateBatchSegmentJob that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateBatchSegmentJobOutcomeCallable CreateBatchSegmentJobCallable(const CreateBatchSegmentJobRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateBatchSegmentJob, request); } /** * An Async wrapper for CreateBatchSegmentJob that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateBatchSegmentJobAsync(const CreateBatchSegmentJobRequestT& request, const CreateBatchSegmentJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateBatchSegmentJob, request, handler, context); } /** *

Creates a campaign that deploys a solution version. When a client calls the * GetRecommendations * and GetPersonalizedRanking * APIs, a campaign is specified in the request.

Minimum Provisioned TPS * and Auto-Scaling

A high minProvisionedTPS * will increase your bill. We recommend starting with 1 for * minProvisionedTPS (the default). Track your usage using Amazon * CloudWatch metrics, and increase the minProvisionedTPS as * necessary.

A transaction is a single * GetRecommendations or GetPersonalizedRanking call. * Transactions per second (TPS) is the throughput and unit of billing for Amazon * Personalize. The minimum provisioned TPS (minProvisionedTPS) * specifies the baseline throughput provisioned by Amazon Personalize, and thus, * the minimum billing charge.

If your TPS increases beyond * minProvisionedTPS, Amazon Personalize auto-scales the provisioned * capacity up and down, but never below minProvisionedTPS. There's a * short time delay while the capacity is increased that might cause loss of * transactions.

The actual TPS used is calculated as the average * requests/second within a 5-minute window. You pay for maximum of either the * minimum provisioned TPS or the actual TPS. We recommend starting with a low * minProvisionedTPS, track your usage using Amazon CloudWatch * metrics, and then increase the minProvisionedTPS as necessary.

*

Status

A campaign can be in one of the following states:

*
  • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE * FAILED

  • DELETE PENDING > DELETE IN_PROGRESS

*

To get the campaign status, call DescribeCampaign.

*

Wait until the status of the campaign is * ACTIVE before asking the campaign for recommendations.

*

Related APIs

See Also:

AWS * API Reference

*/ virtual Model::CreateCampaignOutcome CreateCampaign(const Model::CreateCampaignRequest& request) const; /** * A Callable wrapper for CreateCampaign that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateCampaignOutcomeCallable CreateCampaignCallable(const CreateCampaignRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateCampaign, request); } /** * An Async wrapper for CreateCampaign that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateCampaignAsync(const CreateCampaignRequestT& request, const CreateCampaignResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateCampaign, request, handler, context); } /** *

Creates an empty dataset and adds it to the specified dataset group. Use CreateDatasetImportJob * to import your training data to a dataset.

There are three types of * datasets:

  • Interactions

  • Items

  • *

    Users

Each dataset type has an associated schema with * required field types. Only the Interactions dataset is required in * order to train a model (also referred to as creating a solution).

A * dataset can be in one of the following states:

  • CREATE PENDING * > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

  • DELETE * PENDING > DELETE IN_PROGRESS

To get the status of the * dataset, call DescribeDataset.

*

Related APIs

See Also:

AWS * API Reference

*/ virtual Model::CreateDatasetOutcome CreateDataset(const Model::CreateDatasetRequest& request) const; /** * A Callable wrapper for CreateDataset that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateDatasetOutcomeCallable CreateDatasetCallable(const CreateDatasetRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateDataset, request); } /** * An Async wrapper for CreateDataset that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateDatasetAsync(const CreateDatasetRequestT& request, const CreateDatasetResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateDataset, request, handler, context); } /** *

Creates a job that exports data from your dataset to an Amazon S3 bucket. To * allow Amazon Personalize to export the training data, you must specify an * service-linked IAM role that gives Amazon Personalize PutObject * permissions for your Amazon S3 bucket. For information, see Exporting * a dataset in the Amazon Personalize developer guide.

Status *

A dataset export job can be in one of the following states:

    *
  • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE * FAILED

To get the status of the export job, call DescribeDatasetExportJob, * and specify the Amazon Resource Name (ARN) of the dataset export job. The * dataset export is complete when the status shows as ACTIVE. If the status shows * as CREATE FAILED, the response includes a failureReason key, which * describes why the job failed.

See Also:

AWS * API Reference

*/ virtual Model::CreateDatasetExportJobOutcome CreateDatasetExportJob(const Model::CreateDatasetExportJobRequest& request) const; /** * A Callable wrapper for CreateDatasetExportJob that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateDatasetExportJobOutcomeCallable CreateDatasetExportJobCallable(const CreateDatasetExportJobRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateDatasetExportJob, request); } /** * An Async wrapper for CreateDatasetExportJob that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateDatasetExportJobAsync(const CreateDatasetExportJobRequestT& request, const CreateDatasetExportJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateDatasetExportJob, request, handler, context); } /** *

Creates an empty dataset group. A dataset group is a container for Amazon * Personalize resources. A dataset group can contain at most three datasets, one * for each type of dataset:

  • Interactions

  • *

    Items

  • Users

A dataset group can be a * Domain dataset group, where you specify a domain and use pre-configured * resources like recommenders, or a Custom dataset group, where you use custom * resources, such as a solution with a solution version, that you deploy with a * campaign. If you start with a Domain dataset group, you can still add custom * resources such as solutions and solution versions trained with recipes for * custom use cases and deployed with campaigns.

A dataset group can be in * one of the following states:

  • CREATE PENDING > CREATE * IN_PROGRESS > ACTIVE -or- CREATE FAILED

  • DELETE PENDING

    *

To get the status of the dataset group, call DescribeDatasetGroup. * If the status shows as CREATE FAILED, the response includes a * failureReason key, which describes why the creation failed.

*

You must wait until the status of the dataset group is * ACTIVE before adding a dataset to the group.

You can * specify an Key Management Service (KMS) key to encrypt the datasets in the * group. If you specify a KMS key, you must also include an Identity and Access * Management (IAM) role that has permission to access the key.

APIs that require a dataset group ARN in the request

*

Related APIs

See Also:

AWS * API Reference

*/ virtual Model::CreateDatasetGroupOutcome CreateDatasetGroup(const Model::CreateDatasetGroupRequest& request) const; /** * A Callable wrapper for CreateDatasetGroup that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateDatasetGroupOutcomeCallable CreateDatasetGroupCallable(const CreateDatasetGroupRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateDatasetGroup, request); } /** * An Async wrapper for CreateDatasetGroup that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateDatasetGroupAsync(const CreateDatasetGroupRequestT& request, const CreateDatasetGroupResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateDatasetGroup, request, handler, context); } /** *

Creates a job that imports training data from your data source (an Amazon S3 * bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to import * the training data, you must specify an IAM service role that has permission to * read from the data source, as Amazon Personalize makes a copy of your data and * processes it internally. For information on granting access to your Amazon S3 * bucket, see Giving * Amazon Personalize Access to Amazon S3 Resources.

By * default, a dataset import job replaces any existing data in the dataset that you * imported in bulk. To add new records without replacing existing data, specify * INCREMENTAL for the import mode in the CreateDatasetImportJob operation.

*

Status

A dataset import job can be in one of the * following states:

  • CREATE PENDING > CREATE IN_PROGRESS > * ACTIVE -or- CREATE FAILED

To get the status of the import * job, call DescribeDatasetImportJob, * providing the Amazon Resource Name (ARN) of the dataset import job. The dataset * import is complete when the status shows as ACTIVE. If the status shows as * CREATE FAILED, the response includes a failureReason key, which * describes why the job failed.

Importing takes time. You must wait * until the status shows as ACTIVE before training a model using the dataset.

*

Related APIs

See Also:

AWS * API Reference

*/ virtual Model::CreateDatasetImportJobOutcome CreateDatasetImportJob(const Model::CreateDatasetImportJobRequest& request) const; /** * A Callable wrapper for CreateDatasetImportJob that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateDatasetImportJobOutcomeCallable CreateDatasetImportJobCallable(const CreateDatasetImportJobRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateDatasetImportJob, request); } /** * An Async wrapper for CreateDatasetImportJob that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateDatasetImportJobAsync(const CreateDatasetImportJobRequestT& request, const CreateDatasetImportJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateDatasetImportJob, request, handler, context); } /** *

Creates an event tracker that you use when adding event data to a specified * dataset group using the PutEvents * API.

Only one event tracker can be associated with a dataset * group. You will get an error if you call CreateEventTracker using * the same dataset group as an existing event tracker.

When you * create an event tracker, the response includes a tracking ID, which you pass as * a parameter when you use the PutEvents * operation. Amazon Personalize then appends the event data to the Interactions * dataset of the dataset group you specify in your event tracker.

The * event tracker can be in one of the following states:

  • CREATE * PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

  • *

    DELETE PENDING > DELETE IN_PROGRESS

To get the status * of the event tracker, call DescribeEventTracker.

*

The event tracker must be in the ACTIVE state before using the * tracking ID.

Related APIs

See Also:

AWS * API Reference

*/ virtual Model::CreateEventTrackerOutcome CreateEventTracker(const Model::CreateEventTrackerRequest& request) const; /** * A Callable wrapper for CreateEventTracker that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateEventTrackerOutcomeCallable CreateEventTrackerCallable(const CreateEventTrackerRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateEventTracker, request); } /** * An Async wrapper for CreateEventTracker that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateEventTrackerAsync(const CreateEventTrackerRequestT& request, const CreateEventTrackerResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateEventTracker, request, handler, context); } /** *

Creates a recommendation filter. For more information, see Filtering * recommendations and user segments.

See Also:

AWS * API Reference

*/ virtual Model::CreateFilterOutcome CreateFilter(const Model::CreateFilterRequest& request) const; /** * A Callable wrapper for CreateFilter that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateFilterOutcomeCallable CreateFilterCallable(const CreateFilterRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateFilter, request); } /** * An Async wrapper for CreateFilter that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateFilterAsync(const CreateFilterRequestT& request, const CreateFilterResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateFilter, request, handler, context); } /** *

Creates a metric attribution. A metric attribution creates reports on the * data that you import into Amazon Personalize. Depending on how you imported the * data, you can view reports in Amazon CloudWatch or Amazon S3. For more * information, see Measuring * impact of recommendations.

See Also:

AWS * API Reference

*/ virtual Model::CreateMetricAttributionOutcome CreateMetricAttribution(const Model::CreateMetricAttributionRequest& request) const; /** * A Callable wrapper for CreateMetricAttribution that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateMetricAttributionOutcomeCallable CreateMetricAttributionCallable(const CreateMetricAttributionRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateMetricAttribution, request); } /** * An Async wrapper for CreateMetricAttribution that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateMetricAttributionAsync(const CreateMetricAttributionRequestT& request, const CreateMetricAttributionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateMetricAttribution, request, handler, context); } /** *

Creates a recommender with the recipe (a Domain dataset group use case) you * specify. You create recommenders for a Domain dataset group and specify the * recommender's Amazon Resource Name (ARN) when you make a GetRecommendations * request.

Minimum recommendation requests per second

*

A high minRecommendationRequestsPerSecond will * increase your bill. We recommend starting with 1 for * minRecommendationRequestsPerSecond (the default). Track your usage * using Amazon CloudWatch metrics, and increase the * minRecommendationRequestsPerSecond as necessary.

*

When you create a recommender, you can configure the recommender's minimum * recommendation requests per second. The minimum recommendation requests per * second (minRecommendationRequestsPerSecond) specifies the baseline * recommendation request throughput provisioned by Amazon Personalize. The default * minRecommendationRequestsPerSecond is 1. A recommendation request * is a single GetRecommendations operation. Request throughput is * measured in requests per second and Amazon Personalize uses your requests per * second to derive your requests per hour and the price of your recommender usage. *

If your requests per second increases beyond * minRecommendationRequestsPerSecond, Amazon Personalize auto-scales * the provisioned capacity up and down, but never below * minRecommendationRequestsPerSecond. There's a short time delay * while the capacity is increased that might cause loss of requests.

Your * bill is the greater of either the minimum requests per hour (based on * minRecommendationRequestsPerSecond) or the actual number of requests. The actual * request throughput used is calculated as the average requests/second within a * one-hour window. We recommend starting with the default * minRecommendationRequestsPerSecond, track your usage using Amazon * CloudWatch metrics, and then increase the * minRecommendationRequestsPerSecond as necessary.

* Status

A recommender can be in one of the following states:

*
  • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE * FAILED

  • STOP PENDING > STOP IN_PROGRESS > INACTIVE > * START PENDING > START IN_PROGRESS > ACTIVE

  • DELETE * PENDING > DELETE IN_PROGRESS

To get the recommender * status, call DescribeRecommender.

*

Wait until the status of the recommender is * ACTIVE before asking the recommender for recommendations.

*

Related APIs

See Also:

AWS * API Reference

*/ virtual Model::CreateRecommenderOutcome CreateRecommender(const Model::CreateRecommenderRequest& request) const; /** * A Callable wrapper for CreateRecommender that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateRecommenderOutcomeCallable CreateRecommenderCallable(const CreateRecommenderRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateRecommender, request); } /** * An Async wrapper for CreateRecommender that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateRecommenderAsync(const CreateRecommenderRequestT& request, const CreateRecommenderResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateRecommender, request, handler, context); } /** *

Creates an Amazon Personalize schema from the specified schema string. The * schema you create must be in Avro JSON format.

Amazon Personalize * recognizes three schema variants. Each schema is associated with a dataset type * and has a set of required field and keywords. If you are creating a schema for a * dataset in a Domain dataset group, you provide the domain of the Domain dataset * group. You specify a schema when you call CreateDataset.

*

Related APIs

See Also:

AWS * API Reference

*/ virtual Model::CreateSchemaOutcome CreateSchema(const Model::CreateSchemaRequest& request) const; /** * A Callable wrapper for CreateSchema that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateSchemaOutcomeCallable CreateSchemaCallable(const CreateSchemaRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateSchema, request); } /** * An Async wrapper for CreateSchema that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateSchemaAsync(const CreateSchemaRequestT& request, const CreateSchemaResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateSchema, request, handler, context); } /** *

Creates the configuration for training a model. A trained model is known as a * solution version. After the configuration is created, you train the model * (create a solution version) by calling the CreateSolutionVersion * operation. Every time you call CreateSolutionVersion, a new version * of the solution is created.

After creating a solution version, you check * its accuracy by calling GetSolutionMetrics. * When you are satisfied with the version, you deploy it using CreateCampaign. * The campaign provides recommendations to a client through the GetRecommendations * API.

To train a model, Amazon Personalize requires training data and a * recipe. The training data comes from the dataset group that you provide in the * request. A recipe specifies the training algorithm and a feature transformation. * You can specify one of the predefined recipes provided by Amazon Personalize. *

Amazon Personalize doesn't support configuring the * hpoObjective for solution hyperparameter optimization at this * time.

Status

A solution can be in one of the * following states:

  • CREATE PENDING > CREATE IN_PROGRESS > * ACTIVE -or- CREATE FAILED

  • DELETE PENDING > DELETE * IN_PROGRESS

To get the status of the solution, call DescribeSolution. * Wait until the status shows as ACTIVE before calling * CreateSolutionVersion.

Related APIs *

See Also:

AWS * API Reference

*/ virtual Model::CreateSolutionOutcome CreateSolution(const Model::CreateSolutionRequest& request) const; /** * A Callable wrapper for CreateSolution that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateSolutionOutcomeCallable CreateSolutionCallable(const CreateSolutionRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateSolution, request); } /** * An Async wrapper for CreateSolution that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateSolutionAsync(const CreateSolutionRequestT& request, const CreateSolutionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateSolution, request, handler, context); } /** *

Trains or retrains an active solution in a Custom dataset group. A solution * is created using the CreateSolution * operation and must be in the ACTIVE state before calling * CreateSolutionVersion. A new version of the solution is created * every time you call this operation.

Status

A solution * version can be in one of the following states:

  • CREATE * PENDING

  • CREATE IN_PROGRESS

  • ACTIVE

  • *
  • CREATE FAILED

  • CREATE STOPPING

  • CREATE * STOPPED

To get the status of the version, call DescribeSolutionVersion. * Wait until the status shows as ACTIVE before calling * CreateCampaign.

If the status shows as CREATE FAILED, the * response includes a failureReason key, which describes why the job * failed.

Related APIs

See Also:

AWS * API Reference

*/ virtual Model::CreateSolutionVersionOutcome CreateSolutionVersion(const Model::CreateSolutionVersionRequest& request) const; /** * A Callable wrapper for CreateSolutionVersion that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::CreateSolutionVersionOutcomeCallable CreateSolutionVersionCallable(const CreateSolutionVersionRequestT& request) const { return SubmitCallable(&PersonalizeClient::CreateSolutionVersion, request); } /** * An Async wrapper for CreateSolutionVersion that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void CreateSolutionVersionAsync(const CreateSolutionVersionRequestT& request, const CreateSolutionVersionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::CreateSolutionVersion, request, handler, context); } /** *

Removes a campaign by deleting the solution deployment. The solution that the * campaign is based on is not deleted and can be redeployed when needed. A deleted * campaign can no longer be specified in a GetRecommendations * request. For information on creating campaigns, see CreateCampaign.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteCampaignOutcome DeleteCampaign(const Model::DeleteCampaignRequest& request) const; /** * A Callable wrapper for DeleteCampaign that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DeleteCampaignOutcomeCallable DeleteCampaignCallable(const DeleteCampaignRequestT& request) const { return SubmitCallable(&PersonalizeClient::DeleteCampaign, request); } /** * An Async wrapper for DeleteCampaign that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DeleteCampaignAsync(const DeleteCampaignRequestT& request, const DeleteCampaignResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DeleteCampaign, request, handler, context); } /** *

Deletes a dataset. You can't delete a dataset if an associated * DatasetImportJob or SolutionVersion is in the CREATE * PENDING or IN PROGRESS state. For more information on datasets, see CreateDataset.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteDatasetOutcome DeleteDataset(const Model::DeleteDatasetRequest& request) const; /** * A Callable wrapper for DeleteDataset that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DeleteDatasetOutcomeCallable DeleteDatasetCallable(const DeleteDatasetRequestT& request) const { return SubmitCallable(&PersonalizeClient::DeleteDataset, request); } /** * An Async wrapper for DeleteDataset that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DeleteDatasetAsync(const DeleteDatasetRequestT& request, const DeleteDatasetResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DeleteDataset, request, handler, context); } /** *

Deletes a dataset group. Before you delete a dataset group, you must delete * the following:

  • All associated event trackers.

  • *

    All associated solutions.

  • All datasets in the dataset * group.

See Also:

AWS * API Reference

*/ virtual Model::DeleteDatasetGroupOutcome DeleteDatasetGroup(const Model::DeleteDatasetGroupRequest& request) const; /** * A Callable wrapper for DeleteDatasetGroup that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DeleteDatasetGroupOutcomeCallable DeleteDatasetGroupCallable(const DeleteDatasetGroupRequestT& request) const { return SubmitCallable(&PersonalizeClient::DeleteDatasetGroup, request); } /** * An Async wrapper for DeleteDatasetGroup that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DeleteDatasetGroupAsync(const DeleteDatasetGroupRequestT& request, const DeleteDatasetGroupResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DeleteDatasetGroup, request, handler, context); } /** *

Deletes the event tracker. Does not delete the event-interactions dataset * from the associated dataset group. For more information on event trackers, see * CreateEventTracker.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteEventTrackerOutcome DeleteEventTracker(const Model::DeleteEventTrackerRequest& request) const; /** * A Callable wrapper for DeleteEventTracker that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DeleteEventTrackerOutcomeCallable DeleteEventTrackerCallable(const DeleteEventTrackerRequestT& request) const { return SubmitCallable(&PersonalizeClient::DeleteEventTracker, request); } /** * An Async wrapper for DeleteEventTracker that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DeleteEventTrackerAsync(const DeleteEventTrackerRequestT& request, const DeleteEventTrackerResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DeleteEventTracker, request, handler, context); } /** *

Deletes a filter.

See Also:

AWS * API Reference

*/ virtual Model::DeleteFilterOutcome DeleteFilter(const Model::DeleteFilterRequest& request) const; /** * A Callable wrapper for DeleteFilter that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DeleteFilterOutcomeCallable DeleteFilterCallable(const DeleteFilterRequestT& request) const { return SubmitCallable(&PersonalizeClient::DeleteFilter, request); } /** * An Async wrapper for DeleteFilter that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DeleteFilterAsync(const DeleteFilterRequestT& request, const DeleteFilterResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DeleteFilter, request, handler, context); } /** *

Deletes a metric attribution.

See Also:

AWS * API Reference

*/ virtual Model::DeleteMetricAttributionOutcome DeleteMetricAttribution(const Model::DeleteMetricAttributionRequest& request) const; /** * A Callable wrapper for DeleteMetricAttribution that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DeleteMetricAttributionOutcomeCallable DeleteMetricAttributionCallable(const DeleteMetricAttributionRequestT& request) const { return SubmitCallable(&PersonalizeClient::DeleteMetricAttribution, request); } /** * An Async wrapper for DeleteMetricAttribution that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DeleteMetricAttributionAsync(const DeleteMetricAttributionRequestT& request, const DeleteMetricAttributionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DeleteMetricAttribution, request, handler, context); } /** *

Deactivates and removes a recommender. A deleted recommender can no longer be * specified in a GetRecommendations * request.

See Also:

AWS * API Reference

*/ virtual Model::DeleteRecommenderOutcome DeleteRecommender(const Model::DeleteRecommenderRequest& request) const; /** * A Callable wrapper for DeleteRecommender that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DeleteRecommenderOutcomeCallable DeleteRecommenderCallable(const DeleteRecommenderRequestT& request) const { return SubmitCallable(&PersonalizeClient::DeleteRecommender, request); } /** * An Async wrapper for DeleteRecommender that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DeleteRecommenderAsync(const DeleteRecommenderRequestT& request, const DeleteRecommenderResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DeleteRecommender, request, handler, context); } /** *

Deletes a schema. Before deleting a schema, you must delete all datasets * referencing the schema. For more information on schemas, see CreateSchema.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteSchemaOutcome DeleteSchema(const Model::DeleteSchemaRequest& request) const; /** * A Callable wrapper for DeleteSchema that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DeleteSchemaOutcomeCallable DeleteSchemaCallable(const DeleteSchemaRequestT& request) const { return SubmitCallable(&PersonalizeClient::DeleteSchema, request); } /** * An Async wrapper for DeleteSchema that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DeleteSchemaAsync(const DeleteSchemaRequestT& request, const DeleteSchemaResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DeleteSchema, request, handler, context); } /** *

Deletes all versions of a solution and the Solution object * itself. Before deleting a solution, you must delete all campaigns based on the * solution. To determine what campaigns are using the solution, call ListCampaigns * and supply the Amazon Resource Name (ARN) of the solution. You can't delete a * solution if an associated SolutionVersion is in the CREATE PENDING * or IN PROGRESS state. For more information on solutions, see CreateSolution.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteSolutionOutcome DeleteSolution(const Model::DeleteSolutionRequest& request) const; /** * A Callable wrapper for DeleteSolution that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DeleteSolutionOutcomeCallable DeleteSolutionCallable(const DeleteSolutionRequestT& request) const { return SubmitCallable(&PersonalizeClient::DeleteSolution, request); } /** * An Async wrapper for DeleteSolution that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DeleteSolutionAsync(const DeleteSolutionRequestT& request, const DeleteSolutionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DeleteSolution, request, handler, context); } /** *

Describes the given algorithm.

See Also:

AWS * API Reference

*/ virtual Model::DescribeAlgorithmOutcome DescribeAlgorithm(const Model::DescribeAlgorithmRequest& request) const; /** * A Callable wrapper for DescribeAlgorithm that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeAlgorithmOutcomeCallable DescribeAlgorithmCallable(const DescribeAlgorithmRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeAlgorithm, request); } /** * An Async wrapper for DescribeAlgorithm that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeAlgorithmAsync(const DescribeAlgorithmRequestT& request, const DescribeAlgorithmResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeAlgorithm, request, handler, context); } /** *

Gets the properties of a batch inference job including name, Amazon Resource * Name (ARN), status, input and output configurations, and the ARN of the solution * version used to generate the recommendations.

See Also:

AWS * API Reference

*/ virtual Model::DescribeBatchInferenceJobOutcome DescribeBatchInferenceJob(const Model::DescribeBatchInferenceJobRequest& request) const; /** * A Callable wrapper for DescribeBatchInferenceJob that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeBatchInferenceJobOutcomeCallable DescribeBatchInferenceJobCallable(const DescribeBatchInferenceJobRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeBatchInferenceJob, request); } /** * An Async wrapper for DescribeBatchInferenceJob that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeBatchInferenceJobAsync(const DescribeBatchInferenceJobRequestT& request, const DescribeBatchInferenceJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeBatchInferenceJob, request, handler, context); } /** *

Gets the properties of a batch segment job including name, Amazon Resource * Name (ARN), status, input and output configurations, and the ARN of the solution * version used to generate segments.

See Also:

AWS * API Reference

*/ virtual Model::DescribeBatchSegmentJobOutcome DescribeBatchSegmentJob(const Model::DescribeBatchSegmentJobRequest& request) const; /** * A Callable wrapper for DescribeBatchSegmentJob that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeBatchSegmentJobOutcomeCallable DescribeBatchSegmentJobCallable(const DescribeBatchSegmentJobRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeBatchSegmentJob, request); } /** * An Async wrapper for DescribeBatchSegmentJob that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeBatchSegmentJobAsync(const DescribeBatchSegmentJobRequestT& request, const DescribeBatchSegmentJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeBatchSegmentJob, request, handler, context); } /** *

Describes the given campaign, including its status.

A campaign can be * in one of the following states:

  • CREATE PENDING > CREATE * IN_PROGRESS > ACTIVE -or- CREATE FAILED

  • DELETE PENDING > * DELETE IN_PROGRESS

When the status is * CREATE FAILED, the response includes the failureReason * key, which describes why.

For more information on campaigns, see CreateCampaign.

See * Also:

AWS * API Reference

*/ virtual Model::DescribeCampaignOutcome DescribeCampaign(const Model::DescribeCampaignRequest& request) const; /** * A Callable wrapper for DescribeCampaign that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeCampaignOutcomeCallable DescribeCampaignCallable(const DescribeCampaignRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeCampaign, request); } /** * An Async wrapper for DescribeCampaign that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeCampaignAsync(const DescribeCampaignRequestT& request, const DescribeCampaignResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeCampaign, request, handler, context); } /** *

Describes the given dataset. For more information on datasets, see CreateDataset.

See * Also:

AWS * API Reference

*/ virtual Model::DescribeDatasetOutcome DescribeDataset(const Model::DescribeDatasetRequest& request) const; /** * A Callable wrapper for DescribeDataset that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeDatasetOutcomeCallable DescribeDatasetCallable(const DescribeDatasetRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeDataset, request); } /** * An Async wrapper for DescribeDataset that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeDatasetAsync(const DescribeDatasetRequestT& request, const DescribeDatasetResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeDataset, request, handler, context); } /** *

Describes the dataset export job created by CreateDatasetExportJob, * including the export job status.

See Also:

AWS * API Reference

*/ virtual Model::DescribeDatasetExportJobOutcome DescribeDatasetExportJob(const Model::DescribeDatasetExportJobRequest& request) const; /** * A Callable wrapper for DescribeDatasetExportJob that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeDatasetExportJobOutcomeCallable DescribeDatasetExportJobCallable(const DescribeDatasetExportJobRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeDatasetExportJob, request); } /** * An Async wrapper for DescribeDatasetExportJob that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeDatasetExportJobAsync(const DescribeDatasetExportJobRequestT& request, const DescribeDatasetExportJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeDatasetExportJob, request, handler, context); } /** *

Describes the given dataset group. For more information on dataset groups, * see CreateDatasetGroup.

See * Also:

AWS * API Reference

*/ virtual Model::DescribeDatasetGroupOutcome DescribeDatasetGroup(const Model::DescribeDatasetGroupRequest& request) const; /** * A Callable wrapper for DescribeDatasetGroup that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeDatasetGroupOutcomeCallable DescribeDatasetGroupCallable(const DescribeDatasetGroupRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeDatasetGroup, request); } /** * An Async wrapper for DescribeDatasetGroup that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeDatasetGroupAsync(const DescribeDatasetGroupRequestT& request, const DescribeDatasetGroupResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeDatasetGroup, request, handler, context); } /** *

Describes the dataset import job created by CreateDatasetImportJob, * including the import job status.

See Also:

AWS * API Reference

*/ virtual Model::DescribeDatasetImportJobOutcome DescribeDatasetImportJob(const Model::DescribeDatasetImportJobRequest& request) const; /** * A Callable wrapper for DescribeDatasetImportJob that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeDatasetImportJobOutcomeCallable DescribeDatasetImportJobCallable(const DescribeDatasetImportJobRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeDatasetImportJob, request); } /** * An Async wrapper for DescribeDatasetImportJob that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeDatasetImportJobAsync(const DescribeDatasetImportJobRequestT& request, const DescribeDatasetImportJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeDatasetImportJob, request, handler, context); } /** *

Describes an event tracker. The response includes the trackingId * and status of the event tracker. For more information on event * trackers, see CreateEventTracker.

See * Also:

AWS * API Reference

*/ virtual Model::DescribeEventTrackerOutcome DescribeEventTracker(const Model::DescribeEventTrackerRequest& request) const; /** * A Callable wrapper for DescribeEventTracker that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeEventTrackerOutcomeCallable DescribeEventTrackerCallable(const DescribeEventTrackerRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeEventTracker, request); } /** * An Async wrapper for DescribeEventTracker that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeEventTrackerAsync(const DescribeEventTrackerRequestT& request, const DescribeEventTrackerResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeEventTracker, request, handler, context); } /** *

Describes the given feature transformation.

See Also:

AWS * API Reference

*/ virtual Model::DescribeFeatureTransformationOutcome DescribeFeatureTransformation(const Model::DescribeFeatureTransformationRequest& request) const; /** * A Callable wrapper for DescribeFeatureTransformation that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeFeatureTransformationOutcomeCallable DescribeFeatureTransformationCallable(const DescribeFeatureTransformationRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeFeatureTransformation, request); } /** * An Async wrapper for DescribeFeatureTransformation that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeFeatureTransformationAsync(const DescribeFeatureTransformationRequestT& request, const DescribeFeatureTransformationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeFeatureTransformation, request, handler, context); } /** *

Describes a filter's properties.

See Also:

AWS * API Reference

*/ virtual Model::DescribeFilterOutcome DescribeFilter(const Model::DescribeFilterRequest& request) const; /** * A Callable wrapper for DescribeFilter that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeFilterOutcomeCallable DescribeFilterCallable(const DescribeFilterRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeFilter, request); } /** * An Async wrapper for DescribeFilter that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeFilterAsync(const DescribeFilterRequestT& request, const DescribeFilterResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeFilter, request, handler, context); } /** *

Describes a metric attribution.

See Also:

AWS * API Reference

*/ virtual Model::DescribeMetricAttributionOutcome DescribeMetricAttribution(const Model::DescribeMetricAttributionRequest& request) const; /** * A Callable wrapper for DescribeMetricAttribution that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeMetricAttributionOutcomeCallable DescribeMetricAttributionCallable(const DescribeMetricAttributionRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeMetricAttribution, request); } /** * An Async wrapper for DescribeMetricAttribution that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeMetricAttributionAsync(const DescribeMetricAttributionRequestT& request, const DescribeMetricAttributionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeMetricAttribution, request, handler, context); } /** *

Describes a recipe.

A recipe contains three items:

  • An * algorithm that trains a model.

  • Hyperparameters that govern the * training.

  • Feature transformation information for modifying the * input data before training.

Amazon Personalize provides a set * of predefined recipes. You specify a recipe when you create a solution with the * CreateSolution * API. CreateSolution trains a model by using the algorithm in the * specified recipe and a training dataset. The solution, when deployed as a * campaign, can provide recommendations using the GetRecommendations * API.

See Also:

AWS * API Reference

*/ virtual Model::DescribeRecipeOutcome DescribeRecipe(const Model::DescribeRecipeRequest& request) const; /** * A Callable wrapper for DescribeRecipe that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeRecipeOutcomeCallable DescribeRecipeCallable(const DescribeRecipeRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeRecipe, request); } /** * An Async wrapper for DescribeRecipe that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeRecipeAsync(const DescribeRecipeRequestT& request, const DescribeRecipeResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeRecipe, request, handler, context); } /** *

Describes the given recommender, including its status.

A recommender * can be in one of the following states:

  • CREATE PENDING > * CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

  • STOP PENDING * > STOP IN_PROGRESS > INACTIVE > START PENDING > START IN_PROGRESS * > ACTIVE

  • DELETE PENDING > DELETE IN_PROGRESS

When the status is CREATE FAILED, the response includes the failureReason
key, which describes why.

The modelMetrics key is null when the recommender is being
created or deleted.

For more information on recommenders, see CreateRecommender.

See * Also:

AWS * API Reference

*/ virtual Model::DescribeRecommenderOutcome DescribeRecommender(const Model::DescribeRecommenderRequest& request) const; /** * A Callable wrapper for DescribeRecommender that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeRecommenderOutcomeCallable DescribeRecommenderCallable(const DescribeRecommenderRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeRecommender, request); } /** * An Async wrapper for DescribeRecommender that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeRecommenderAsync(const DescribeRecommenderRequestT& request, const DescribeRecommenderResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeRecommender, request, handler, context); } /** *

Describes a schema. For more information on schemas, see CreateSchema.

See * Also:

AWS * API Reference

*/ virtual Model::DescribeSchemaOutcome DescribeSchema(const Model::DescribeSchemaRequest& request) const; /** * A Callable wrapper for DescribeSchema that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeSchemaOutcomeCallable DescribeSchemaCallable(const DescribeSchemaRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeSchema, request); } /** * An Async wrapper for DescribeSchema that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeSchemaAsync(const DescribeSchemaRequestT& request, const DescribeSchemaResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeSchema, request, handler, context); } /** *

Describes a solution. For more information on solutions, see CreateSolution.

See * Also:

AWS * API Reference

*/ virtual Model::DescribeSolutionOutcome DescribeSolution(const Model::DescribeSolutionRequest& request) const; /** * A Callable wrapper for DescribeSolution that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeSolutionOutcomeCallable DescribeSolutionCallable(const DescribeSolutionRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeSolution, request); } /** * An Async wrapper for DescribeSolution that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeSolutionAsync(const DescribeSolutionRequestT& request, const DescribeSolutionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeSolution, request, handler, context); } /** *

Describes a specific version of a solution. For more information on * solutions, see CreateSolution *

See Also:

AWS * API Reference

*/ virtual Model::DescribeSolutionVersionOutcome DescribeSolutionVersion(const Model::DescribeSolutionVersionRequest& request) const; /** * A Callable wrapper for DescribeSolutionVersion that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::DescribeSolutionVersionOutcomeCallable DescribeSolutionVersionCallable(const DescribeSolutionVersionRequestT& request) const { return SubmitCallable(&PersonalizeClient::DescribeSolutionVersion, request); } /** * An Async wrapper for DescribeSolutionVersion that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void DescribeSolutionVersionAsync(const DescribeSolutionVersionRequestT& request, const DescribeSolutionVersionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::DescribeSolutionVersion, request, handler, context); } /** *

Gets the metrics for the specified solution version.

See Also:

* AWS * API Reference

*/ virtual Model::GetSolutionMetricsOutcome GetSolutionMetrics(const Model::GetSolutionMetricsRequest& request) const; /** * A Callable wrapper for GetSolutionMetrics that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::GetSolutionMetricsOutcomeCallable GetSolutionMetricsCallable(const GetSolutionMetricsRequestT& request) const { return SubmitCallable(&PersonalizeClient::GetSolutionMetrics, request); } /** * An Async wrapper for GetSolutionMetrics that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void GetSolutionMetricsAsync(const GetSolutionMetricsRequestT& request, const GetSolutionMetricsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::GetSolutionMetrics, request, handler, context); } /** *

Gets a list of the batch inference jobs that have been performed off of a * solution version.

See Also:

AWS * API Reference

*/ virtual Model::ListBatchInferenceJobsOutcome ListBatchInferenceJobs(const Model::ListBatchInferenceJobsRequest& request) const; /** * A Callable wrapper for ListBatchInferenceJobs that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListBatchInferenceJobsOutcomeCallable ListBatchInferenceJobsCallable(const ListBatchInferenceJobsRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListBatchInferenceJobs, request); } /** * An Async wrapper for ListBatchInferenceJobs that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListBatchInferenceJobsAsync(const ListBatchInferenceJobsRequestT& request, const ListBatchInferenceJobsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListBatchInferenceJobs, request, handler, context); } /** *

Gets a list of the batch segment jobs that have been performed off of a * solution version that you specify.

See Also:

AWS * API Reference

*/ virtual Model::ListBatchSegmentJobsOutcome ListBatchSegmentJobs(const Model::ListBatchSegmentJobsRequest& request) const; /** * A Callable wrapper for ListBatchSegmentJobs that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListBatchSegmentJobsOutcomeCallable ListBatchSegmentJobsCallable(const ListBatchSegmentJobsRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListBatchSegmentJobs, request); } /** * An Async wrapper for ListBatchSegmentJobs that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListBatchSegmentJobsAsync(const ListBatchSegmentJobsRequestT& request, const ListBatchSegmentJobsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListBatchSegmentJobs, request, handler, context); } /** *

Returns a list of campaigns that use the given solution. When a solution is * not specified, all the campaigns associated with the account are listed. The * response provides the properties for each campaign, including the Amazon * Resource Name (ARN). For more information on campaigns, see CreateCampaign.

See * Also:

AWS * API Reference

*/ virtual Model::ListCampaignsOutcome ListCampaigns(const Model::ListCampaignsRequest& request) const; /** * A Callable wrapper for ListCampaigns that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListCampaignsOutcomeCallable ListCampaignsCallable(const ListCampaignsRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListCampaigns, request); } /** * An Async wrapper for ListCampaigns that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListCampaignsAsync(const ListCampaignsRequestT& request, const ListCampaignsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListCampaigns, request, handler, context); } /** *

Returns a list of dataset export jobs that use the given dataset. When a * dataset is not specified, all the dataset export jobs associated with the * account are listed. The response provides the properties for each dataset export * job, including the Amazon Resource Name (ARN). For more information on dataset * export jobs, see CreateDatasetExportJob. * For more information on datasets, see CreateDataset.

See * Also:

AWS * API Reference

*/ virtual Model::ListDatasetExportJobsOutcome ListDatasetExportJobs(const Model::ListDatasetExportJobsRequest& request) const; /** * A Callable wrapper for ListDatasetExportJobs that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListDatasetExportJobsOutcomeCallable ListDatasetExportJobsCallable(const ListDatasetExportJobsRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListDatasetExportJobs, request); } /** * An Async wrapper for ListDatasetExportJobs that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListDatasetExportJobsAsync(const ListDatasetExportJobsRequestT& request, const ListDatasetExportJobsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListDatasetExportJobs, request, handler, context); } /** *

Returns a list of dataset groups. The response provides the properties for * each dataset group, including the Amazon Resource Name (ARN). For more * information on dataset groups, see CreateDatasetGroup.

See * Also:

AWS * API Reference

*/ virtual Model::ListDatasetGroupsOutcome ListDatasetGroups(const Model::ListDatasetGroupsRequest& request) const; /** * A Callable wrapper for ListDatasetGroups that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListDatasetGroupsOutcomeCallable ListDatasetGroupsCallable(const ListDatasetGroupsRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListDatasetGroups, request); } /** * An Async wrapper for ListDatasetGroups that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListDatasetGroupsAsync(const ListDatasetGroupsRequestT& request, const ListDatasetGroupsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListDatasetGroups, request, handler, context); } /** *

Returns a list of dataset import jobs that use the given dataset. When a * dataset is not specified, all the dataset import jobs associated with the * account are listed. The response provides the properties for each dataset import * job, including the Amazon Resource Name (ARN). For more information on dataset * import jobs, see CreateDatasetImportJob. * For more information on datasets, see CreateDataset.

See * Also:

AWS * API Reference

*/ virtual Model::ListDatasetImportJobsOutcome ListDatasetImportJobs(const Model::ListDatasetImportJobsRequest& request) const; /** * A Callable wrapper for ListDatasetImportJobs that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListDatasetImportJobsOutcomeCallable ListDatasetImportJobsCallable(const ListDatasetImportJobsRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListDatasetImportJobs, request); } /** * An Async wrapper for ListDatasetImportJobs that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListDatasetImportJobsAsync(const ListDatasetImportJobsRequestT& request, const ListDatasetImportJobsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListDatasetImportJobs, request, handler, context); } /** *

Returns the list of datasets contained in the given dataset group. The * response provides the properties for each dataset, including the Amazon Resource * Name (ARN). For more information on datasets, see CreateDataset.

See * Also:

AWS * API Reference

*/ virtual Model::ListDatasetsOutcome ListDatasets(const Model::ListDatasetsRequest& request) const; /** * A Callable wrapper for ListDatasets that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListDatasetsOutcomeCallable ListDatasetsCallable(const ListDatasetsRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListDatasets, request); } /** * An Async wrapper for ListDatasets that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListDatasetsAsync(const ListDatasetsRequestT& request, const ListDatasetsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListDatasets, request, handler, context); } /** *

Returns the list of event trackers associated with the account. The response * provides the properties for each event tracker, including the Amazon Resource * Name (ARN) and tracking ID. For more information on event trackers, see CreateEventTracker.

See * Also:

AWS * API Reference

*/ virtual Model::ListEventTrackersOutcome ListEventTrackers(const Model::ListEventTrackersRequest& request) const; /** * A Callable wrapper for ListEventTrackers that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListEventTrackersOutcomeCallable ListEventTrackersCallable(const ListEventTrackersRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListEventTrackers, request); } /** * An Async wrapper for ListEventTrackers that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListEventTrackersAsync(const ListEventTrackersRequestT& request, const ListEventTrackersResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListEventTrackers, request, handler, context); } /** *

Lists all filters that belong to a given dataset group.

See * Also:

AWS * API Reference

*/ virtual Model::ListFiltersOutcome ListFilters(const Model::ListFiltersRequest& request) const; /** * A Callable wrapper for ListFilters that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListFiltersOutcomeCallable ListFiltersCallable(const ListFiltersRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListFilters, request); } /** * An Async wrapper for ListFilters that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListFiltersAsync(const ListFiltersRequestT& request, const ListFiltersResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListFilters, request, handler, context); } /** *

Lists the metrics for the metric attribution.

See Also:

AWS * API Reference

*/ virtual Model::ListMetricAttributionMetricsOutcome ListMetricAttributionMetrics(const Model::ListMetricAttributionMetricsRequest& request) const; /** * A Callable wrapper for ListMetricAttributionMetrics that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListMetricAttributionMetricsOutcomeCallable ListMetricAttributionMetricsCallable(const ListMetricAttributionMetricsRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListMetricAttributionMetrics, request); } /** * An Async wrapper for ListMetricAttributionMetrics that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListMetricAttributionMetricsAsync(const ListMetricAttributionMetricsRequestT& request, const ListMetricAttributionMetricsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListMetricAttributionMetrics, request, handler, context); } /** *

Lists metric attributions.

See Also:

AWS * API Reference

*/ virtual Model::ListMetricAttributionsOutcome ListMetricAttributions(const Model::ListMetricAttributionsRequest& request) const; /** * A Callable wrapper for ListMetricAttributions that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListMetricAttributionsOutcomeCallable ListMetricAttributionsCallable(const ListMetricAttributionsRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListMetricAttributions, request); } /** * An Async wrapper for ListMetricAttributions that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListMetricAttributionsAsync(const ListMetricAttributionsRequestT& request, const ListMetricAttributionsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListMetricAttributions, request, handler, context); } /** *

Returns a list of available recipes. The response provides the properties for * each recipe, including the recipe's Amazon Resource Name (ARN).

See * Also:

AWS * API Reference

*/ virtual Model::ListRecipesOutcome ListRecipes(const Model::ListRecipesRequest& request) const; /** * A Callable wrapper for ListRecipes that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListRecipesOutcomeCallable ListRecipesCallable(const ListRecipesRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListRecipes, request); } /** * An Async wrapper for ListRecipes that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListRecipesAsync(const ListRecipesRequestT& request, const ListRecipesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListRecipes, request, handler, context); } /** *

Returns a list of recommenders in a given Domain dataset group. When a Domain * dataset group is not specified, all the recommenders associated with the account * are listed. The response provides the properties for each recommender, including * the Amazon Resource Name (ARN). For more information on recommenders, see CreateRecommender.

See * Also:

AWS * API Reference

*/ virtual Model::ListRecommendersOutcome ListRecommenders(const Model::ListRecommendersRequest& request) const; /** * A Callable wrapper for ListRecommenders that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListRecommendersOutcomeCallable ListRecommendersCallable(const ListRecommendersRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListRecommenders, request); } /** * An Async wrapper for ListRecommenders that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListRecommendersAsync(const ListRecommendersRequestT& request, const ListRecommendersResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListRecommenders, request, handler, context); } /** *

Returns the list of schemas associated with the account. The response * provides the properties for each schema, including the Amazon Resource Name * (ARN). For more information on schemas, see CreateSchema.

See * Also:

AWS * API Reference

*/ virtual Model::ListSchemasOutcome ListSchemas(const Model::ListSchemasRequest& request) const; /** * A Callable wrapper for ListSchemas that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListSchemasOutcomeCallable ListSchemasCallable(const ListSchemasRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListSchemas, request); } /** * An Async wrapper for ListSchemas that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListSchemasAsync(const ListSchemasRequestT& request, const ListSchemasResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListSchemas, request, handler, context); } /** *

Returns a list of solution versions for the given solution. When a solution * is not specified, all the solution versions associated with the account are * listed. The response provides the properties for each solution version, * including the Amazon Resource Name (ARN).

See Also:

AWS * API Reference

*/ virtual Model::ListSolutionVersionsOutcome ListSolutionVersions(const Model::ListSolutionVersionsRequest& request) const; /** * A Callable wrapper for ListSolutionVersions that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListSolutionVersionsOutcomeCallable ListSolutionVersionsCallable(const ListSolutionVersionsRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListSolutionVersions, request); } /** * An Async wrapper for ListSolutionVersions that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListSolutionVersionsAsync(const ListSolutionVersionsRequestT& request, const ListSolutionVersionsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListSolutionVersions, request, handler, context); } /** *

Returns a list of solutions that use the given dataset group. When a dataset * group is not specified, all the solutions associated with the account are * listed. The response provides the properties for each solution, including the * Amazon Resource Name (ARN). For more information on solutions, see CreateSolution.

See * Also:

AWS * API Reference

*/ virtual Model::ListSolutionsOutcome ListSolutions(const Model::ListSolutionsRequest& request) const; /** * A Callable wrapper for ListSolutions that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListSolutionsOutcomeCallable ListSolutionsCallable(const ListSolutionsRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListSolutions, request); } /** * An Async wrapper for ListSolutions that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListSolutionsAsync(const ListSolutionsRequestT& request, const ListSolutionsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListSolutions, request, handler, context); } /** *

Get a list of tags * attached to a resource.

See Also:

AWS * API Reference

*/ virtual Model::ListTagsForResourceOutcome ListTagsForResource(const Model::ListTagsForResourceRequest& request) const; /** * A Callable wrapper for ListTagsForResource that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::ListTagsForResourceOutcomeCallable ListTagsForResourceCallable(const ListTagsForResourceRequestT& request) const { return SubmitCallable(&PersonalizeClient::ListTagsForResource, request); } /** * An Async wrapper for ListTagsForResource that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void ListTagsForResourceAsync(const ListTagsForResourceRequestT& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::ListTagsForResource, request, handler, context); } /** *

Starts a recommender that is INACTIVE. Starting a recommender does not create * any new models, but resumes billing and automatic retraining for the * recommender.

See Also:

AWS * API Reference

*/ virtual Model::StartRecommenderOutcome StartRecommender(const Model::StartRecommenderRequest& request) const; /** * A Callable wrapper for StartRecommender that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::StartRecommenderOutcomeCallable StartRecommenderCallable(const StartRecommenderRequestT& request) const { return SubmitCallable(&PersonalizeClient::StartRecommender, request); } /** * An Async wrapper for StartRecommender that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void StartRecommenderAsync(const StartRecommenderRequestT& request, const StartRecommenderResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::StartRecommender, request, handler, context); } /** *

Stops a recommender that is ACTIVE. Stopping a recommender halts billing and * automatic retraining for the recommender.

See Also:

AWS * API Reference

*/ virtual Model::StopRecommenderOutcome StopRecommender(const Model::StopRecommenderRequest& request) const; /** * A Callable wrapper for StopRecommender that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::StopRecommenderOutcomeCallable StopRecommenderCallable(const StopRecommenderRequestT& request) const { return SubmitCallable(&PersonalizeClient::StopRecommender, request); } /** * An Async wrapper for StopRecommender that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void StopRecommenderAsync(const StopRecommenderRequestT& request, const StopRecommenderResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::StopRecommender, request, handler, context); } /** *

Stops creating a solution version that is in a state of CREATE_PENDING or * CREATE IN_PROGRESS.

Depending on the current state of the solution * version, the solution version state changes as follows:

  • *

    CREATE_PENDING > CREATE_STOPPED

    or

  • *

    CREATE_IN_PROGRESS > CREATE_STOPPING > CREATE_STOPPED

*

You are billed for all of the training completed up until you stop the * solution version creation. You cannot resume creating a solution version once it * has been stopped.

See Also:

AWS * API Reference

*/ virtual Model::StopSolutionVersionCreationOutcome StopSolutionVersionCreation(const Model::StopSolutionVersionCreationRequest& request) const; /** * A Callable wrapper for StopSolutionVersionCreation that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::StopSolutionVersionCreationOutcomeCallable StopSolutionVersionCreationCallable(const StopSolutionVersionCreationRequestT& request) const { return SubmitCallable(&PersonalizeClient::StopSolutionVersionCreation, request); } /** * An Async wrapper for StopSolutionVersionCreation that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void StopSolutionVersionCreationAsync(const StopSolutionVersionCreationRequestT& request, const StopSolutionVersionCreationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::StopSolutionVersionCreation, request, handler, context); } /** *

Add a list of tags to a resource.

See Also:

AWS * API Reference

*/ virtual Model::TagResourceOutcome TagResource(const Model::TagResourceRequest& request) const; /** * A Callable wrapper for TagResource that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::TagResourceOutcomeCallable TagResourceCallable(const TagResourceRequestT& request) const { return SubmitCallable(&PersonalizeClient::TagResource, request); } /** * An Async wrapper for TagResource that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void TagResourceAsync(const TagResourceRequestT& request, const TagResourceResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::TagResource, request, handler, context); } /** *

Remove tags * that are attached to a resource.

See Also:

AWS * API Reference

*/ virtual Model::UntagResourceOutcome UntagResource(const Model::UntagResourceRequest& request) const; /** * A Callable wrapper for UntagResource that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::UntagResourceOutcomeCallable UntagResourceCallable(const UntagResourceRequestT& request) const { return SubmitCallable(&PersonalizeClient::UntagResource, request); } /** * An Async wrapper for UntagResource that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void UntagResourceAsync(const UntagResourceRequestT& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::UntagResource, request, handler, context); } /** *

Updates a campaign by either deploying a new solution or changing the value * of the campaign's minProvisionedTPS parameter.

To update a * campaign, the campaign status must be ACTIVE or CREATE FAILED. Check the * campaign status using the DescribeCampaign * operation.

You can still get recommendations from a campaign while * an update is in progress. The campaign will use the previous solution version * and campaign configuration to generate recommendations until the latest campaign * update status is Active.

For more information on * campaigns, see CreateCampaign.

See * Also:

AWS * API Reference

*/ virtual Model::UpdateCampaignOutcome UpdateCampaign(const Model::UpdateCampaignRequest& request) const; /** * A Callable wrapper for UpdateCampaign that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::UpdateCampaignOutcomeCallable UpdateCampaignCallable(const UpdateCampaignRequestT& request) const { return SubmitCallable(&PersonalizeClient::UpdateCampaign, request); } /** * An Async wrapper for UpdateCampaign that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void UpdateCampaignAsync(const UpdateCampaignRequestT& request, const UpdateCampaignResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::UpdateCampaign, request, handler, context); } /** *

Update a dataset to replace its schema with a new or existing one. For more * information, see Replacing * a dataset's schema.

See Also:

AWS * API Reference

*/ virtual Model::UpdateDatasetOutcome UpdateDataset(const Model::UpdateDatasetRequest& request) const; /** * A Callable wrapper for UpdateDataset that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::UpdateDatasetOutcomeCallable UpdateDatasetCallable(const UpdateDatasetRequestT& request) const { return SubmitCallable(&PersonalizeClient::UpdateDataset, request); } /** * An Async wrapper for UpdateDataset that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void UpdateDatasetAsync(const UpdateDatasetRequestT& request, const UpdateDatasetResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::UpdateDataset, request, handler, context); } /** *

Updates a metric attribution.

See Also:

AWS * API Reference

*/ virtual Model::UpdateMetricAttributionOutcome UpdateMetricAttribution(const Model::UpdateMetricAttributionRequest& request) const; /** * A Callable wrapper for UpdateMetricAttribution that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::UpdateMetricAttributionOutcomeCallable UpdateMetricAttributionCallable(const UpdateMetricAttributionRequestT& request) const { return SubmitCallable(&PersonalizeClient::UpdateMetricAttribution, request); } /** * An Async wrapper for UpdateMetricAttribution that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void UpdateMetricAttributionAsync(const UpdateMetricAttributionRequestT& request, const UpdateMetricAttributionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::UpdateMetricAttribution, request, handler, context); } /** *

Updates the recommender to modify the recommender configuration. If you * update the recommender to modify the columns used in training, Amazon * Personalize automatically starts a full retraining of the models backing your * recommender. While the update completes, you can still get recommendations from * the recommender. The recommender uses the previous configuration until the * update completes. To track the status of this update, use the * latestRecommenderUpdate returned in the DescribeRecommender * operation.

See Also:

AWS * API Reference

*/ virtual Model::UpdateRecommenderOutcome UpdateRecommender(const Model::UpdateRecommenderRequest& request) const; /** * A Callable wrapper for UpdateRecommender that returns a future to the operation so that it can be executed in parallel to other requests. */ template Model::UpdateRecommenderOutcomeCallable UpdateRecommenderCallable(const UpdateRecommenderRequestT& request) const { return SubmitCallable(&PersonalizeClient::UpdateRecommender, request); } /** * An Async wrapper for UpdateRecommender that queues the request into a thread executor and triggers associated callback when operation has finished. */ template void UpdateRecommenderAsync(const UpdateRecommenderRequestT& request, const UpdateRecommenderResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const { return SubmitAsync(&PersonalizeClient::UpdateRecommender, request, handler, context); } void OverrideEndpoint(const Aws::String& endpoint); std::shared_ptr& accessEndpointProvider(); private: friend class Aws::Client::ClientWithAsyncTemplateMethods; void init(const PersonalizeClientConfiguration& clientConfiguration); PersonalizeClientConfiguration m_clientConfiguration; std::shared_ptr m_executor; std::shared_ptr m_endpointProvider; }; } // namespace Personalize } // namespace Aws