/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Do not modify this file. This file is generated from the machinelearning-2014-12-12.normal.json service model.
*/
using System;
using System.Runtime.ExceptionServices;
using System.Threading;
using System.Threading.Tasks;
using System.Collections.Generic;
using System.Net;
using Amazon.MachineLearning.Model;
using Amazon.MachineLearning.Model.Internal.MarshallTransformations;
using Amazon.MachineLearning.Internal;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
using Amazon.Runtime.Internal.Auth;
using Amazon.Runtime.Internal.Transform;
namespace Amazon.MachineLearning
{
///
/// Implementation for accessing MachineLearning
///
/// Definition of the public APIs exposed by Amazon Machine Learning
///
public partial class AmazonMachineLearningClient : AmazonServiceClient, IAmazonMachineLearning
{
private static IServiceMetadata serviceMetadata = new AmazonMachineLearningMetadata();
private IMachineLearningPaginatorFactory _paginators;
///
/// Paginators for the service
///
public IMachineLearningPaginatorFactory Paginators
{
get
{
if (this._paginators == null)
{
this._paginators = new MachineLearningPaginatorFactory(this);
}
return this._paginators;
}
}
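/*
 * Example (illustrative, not part of the generated code): enumerating results through the
 * paginator factory instead of handling NextToken manually. The paginator method and the
 * Results property below are assumptions based on the SDK's usual paginator pattern; treat
 * this as a sketch, not the authoritative API.
 *
 *   var client = new AmazonMachineLearningClient();
 *   foreach (var model in client.Paginators.DescribeMLModels(new DescribeMLModelsRequest()).Results)
 *   {
 *       Console.WriteLine(model.MLModelId);
 *   }
 */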
#region Constructors
///
/// Constructs AmazonMachineLearningClient with the credentials loaded from the application's
/// default configuration, and if unsuccessful from the Instance Profile service on an EC2 instance.
///
/// Example App.config with credentials set.
///
/// <?xml version="1.0" encoding="utf-8" ?>
/// <configuration>
/// <appSettings>
/// <add key="AWSProfileName" value="AWS Default"/>
/// </appSettings>
/// </configuration>
///
///
///
public AmazonMachineLearningClient()
: base(FallbackCredentialsFactory.GetCredentials(), new AmazonMachineLearningConfig()) { }
///
/// Constructs AmazonMachineLearningClient with the credentials loaded from the application's
/// default configuration, and if unsuccessful from the Instance Profile service on an EC2 instance.
///
/// Example App.config with credentials set.
///
/// <?xml version="1.0" encoding="utf-8" ?>
/// <configuration>
/// <appSettings>
/// <add key="AWSProfileName" value="AWS Default"/>
/// </appSettings>
/// </configuration>
///
///
///
/// The region to connect.
public AmazonMachineLearningClient(RegionEndpoint region)
: base(FallbackCredentialsFactory.GetCredentials(), new AmazonMachineLearningConfig{RegionEndpoint = region}) { }
///
/// Constructs AmazonMachineLearningClient with the credentials loaded from the application's
/// default configuration, and if unsuccessful from the Instance Profile service on an EC2 instance.
///
/// Example App.config with credentials set.
///
/// <?xml version="1.0" encoding="utf-8" ?>
/// <configuration>
/// <appSettings>
/// <add key="AWSProfileName" value="AWS Default"/>
/// </appSettings>
/// </configuration>
///
///
///
/// The AmazonMachineLearningClient Configuration Object
public AmazonMachineLearningClient(AmazonMachineLearningConfig config)
: base(FallbackCredentialsFactory.GetCredentials(config), config){}
///
/// Constructs AmazonMachineLearningClient with AWS Credentials
///
/// AWS Credentials
public AmazonMachineLearningClient(AWSCredentials credentials)
: this(credentials, new AmazonMachineLearningConfig())
{
}
///
/// Constructs AmazonMachineLearningClient with AWS Credentials
///
/// AWS Credentials
/// The region to connect.
public AmazonMachineLearningClient(AWSCredentials credentials, RegionEndpoint region)
: this(credentials, new AmazonMachineLearningConfig{RegionEndpoint = region})
{
}
///
/// Constructs AmazonMachineLearningClient with AWS Credentials and an
/// AmazonMachineLearningClient Configuration object.
///
/// AWS Credentials
/// The AmazonMachineLearningClient Configuration Object
public AmazonMachineLearningClient(AWSCredentials credentials, AmazonMachineLearningConfig clientConfig)
: base(credentials, clientConfig)
{
}
///
/// Constructs AmazonMachineLearningClient with AWS Access Key ID and AWS Secret Key
///
/// AWS Access Key ID
/// AWS Secret Access Key
public AmazonMachineLearningClient(string awsAccessKeyId, string awsSecretAccessKey)
: this(awsAccessKeyId, awsSecretAccessKey, new AmazonMachineLearningConfig())
{
}
///
/// Constructs AmazonMachineLearningClient with AWS Access Key ID and AWS Secret Key
///
/// AWS Access Key ID
/// AWS Secret Access Key
/// The region to connect.
public AmazonMachineLearningClient(string awsAccessKeyId, string awsSecretAccessKey, RegionEndpoint region)
: this(awsAccessKeyId, awsSecretAccessKey, new AmazonMachineLearningConfig() {RegionEndpoint=region})
{
}
///
/// Constructs AmazonMachineLearningClient with AWS Access Key ID, AWS Secret Key and an
/// AmazonMachineLearningClient Configuration object.
///
/// AWS Access Key ID
/// AWS Secret Access Key
/// The AmazonMachineLearningClient Configuration Object
public AmazonMachineLearningClient(string awsAccessKeyId, string awsSecretAccessKey, AmazonMachineLearningConfig clientConfig)
: base(awsAccessKeyId, awsSecretAccessKey, clientConfig)
{
}
///
/// Constructs AmazonMachineLearningClient with AWS Access Key ID and AWS Secret Key
///
/// AWS Access Key ID
/// AWS Secret Access Key
/// AWS Session Token
public AmazonMachineLearningClient(string awsAccessKeyId, string awsSecretAccessKey, string awsSessionToken)
: this(awsAccessKeyId, awsSecretAccessKey, awsSessionToken, new AmazonMachineLearningConfig())
{
}
///
/// Constructs AmazonMachineLearningClient with AWS Access Key ID and AWS Secret Key
///
/// AWS Access Key ID
/// AWS Secret Access Key
/// AWS Session Token
/// The region to connect.
public AmazonMachineLearningClient(string awsAccessKeyId, string awsSecretAccessKey, string awsSessionToken, RegionEndpoint region)
: this(awsAccessKeyId, awsSecretAccessKey, awsSessionToken, new AmazonMachineLearningConfig{RegionEndpoint = region})
{
}
///
/// Constructs AmazonMachineLearningClient with AWS Access Key ID, AWS Secret Key and an
/// AmazonMachineLearningClient Configuration object.
///
/// AWS Access Key ID
/// AWS Secret Access Key
/// AWS Session Token
/// The AmazonMachineLearningClient Configuration Object
public AmazonMachineLearningClient(string awsAccessKeyId, string awsSecretAccessKey, string awsSessionToken, AmazonMachineLearningConfig clientConfig)
: base(awsAccessKeyId, awsSecretAccessKey, awsSessionToken, clientConfig)
{
}
#endregion
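/*
 * Example (illustrative): the constructor overloads above are typically used like this. The
 * region and key values are placeholders; in practice credentials usually come from the
 * default configuration chain rather than being hard-coded.
 *
 *   // Credentials resolved from app config / environment / instance profile:
 *   var defaultClient = new AmazonMachineLearningClient(RegionEndpoint.USEast1);
 *
 *   // Explicit credentials object plus a configuration object:
 *   var credentials = new Amazon.Runtime.BasicAWSCredentials("accessKey", "secretKey");
 *   var explicitClient = new AmazonMachineLearningClient(credentials, new AmazonMachineLearningConfig
 *   {
 *       RegionEndpoint = RegionEndpoint.USEast1
 *   });
 */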
#region Overrides
///
/// Creates the signer for the service.
///
protected override AbstractAWSSigner CreateSigner()
{
return new AWS4Signer();
}
///
/// Customize the pipeline
///
///
protected override void CustomizeRuntimePipeline(RuntimePipeline pipeline)
{
pipeline.AddHandlerAfter<Amazon.Runtime.Internal.Marshaller>(new Amazon.MachineLearning.Internal.ProcessRequestHandler());
pipeline.AddHandlerBefore<Amazon.Runtime.Internal.Marshaller>(new Amazon.MachineLearning.Internal.IdempotencyHandler());
pipeline.RemoveHandler<Amazon.Runtime.Internal.EndpointResolver>();
pipeline.AddHandlerAfter<Amazon.Runtime.Internal.Marshaller>(new AmazonMachineLearningEndpointResolver());
}
///
/// Capture metadata for the service.
///
protected override IServiceMetadata ServiceMetadata
{
get
{
return serviceMetadata;
}
}
#endregion
#region Dispose
///
/// Disposes the service client.
///
protected override void Dispose(bool disposing)
{
base.Dispose(disposing);
}
#endregion
#region AddTags
///
/// Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key
/// and an optional value. If you add a tag using a key that is already associated with
/// the ML object, AddTags updates the tag's value.
///
/// Container for the necessary parameters to execute the AddTags service method.
///
/// The response from the AddTags service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
///
///
///
/// A specified resource cannot be located.
///
///
///
///
/// REST API Reference for AddTags Operation
public virtual AddTagsResponse AddTags(AddTagsRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = AddTagsRequestMarshaller.Instance;
options.ResponseUnmarshaller = AddTagsResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key
/// and an optional value. If you add a tag using a key that is already associated with
/// the ML object, AddTags updates the tag's value.
///
/// Container for the necessary parameters to execute the AddTags service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the AddTags service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
///
///
///
/// A specified resource cannot be located.
///
///
///
///
/// REST API Reference for AddTags Operation
public virtual Task<AddTagsResponse> AddTagsAsync(AddTagsRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = AddTagsRequestMarshaller.Instance;
options.ResponseUnmarshaller = AddTagsResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
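/*
 * Example (illustrative): tagging an existing MLModel with AddTags. Assumes an
 * AmazonMachineLearningClient instance named client. The resource ID is a placeholder, and
 * the Tag/TaggableResourceType member names are taken from the Amazon.MachineLearning.Model
 * namespace as the author understands it; verify against the current SDK before relying on it.
 *
 *   var response = client.AddTags(new AddTagsRequest
 *   {
 *       ResourceId = "ml-exampleModelId",
 *       ResourceType = TaggableResourceType.MLModel,
 *       Tags = new List<Tag> { new Tag { Key = "project", Value = "demo" } }
 *   });
 */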
#region CreateBatchPrediction
///
/// Generates predictions for a group of observations. The observations to process exist
/// in one or more data files referenced by a DataSource. This operation
/// creates a new BatchPrediction, and uses an MLModel and the
/// data files referenced by the DataSource as information sources.
///
///
///
/// CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction,
/// Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction
/// status to PENDING. After the BatchPrediction completes,
/// Amazon ML sets the status to COMPLETED.
///
///
///
/// You can poll for status updates by using the GetBatchPrediction operation and
/// checking the Status parameter of the result. After the COMPLETED
/// status appears, the results are available in the location specified by the OutputUri
/// parameter.
///
///
/// Container for the necessary parameters to execute the CreateBatchPrediction service method.
///
/// The response from the CreateBatchPrediction service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateBatchPrediction Operation
public virtual CreateBatchPredictionResponse CreateBatchPrediction(CreateBatchPredictionRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateBatchPredictionRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateBatchPredictionResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Generates predictions for a group of observations. The observations to process exist
/// in one or more data files referenced by a DataSource. This operation
/// creates a new BatchPrediction, and uses an MLModel and the
/// data files referenced by the DataSource as information sources.
///
///
///
/// CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction,
/// Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction
/// status to PENDING. After the BatchPrediction completes,
/// Amazon ML sets the status to COMPLETED.
///
///
///
/// You can poll for status updates by using the GetBatchPrediction operation and
/// checking the Status parameter of the result. After the COMPLETED
/// status appears, the results are available in the location specified by the OutputUri
/// parameter.
///
///
/// Container for the necessary parameters to execute the CreateBatchPrediction service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the CreateBatchPrediction service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateBatchPrediction Operation
public virtual Task<CreateBatchPredictionResponse> CreateBatchPredictionAsync(CreateBatchPredictionRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateBatchPredictionRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateBatchPredictionResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
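/*
 * Example (illustrative): starting a batch prediction and polling GetBatchPrediction until the
 * status leaves PENDING/INPROGRESS, as described in the summary above. Assumes an
 * AmazonMachineLearningClient instance named client; IDs and the output S3 URI are
 * placeholders, and the GetBatchPrediction/EntityStatus names are assumed from the Model
 * namespace. The polling loop is a sketch, not production retry logic.
 *
 *   client.CreateBatchPrediction(new CreateBatchPredictionRequest
 *   {
 *       BatchPredictionId = "bp-example",
 *       BatchPredictionName = "Example batch prediction",
 *       MLModelId = "ml-exampleModelId",
 *       BatchPredictionDataSourceId = "ds-exampleInput",
 *       OutputUri = "s3://example-bucket/batch-output/"
 *   });
 *
 *   GetBatchPredictionResponse status;
 *   do
 *   {
 *       System.Threading.Thread.Sleep(TimeSpan.FromSeconds(30));
 *       status = client.GetBatchPrediction(new GetBatchPredictionRequest { BatchPredictionId = "bp-example" });
 *   } while (status.Status == EntityStatus.PENDING || status.Status == EntityStatus.INPROGRESS);
 */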
#region CreateDataSourceFromRDS
///
/// Creates a DataSource object from an
/// Amazon Relational Database Service (Amazon RDS). A DataSource references
/// data that can be used to perform CreateMLModel, CreateEvaluation,
/// or CreateBatchPrediction operations.
///
///
///
/// CreateDataSourceFromRDS is an asynchronous operation. In response to
/// CreateDataSourceFromRDS, Amazon Machine Learning (Amazon ML) immediately
/// returns and sets the DataSource status to PENDING. After
/// the DataSource is created and ready for use, Amazon ML sets the Status
/// parameter to COMPLETED. DataSource in the COMPLETED
/// or PENDING state can be used only to perform CreateMLModel,
/// CreateEvaluation, or CreateBatchPrediction operations.
///
///
///
/// If Amazon ML cannot accept the input source, it sets the Status parameter
/// to FAILED and includes an error message in the Message attribute
/// of the GetDataSource operation response.
///
///
/// Container for the necessary parameters to execute the CreateDataSourceFromRDS service method.
///
/// The response from the CreateDataSourceFromRDS service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateDataSourceFromRDS Operation
public virtual CreateDataSourceFromRDSResponse CreateDataSourceFromRDS(CreateDataSourceFromRDSRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateDataSourceFromRDSRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateDataSourceFromRDSResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Creates a DataSource object from an
/// Amazon Relational Database Service (Amazon RDS). A DataSource references
/// data that can be used to perform CreateMLModel, CreateEvaluation,
/// or CreateBatchPrediction operations.
///
///
///
/// CreateDataSourceFromRDS is an asynchronous operation. In response to
/// CreateDataSourceFromRDS, Amazon Machine Learning (Amazon ML) immediately
/// returns and sets the DataSource status to PENDING. After
/// the DataSource is created and ready for use, Amazon ML sets the Status
/// parameter to COMPLETED. DataSource in the COMPLETED
/// or PENDING state can be used only to perform CreateMLModel,
/// CreateEvaluation, or CreateBatchPrediction operations.
///
///
///
/// If Amazon ML cannot accept the input source, it sets the Status parameter
/// to FAILED and includes an error message in the Message attribute
/// of the GetDataSource operation response.
///
///
/// Container for the necessary parameters to execute the CreateDataSourceFromRDS service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the CreateDataSourceFromRDS service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateDataSourceFromRDS Operation
public virtual Task<CreateDataSourceFromRDSResponse> CreateDataSourceFromRDSAsync(CreateDataSourceFromRDSRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateDataSourceFromRDSRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateDataSourceFromRDSResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
#region CreateDataSourceFromRedshift
///
/// Creates a DataSource from a database hosted on an Amazon Redshift cluster.
/// A DataSource references data that can be used to perform either CreateMLModel,
/// CreateEvaluation, or CreateBatchPrediction operations.
///
///
///
/// CreateDataSourceFromRedshift is an asynchronous operation. In response
/// to CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML)
/// immediately returns and sets the DataSource status to PENDING.
/// After the DataSource is created and ready for use, Amazon ML sets the
/// Status parameter to COMPLETED. DataSource in
/// COMPLETED or PENDING states can be used to perform only
/// CreateMLModel, CreateEvaluation, or CreateBatchPrediction
/// operations.
///
///
///
/// If Amazon ML can't accept the input source, it sets the Status parameter
/// to FAILED and includes an error message in the Message attribute
/// of the GetDataSource operation response.
///
///
///
/// The observations should be contained in the database hosted on an Amazon Redshift
/// cluster and should be specified by a SelectSqlQuery query. Amazon ML
/// executes an Unload command in Amazon Redshift to transfer the result
/// set of the SelectSqlQuery query to S3StagingLocation.
///
///
///
/// After the DataSource has been created, it's ready for use in evaluations
/// and batch predictions. If you plan to use the DataSource to train an
/// MLModel, the DataSource also requires a recipe. A recipe
/// describes how each input variable will be used in training an MLModel.
/// Will the variable be included or excluded from training? Will the variable be manipulated;
/// for example, will it be combined with another variable or will it be split apart into
/// word combinations? The recipe provides answers to these questions.
///
///
///
/// You can't change an existing datasource, but you can copy and modify the settings
/// from an existing Amazon Redshift datasource to create a new datasource. To do so,
/// call GetDataSource for an existing datasource and copy the values to
/// a CreateDataSource call. Change the settings that you want to change
/// and make sure that all required fields have the appropriate values.
///
///
/// Container for the necessary parameters to execute the CreateDataSourceFromRedshift service method.
///
/// The response from the CreateDataSourceFromRedshift service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateDataSourceFromRedshift Operation
public virtual CreateDataSourceFromRedshiftResponse CreateDataSourceFromRedshift(CreateDataSourceFromRedshiftRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateDataSourceFromRedshiftRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateDataSourceFromRedshiftResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Creates a DataSource from a database hosted on an Amazon Redshift cluster.
/// A DataSource references data that can be used to perform either CreateMLModel,
/// CreateEvaluation, or CreateBatchPrediction operations.
///
///
///
/// CreateDataSourceFromRedshift is an asynchronous operation. In response
/// to CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML)
/// immediately returns and sets the DataSource status to PENDING.
/// After the DataSource is created and ready for use, Amazon ML sets the
/// Status parameter to COMPLETED. DataSource in
/// COMPLETED or PENDING states can be used to perform only
/// CreateMLModel, CreateEvaluation, or CreateBatchPrediction
/// operations.
///
///
///
/// If Amazon ML can't accept the input source, it sets the Status parameter
/// to FAILED and includes an error message in the Message attribute
/// of the GetDataSource operation response.
///
///
///
/// The observations should be contained in the database hosted on an Amazon Redshift
/// cluster and should be specified by a SelectSqlQuery query. Amazon ML
/// executes an Unload command in Amazon Redshift to transfer the result
/// set of the SelectSqlQuery query to S3StagingLocation.
///
///
///
/// After the DataSource has been created, it's ready for use in evaluations
/// and batch predictions. If you plan to use the DataSource to train an
/// MLModel, the DataSource also requires a recipe. A recipe
/// describes how each input variable will be used in training an MLModel.
/// Will the variable be included or excluded from training? Will the variable be manipulated;
/// for example, will it be combined with another variable or will it be split apart into
/// word combinations? The recipe provides answers to these questions.
///
///
///
/// You can't change an existing datasource, but you can copy and modify the settings
/// from an existing Amazon Redshift datasource to create a new datasource. To do so,
/// call GetDataSource for an existing datasource and copy the values to
/// a CreateDataSource call. Change the settings that you want to change
/// and make sure that all required fields have the appropriate values.
///
///
/// Container for the necessary parameters to execute the CreateDataSourceFromRedshift service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the CreateDataSourceFromRedshift service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateDataSourceFromRedshift Operation
public virtual Task<CreateDataSourceFromRedshiftResponse> CreateDataSourceFromRedshiftAsync(CreateDataSourceFromRedshiftRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateDataSourceFromRedshiftRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateDataSourceFromRedshiftResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
#region CreateDataSourceFromS3
///
/// Creates a DataSource object. A DataSource references data
/// that can be used to perform CreateMLModel, CreateEvaluation,
/// or CreateBatchPrediction operations.
///
///
///
/// CreateDataSourceFromS3 is an asynchronous operation. In response to
/// CreateDataSourceFromS3, Amazon Machine Learning (Amazon ML) immediately
/// returns and sets the DataSource status to PENDING. After
/// the DataSource has been created and is ready for use, Amazon ML sets
/// the Status parameter to COMPLETED. DataSource
/// in the COMPLETED or PENDING state can be used to perform
/// only CreateMLModel, CreateEvaluation or CreateBatchPrediction
/// operations.
///
///
///
/// If Amazon ML can't accept the input source, it sets the Status parameter
/// to FAILED and includes an error message in the Message attribute
/// of the GetDataSource operation response.
///
///
///
/// The observation data used in a DataSource should be ready to use; that
/// is, it should have a consistent structure, and missing data values should be kept
/// to a minimum. The observation data must reside in one or more .csv files in an Amazon
/// Simple Storage Service (Amazon S3) location, along with a schema that describes the
/// data items by name and type. The same schema must be used for all of the data files
/// referenced by the DataSource.
///
///
///
/// After the DataSource has been created, it's ready to use in evaluations
/// and batch predictions. If you plan to use the DataSource to train an
/// MLModel, the DataSource also needs a recipe. A recipe describes
/// how each input variable will be used in training an MLModel. Will the
/// variable be included or excluded from training? Will the variable be manipulated;
/// for example, will it be combined with another variable or will it be split apart into
/// word combinations? The recipe provides answers to these questions.
///
///
/// Container for the necessary parameters to execute the CreateDataSourceFromS3 service method.
///
/// The response from the CreateDataSourceFromS3 service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateDataSourceFromS3 Operation
public virtual CreateDataSourceFromS3Response CreateDataSourceFromS3(CreateDataSourceFromS3Request request)
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateDataSourceFromS3RequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateDataSourceFromS3ResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Creates a DataSource object. A DataSource references data
/// that can be used to perform CreateMLModel, CreateEvaluation,
/// or CreateBatchPrediction operations.
///
///
///
/// CreateDataSourceFromS3 is an asynchronous operation. In response to
/// CreateDataSourceFromS3, Amazon Machine Learning (Amazon ML) immediately
/// returns and sets the DataSource status to PENDING. After
/// the DataSource has been created and is ready for use, Amazon ML sets
/// the Status parameter to COMPLETED. DataSource
/// in the COMPLETED or PENDING state can be used to perform
/// only CreateMLModel, CreateEvaluation or CreateBatchPrediction
/// operations.
///
///
///
/// If Amazon ML can't accept the input source, it sets the Status parameter
/// to FAILED and includes an error message in the Message attribute
/// of the GetDataSource operation response.
///
///
///
/// The observation data used in a DataSource should be ready to use; that
/// is, it should have a consistent structure, and missing data values should be kept
/// to a minimum. The observation data must reside in one or more .csv files in an Amazon
/// Simple Storage Service (Amazon S3) location, along with a schema that describes the
/// data items by name and type. The same schema must be used for all of the data files
/// referenced by the DataSource.
///
///
///
/// After the DataSource has been created, it's ready to use in evaluations
/// and batch predictions. If you plan to use the DataSource to train an
/// MLModel, the DataSource also needs a recipe. A recipe describes
/// how each input variable will be used in training an MLModel. Will the
/// variable be included or excluded from training? Will the variable be manipulated;
/// for example, will it be combined with another variable or will it be split apart into
/// word combinations? The recipe provides answers to these questions.
///
///
/// Container for the necessary parameters to execute the CreateDataSourceFromS3 service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the CreateDataSourceFromS3 service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateDataSourceFromS3 Operation
public virtual Task<CreateDataSourceFromS3Response> CreateDataSourceFromS3Async(CreateDataSourceFromS3Request request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateDataSourceFromS3RequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateDataSourceFromS3ResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
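/*
 * Example (illustrative): creating an S3-backed DataSource with computed statistics so it can
 * later be used to train an MLModel. Assumes an AmazonMachineLearningClient instance named
 * client; the bucket, schema location, and IDs are placeholders, and the S3DataSpec member
 * names are assumed from the Model namespace.
 *
 *   client.CreateDataSourceFromS3(new CreateDataSourceFromS3Request
 *   {
 *       DataSourceId = "ds-exampleTraining",
 *       DataSourceName = "Example training data",
 *       ComputeStatistics = true,
 *       DataSpec = new S3DataSpec
 *       {
 *           DataLocationS3 = "s3://example-bucket/training.csv",
 *           DataSchemaLocationS3 = "s3://example-bucket/training.csv.schema"
 *       }
 *   });
 */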
#region CreateEvaluation
///
/// Creates a new Evaluation of an MLModel. An MLModel
/// is evaluated on a set of observations associated with a DataSource. Like
/// a DataSource for an MLModel, the DataSource
/// for an Evaluation contains values for the Target Variable.
/// The Evaluation compares the predicted result for each observation to
/// the actual outcome and provides a summary so that you know how effective the MLModel
/// functions on the test data. Evaluation generates a relevant performance metric, such
/// as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on the corresponding MLModelType:
/// BINARY, REGRESSION or MULTICLASS.
///
///
///
/// CreateEvaluation is an asynchronous operation. In response to CreateEvaluation,
/// Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation status
/// to PENDING. After the Evaluation is created and ready for
/// use, Amazon ML sets the status to COMPLETED.
///
///
///
/// You can use the GetEvaluation operation to check progress of the evaluation
/// during the creation operation.
///
///
/// Container for the necessary parameters to execute the CreateEvaluation service method.
///
/// The response from the CreateEvaluation service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateEvaluation Operation
public virtual CreateEvaluationResponse CreateEvaluation(CreateEvaluationRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateEvaluationRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateEvaluationResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Creates a new Evaluation of an MLModel. An MLModel
/// is evaluated on a set of observations associated with a DataSource. Like
/// a DataSource for an MLModel, the DataSource
/// for an Evaluation contains values for the Target Variable.
/// The Evaluation compares the predicted result for each observation to
/// the actual outcome and provides a summary so that you know how effective the MLModel
/// functions on the test data. Evaluation generates a relevant performance metric, such
/// as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on the corresponding MLModelType:
/// BINARY, REGRESSION or MULTICLASS.
///
///
///
/// CreateEvaluation is an asynchronous operation. In response to CreateEvaluation,
/// Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation status
/// to PENDING. After the Evaluation is created and ready for
/// use, Amazon ML sets the status to COMPLETED.
///
///
///
/// You can use the GetEvaluation operation to check progress of the evaluation
/// during the creation operation.
///
///
/// Container for the necessary parameters to execute the CreateEvaluation service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the CreateEvaluation service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateEvaluation Operation
public virtual Task<CreateEvaluationResponse> CreateEvaluationAsync(CreateEvaluationRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateEvaluationRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateEvaluationResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
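/*
 * Example (illustrative): evaluating a trained MLModel against a held-out DataSource. Assumes
 * an AmazonMachineLearningClient instance named client; all IDs are placeholders. GetEvaluation
 * (not shown in this file excerpt) can be polled for completion in the same way as the
 * batch-prediction example above.
 *
 *   client.CreateEvaluation(new CreateEvaluationRequest
 *   {
 *       EvaluationId = "ev-example",
 *       EvaluationName = "Example evaluation",
 *       MLModelId = "ml-exampleModelId",
 *       EvaluationDataSourceId = "ds-exampleHoldout"
 *   });
 */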
#region CreateMLModel
///
/// Creates a new MLModel using the DataSource and the recipe
/// as information sources.
///
///
///
/// An MLModel is nearly immutable. Users can update only the MLModelName
/// and the ScoreThreshold in an MLModel without creating a
/// new MLModel.
///
///
///
/// CreateMLModel is an asynchronous operation. In response to CreateMLModel,
/// Amazon Machine Learning (Amazon ML) immediately returns and sets the MLModel
/// status to PENDING. After the MLModel has been created and
/// is ready for use, Amazon ML sets the status to COMPLETED.
///
///
///
/// You can use the GetMLModel operation to check the progress of the MLModel
/// during the creation operation.
///
///
///
/// CreateMLModel requires a DataSource with computed statistics,
/// which can be created by setting ComputeStatistics to true
/// in CreateDataSourceFromRDS, CreateDataSourceFromS3, or CreateDataSourceFromRedshift
/// operations.
///
///
/// Container for the necessary parameters to execute the CreateMLModel service method.
///
/// The response from the CreateMLModel service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateMLModel Operation
public virtual CreateMLModelResponse CreateMLModel(CreateMLModelRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateMLModelRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateMLModelResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Creates a new MLModel using the DataSource and the recipe
/// as information sources.
///
///
///
/// An MLModel is nearly immutable. Users can update only the MLModelName
/// and the ScoreThreshold in an MLModel without creating a
/// new MLModel.
///
///
///
/// CreateMLModel is an asynchronous operation. In response to CreateMLModel,
/// Amazon Machine Learning (Amazon ML) immediately returns and sets the MLModel
/// status to PENDING. After the MLModel has been created and
/// is ready for use, Amazon ML sets the status to COMPLETED.
///
///
///
/// You can use the GetMLModel operation to check the progress of the MLModel
/// during the creation operation.
///
///
///
/// CreateMLModel requires a DataSource with computed statistics,
/// which can be created by setting ComputeStatistics to true
/// in CreateDataSourceFromRDS, CreateDataSourceFromS3, or CreateDataSourceFromRedshift
/// operations.
///
///
/// Container for the necessary parameters to execute the CreateMLModel service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the CreateMLModel service method, as returned by MachineLearning.
///
/// A second request to use or change an object was not allowed. This can result from
/// retrying a request using a parameter that was not present in the original request.
///
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for CreateMLModel Operation
public virtual Task<CreateMLModelResponse> CreateMLModelAsync(CreateMLModelRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateMLModelRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateMLModelResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
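/*
 * Example (illustrative): training a binary model from a DataSource that was created with
 * ComputeStatistics = true, as required by the summary above. Assumes an
 * AmazonMachineLearningClient instance named client; IDs are placeholders, and the default
 * recipe is used by omitting Recipe/RecipeUri.
 *
 *   client.CreateMLModel(new CreateMLModelRequest
 *   {
 *       MLModelId = "ml-exampleModelId",
 *       MLModelName = "Example binary model",
 *       MLModelType = MLModelType.BINARY,
 *       TrainingDataSourceId = "ds-exampleTraining"
 *   });
 */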
#region CreateRealtimeEndpoint
///
/// Creates a real-time endpoint for the MLModel. The endpoint contains the
/// URI of the MLModel; that is, the location to send real-time prediction
/// requests for the specified MLModel.
///
/// The ID assigned to the MLModel during creation.
///
/// The response from the CreateRealtimeEndpoint service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for CreateRealtimeEndpoint Operation
public virtual CreateRealtimeEndpointResponse CreateRealtimeEndpoint(string mlModelId)
{
var request = new CreateRealtimeEndpointRequest();
request.MLModelId = mlModelId;
return CreateRealtimeEndpoint(request);
}
///
/// Creates a real-time endpoint for the MLModel. The endpoint contains the
/// URI of the MLModel; that is, the location to send real-time prediction
/// requests for the specified MLModel.
///
/// Container for the necessary parameters to execute the CreateRealtimeEndpoint service method.
///
/// The response from the CreateRealtimeEndpoint service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for CreateRealtimeEndpoint Operation
public virtual CreateRealtimeEndpointResponse CreateRealtimeEndpoint(CreateRealtimeEndpointRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateRealtimeEndpointRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateRealtimeEndpointResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Creates a real-time endpoint for the MLModel. The endpoint contains the
/// URI of the MLModel; that is, the location to send real-time prediction
/// requests for the specified MLModel.
///
/// The ID assigned to the MLModel during creation.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the CreateRealtimeEndpoint service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for CreateRealtimeEndpoint Operation
public virtual Task<CreateRealtimeEndpointResponse> CreateRealtimeEndpointAsync(string mlModelId, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new CreateRealtimeEndpointRequest();
request.MLModelId = mlModelId;
return CreateRealtimeEndpointAsync(request, cancellationToken);
}
///
/// Creates a real-time endpoint for the MLModel. The endpoint contains the
/// URI of the MLModel; that is, the location to send real-time prediction
/// requests for the specified MLModel.
///
/// Container for the necessary parameters to execute the CreateRealtimeEndpoint service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the CreateRealtimeEndpoint service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for CreateRealtimeEndpoint Operation
public virtual Task<CreateRealtimeEndpointResponse> CreateRealtimeEndpointAsync(CreateRealtimeEndpointRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = CreateRealtimeEndpointRequestMarshaller.Instance;
options.ResponseUnmarshaller = CreateRealtimeEndpointResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
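/*
 * Example (illustrative): creating a real-time endpoint for a model and reading back the
 * endpoint URI from the response. Assumes an AmazonMachineLearningClient instance named
 * client; the model ID is a placeholder, and the RealtimeEndpointInfo/EndpointUrl member
 * names are assumptions based on the Model namespace.
 *
 *   var endpointResponse = client.CreateRealtimeEndpoint("ml-exampleModelId");
 *   Console.WriteLine(endpointResponse.RealtimeEndpointInfo.EndpointUrl);
 */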
#region DeleteBatchPrediction
///
/// Assigns the DELETED status to a BatchPrediction, rendering it unusable.
///
///
///
/// After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction
/// operation to verify that the status of the BatchPrediction changed to
/// DELETED.
///
///
///
/// Caution: The result of the DeleteBatchPrediction operation is
/// irreversible.
///
///
/// A user-supplied ID that uniquely identifies the BatchPrediction.
///
/// The response from the DeleteBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteBatchPrediction Operation
public virtual DeleteBatchPredictionResponse DeleteBatchPrediction(string batchPredictionId)
{
var request = new DeleteBatchPredictionRequest();
request.BatchPredictionId = batchPredictionId;
return DeleteBatchPrediction(request);
}
///
/// Assigns the DELETED status to a BatchPrediction, rendering it unusable.
///
///
///
/// After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction
/// operation to verify that the status of the BatchPrediction changed to
/// DELETED.
///
///
///
/// Caution: The result of the DeleteBatchPrediction operation is
/// irreversible.
///
///
/// Container for the necessary parameters to execute the DeleteBatchPrediction service method.
///
/// The response from the DeleteBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteBatchPrediction Operation
public virtual DeleteBatchPredictionResponse DeleteBatchPrediction(DeleteBatchPredictionRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteBatchPredictionRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteBatchPredictionResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Assigns the DELETED status to a BatchPrediction, rendering it unusable.
///
///
///
/// After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction
/// operation to verify that the status of the BatchPrediction changed to
/// DELETED.
///
///
///
/// Caution: The result of the DeleteBatchPrediction operation is
/// irreversible.
///
///
/// A user-supplied ID that uniquely identifies the BatchPrediction.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DeleteBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteBatchPrediction Operation
public virtual Task<DeleteBatchPredictionResponse> DeleteBatchPredictionAsync(string batchPredictionId, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new DeleteBatchPredictionRequest();
request.BatchPredictionId = batchPredictionId;
return DeleteBatchPredictionAsync(request, cancellationToken);
}
///
/// Assigns the DELETED status to a BatchPrediction, rendering it unusable.
///
///
///
/// After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction
/// operation to verify that the status of the BatchPrediction changed to
/// DELETED.
///
///
///
/// Caution: The result of the DeleteBatchPrediction operation is
/// irreversible.
///
///
/// Container for the necessary parameters to execute the DeleteBatchPrediction service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DeleteBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteBatchPrediction Operation
public virtual Task<DeleteBatchPredictionResponse> DeleteBatchPredictionAsync(DeleteBatchPredictionRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteBatchPredictionRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteBatchPredictionResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
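/*
 * Example (illustrative): the Delete* operations in this client all follow the same pattern;
 * the string overloads simply wrap the ID in a request object. Assumes an
 * AmazonMachineLearningClient instance named client; the ID is a placeholder, and the deletion
 * is irreversible as noted in the summaries.
 *
 *   await client.DeleteBatchPredictionAsync("bp-example");
 */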
#region DeleteDataSource
///
/// Assigns the DELETED status to a DataSource, rendering it unusable.
///
///
///
/// After using the DeleteDataSource operation, you can use the GetDataSource
/// operation to verify that the status of the DataSource changed to DELETED.
///
///
///
/// Caution: The results of the DeleteDataSource operation are irreversible.
///
///
/// A user-supplied ID that uniquely identifies the DataSource.
///
/// The response from the DeleteDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteDataSource Operation
public virtual DeleteDataSourceResponse DeleteDataSource(string dataSourceId)
{
var request = new DeleteDataSourceRequest();
request.DataSourceId = dataSourceId;
return DeleteDataSource(request);
}
///
/// Assigns the DELETED status to a DataSource, rendering it unusable.
///
///
///
/// After using the DeleteDataSource operation, you can use the GetDataSource
/// operation to verify that the status of the DataSource changed to DELETED.
///
///
///
/// Caution: The results of the DeleteDataSource operation are irreversible.
///
///
/// Container for the necessary parameters to execute the DeleteDataSource service method.
///
/// The response from the DeleteDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteDataSource Operation
public virtual DeleteDataSourceResponse DeleteDataSource(DeleteDataSourceRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteDataSourceRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteDataSourceResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Assigns the DELETED status to a DataSource, rendering it unusable.
///
///
///
/// After using the DeleteDataSource operation, you can use the GetDataSource
/// operation to verify that the status of the DataSource changed to DELETED.
///
///
///
/// Caution: The results of the DeleteDataSource operation are irreversible.
///
///
/// A user-supplied ID that uniquely identifies the DataSource.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DeleteDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteDataSource Operation
public virtual Task<DeleteDataSourceResponse> DeleteDataSourceAsync(string dataSourceId, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new DeleteDataSourceRequest();
request.DataSourceId = dataSourceId;
return DeleteDataSourceAsync(request, cancellationToken);
}
///
/// Assigns the DELETED status to a DataSource, rendering it unusable.
///
///
///
/// After using the DeleteDataSource operation, you can use the GetDataSource
/// operation to verify that the status of the DataSource changed to DELETED.
///
///
///
/// Caution: The results of the DeleteDataSource operation are irreversible.
///
///
/// Container for the necessary parameters to execute the DeleteDataSource service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DeleteDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteDataSource Operation
public virtual Task<DeleteDataSourceResponse> DeleteDataSourceAsync(DeleteDataSourceRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteDataSourceRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteDataSourceResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
#region DeleteEvaluation
///
/// Assigns the DELETED status to an Evaluation, rendering it
/// unusable.
///
///
///
/// After invoking the DeleteEvaluation operation, you can use the GetEvaluation
/// operation to verify that the status of the Evaluation changed to DELETED.
///
///
///
/// Caution: The results of the DeleteEvaluation operation are irreversible.
///
///
/// A user-supplied ID that uniquely identifies the Evaluation to delete.
///
/// The response from the DeleteEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteEvaluation Operation
public virtual DeleteEvaluationResponse DeleteEvaluation(string evaluationId)
{
var request = new DeleteEvaluationRequest();
request.EvaluationId = evaluationId;
return DeleteEvaluation(request);
}
///
/// Assigns the DELETED status to an Evaluation, rendering it
/// unusable.
///
///
///
/// After invoking the DeleteEvaluation operation, you can use the GetEvaluation
/// operation to verify that the status of the Evaluation changed to DELETED.
///
///
///
/// Caution: The results of the DeleteEvaluation operation are irreversible.
///
///
/// Container for the necessary parameters to execute the DeleteEvaluation service method.
///
/// The response from the DeleteEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteEvaluation Operation
public virtual DeleteEvaluationResponse DeleteEvaluation(DeleteEvaluationRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteEvaluationRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteEvaluationResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Assigns the DELETED status to an Evaluation, rendering it
/// unusable.
///
///
///
/// After invoking the DeleteEvaluation operation, you can use the GetEvaluation
/// operation to verify that the status of the Evaluation changed to DELETED.
///
///
///
/// Caution: The results of the DeleteEvaluation operation are irreversible.
///
///
/// A user-supplied ID that uniquely identifies the Evaluation to delete.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DeleteEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteEvaluation Operation
public virtual Task<DeleteEvaluationResponse> DeleteEvaluationAsync(string evaluationId, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new DeleteEvaluationRequest();
request.EvaluationId = evaluationId;
return DeleteEvaluationAsync(request, cancellationToken);
}
///
/// Assigns the DELETED status to an Evaluation, rendering it
/// unusable.
///
///
///
/// After invoking the DeleteEvaluation operation, you can use the GetEvaluation
/// operation to verify that the status of the Evaluation changed to DELETED.
///
///
///
/// Caution: The results of the DeleteEvaluation operation are irreversible.
///
///
/// Container for the necessary parameters to execute the DeleteEvaluation service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DeleteEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteEvaluation Operation
public virtual Task<DeleteEvaluationResponse> DeleteEvaluationAsync(DeleteEvaluationRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteEvaluationRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteEvaluationResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
#region DeleteMLModel
///
/// Assigns the DELETED status to an MLModel, rendering it unusable.
///
///
///
/// After using the DeleteMLModel operation, you can use the GetMLModel
/// operation to verify that the status of the MLModel changed to DELETED.
///
///
///
/// Caution: The result of the DeleteMLModel operation is irreversible.
///
///
/// A user-supplied ID that uniquely identifies the MLModel.
///
/// The response from the DeleteMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteMLModel Operation
public virtual DeleteMLModelResponse DeleteMLModel(string mlModelId)
{
var request = new DeleteMLModelRequest();
request.MLModelId = mlModelId;
return DeleteMLModel(request);
}
///
/// Assigns the DELETED status to an MLModel, rendering it unusable.
///
///
///
/// After using the DeleteMLModel operation, you can use the GetMLModel
/// operation to verify that the status of the MLModel changed to DELETED.
///
///
///
/// Caution: The result of the DeleteMLModel operation is irreversible.
///
///
/// Container for the necessary parameters to execute the DeleteMLModel service method.
///
/// The response from the DeleteMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteMLModel Operation
public virtual DeleteMLModelResponse DeleteMLModel(DeleteMLModelRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteMLModelRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteMLModelResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Assigns the DELETED status to an MLModel, rendering it unusable.
///
///
///
/// After using the DeleteMLModel operation, you can use the GetMLModel
/// operation to verify that the status of the MLModel changed to DELETED.
///
///
///
/// Caution: The result of the DeleteMLModel operation is irreversible.
///
///
/// A user-supplied ID that uniquely identifies the MLModel.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DeleteMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteMLModel Operation
public virtual Task<DeleteMLModelResponse> DeleteMLModelAsync(string mlModelId, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new DeleteMLModelRequest();
request.MLModelId = mlModelId;
return DeleteMLModelAsync(request, cancellationToken);
}
///
/// Assigns the DELETED status to an MLModel, rendering it unusable.
///
///
///
/// After using the DeleteMLModel operation, you can use the GetMLModel
/// operation to verify that the status of the MLModel changed to DELETED.
///
///
///
/// Caution: The result of the DeleteMLModel operation is irreversible.
///
///
/// Container for the necessary parameters to execute the DeleteMLModel service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DeleteMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteMLModel Operation
public virtual Task<DeleteMLModelResponse> DeleteMLModelAsync(DeleteMLModelRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteMLModelRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteMLModelResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
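// Illustrative usage sketch (assumes a configured client and an existing model ID): the string
// overload above marks the MLModel as DELETED, and GetMLModel can confirm the new status.
//
//   var client = new AmazonMachineLearningClient();
//   client.DeleteMLModel("example-model-id");
//   Console.WriteLine(client.GetMLModel("example-model-id").Status); // expected to report DELETED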
#region DeleteRealtimeEndpoint
///
/// Deletes a real time endpoint of an MLModel.
///
/// The ID assigned to the MLModel during creation.
///
/// The response from the DeleteRealtimeEndpoint service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteRealtimeEndpoint Operation
public virtual DeleteRealtimeEndpointResponse DeleteRealtimeEndpoint(string mlModelId)
{
var request = new DeleteRealtimeEndpointRequest();
request.MLModelId = mlModelId;
return DeleteRealtimeEndpoint(request);
}
///
/// Deletes a real time endpoint of an MLModel.
///
/// Container for the necessary parameters to execute the DeleteRealtimeEndpoint service method.
///
/// The response from the DeleteRealtimeEndpoint service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteRealtimeEndpoint Operation
public virtual DeleteRealtimeEndpointResponse DeleteRealtimeEndpoint(DeleteRealtimeEndpointRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteRealtimeEndpointRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteRealtimeEndpointResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Deletes a real time endpoint of an MLModel.
///
/// The ID assigned to the MLModel during creation.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DeleteRealtimeEndpoint service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteRealtimeEndpoint Operation
public virtual Task<DeleteRealtimeEndpointResponse> DeleteRealtimeEndpointAsync(string mlModelId, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new DeleteRealtimeEndpointRequest();
request.MLModelId = mlModelId;
return DeleteRealtimeEndpointAsync(request, cancellationToken);
}
///
/// Deletes a real time endpoint of an MLModel.
///
/// Container for the necessary parameters to execute the DeleteRealtimeEndpoint service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DeleteRealtimeEndpoint service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteRealtimeEndpoint Operation
public virtual Task<DeleteRealtimeEndpointResponse> DeleteRealtimeEndpointAsync(DeleteRealtimeEndpointRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteRealtimeEndpointRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteRealtimeEndpointResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
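// Illustrative usage sketch (the model ID is an assumption, as is the RealtimeEndpointInfo
// property on the response): removing the real-time endpoint of an MLModel.
//
//   var client = new AmazonMachineLearningClient();
//   var endpointResponse = client.DeleteRealtimeEndpoint("example-model-id");
//   Console.WriteLine(endpointResponse.RealtimeEndpointInfo.EndpointStatus);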
#region DeleteTags
///
/// Deletes the specified tags associated with an ML object. After this operation is complete,
/// you can't recover deleted tags.
///
///
///
/// If you specify a tag that doesn't exist, Amazon ML ignores it.
///
///
/// Container for the necessary parameters to execute the DeleteTags service method.
///
/// The response from the DeleteTags service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
///
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteTags Operation
public virtual DeleteTagsResponse DeleteTags(DeleteTagsRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteTagsRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteTagsResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Deletes the specified tags associated with an ML object. After this operation is complete,
/// you can't recover deleted tags.
///
///
///
/// If you specify a tag that doesn't exist, Amazon ML ignores it.
///
///
/// Container for the necessary parameters to execute the DeleteTags service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DeleteTags service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
///
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DeleteTags Operation
public virtual Task<DeleteTagsResponse> DeleteTagsAsync(DeleteTagsRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = DeleteTagsRequestMarshaller.Instance;
options.ResponseUnmarshaller = DeleteTagsResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
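// Illustrative usage sketch (the resource ID, resource type constant, and tag keys are
// assumptions): removing two tags from an MLModel; per the remarks above, tag keys that
// do not exist are simply ignored.
//
//   client.DeleteTags(new DeleteTagsRequest
//   {
//       ResourceId = "example-model-id",
//       ResourceType = TaggableResourceType.MLModel,
//       TagKeys = new List<string> { "project", "owner" }
//   });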
#region DescribeBatchPredictions
///
/// Returns a list of BatchPrediction operations that match the search criteria
/// in the request.
///
/// Container for the necessary parameters to execute the DescribeBatchPredictions service method.
///
/// The response from the DescribeBatchPredictions service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for DescribeBatchPredictions Operation
public virtual DescribeBatchPredictionsResponse DescribeBatchPredictions(DescribeBatchPredictionsRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = DescribeBatchPredictionsRequestMarshaller.Instance;
options.ResponseUnmarshaller = DescribeBatchPredictionsResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Returns a list of BatchPrediction operations that match the search criteria
/// in the request.
///
/// Container for the necessary parameters to execute the DescribeBatchPredictions service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DescribeBatchPredictions service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for DescribeBatchPredictions Operation
public virtual Task<DescribeBatchPredictionsResponse> DescribeBatchPredictionsAsync(DescribeBatchPredictionsRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = DescribeBatchPredictionsRequestMarshaller.Instance;
options.ResponseUnmarshaller = DescribeBatchPredictionsResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
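// Illustrative usage sketch (assumes a configured client; the Limit request property and the
// Results collection on the response are assumptions): listing a page of BatchPrediction summaries.
//
//   var batchPredictions = client.DescribeBatchPredictions(new DescribeBatchPredictionsRequest { Limit = 10 });
//   foreach (var batchPrediction in batchPredictions.Results)
//       Console.WriteLine(batchPrediction.Name);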
#region DescribeDataSources
///
/// Returns a list of DataSource that match the search criteria in the request.
///
/// Container for the necessary parameters to execute the DescribeDataSources service method.
///
/// The response from the DescribeDataSources service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for DescribeDataSources Operation
public virtual DescribeDataSourcesResponse DescribeDataSources(DescribeDataSourcesRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = DescribeDataSourcesRequestMarshaller.Instance;
options.ResponseUnmarshaller = DescribeDataSourcesResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Returns a list of DataSource that match the search criteria in the request.
///
/// Container for the necessary parameters to execute the DescribeDataSources service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DescribeDataSources service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for DescribeDataSources Operation
public virtual Task<DescribeDataSourcesResponse> DescribeDataSourcesAsync(DescribeDataSourcesRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = DescribeDataSourcesRequestMarshaller.Instance;
options.ResponseUnmarshaller = DescribeDataSourcesResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
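// Illustrative usage sketch (same assumptions as the DescribeBatchPredictions example above):
//
//   var dataSources = client.DescribeDataSources(new DescribeDataSourcesRequest { Limit = 10 });
//   foreach (var dataSource in dataSources.Results)
//       Console.WriteLine(dataSource.Name);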
#region DescribeEvaluations
///
/// Returns a list of DescribeEvaluations that match the search criteria
/// in the request.
///
/// Container for the necessary parameters to execute the DescribeEvaluations service method.
///
/// The response from the DescribeEvaluations service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for DescribeEvaluations Operation
public virtual DescribeEvaluationsResponse DescribeEvaluations(DescribeEvaluationsRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = DescribeEvaluationsRequestMarshaller.Instance;
options.ResponseUnmarshaller = DescribeEvaluationsResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Returns a list of DescribeEvaluations that match the search criteria
/// in the request.
///
/// Container for the necessary parameters to execute the DescribeEvaluations service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DescribeEvaluations service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for DescribeEvaluations Operation
public virtual Task<DescribeEvaluationsResponse> DescribeEvaluationsAsync(DescribeEvaluationsRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = DescribeEvaluationsRequestMarshaller.Instance;
options.ResponseUnmarshaller = DescribeEvaluationsResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
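// Illustrative usage sketch (same assumptions as the other Describe examples above):
//
//   var evaluations = client.DescribeEvaluations(new DescribeEvaluationsRequest { Limit = 10 });
//   foreach (var evaluation in evaluations.Results)
//       Console.WriteLine(evaluation.Name);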
#region DescribeMLModels
///
/// Returns a list of MLModel that match the search criteria in the request.
///
/// Container for the necessary parameters to execute the DescribeMLModels service method.
///
/// The response from the DescribeMLModels service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for DescribeMLModels Operation
public virtual DescribeMLModelsResponse DescribeMLModels(DescribeMLModelsRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = DescribeMLModelsRequestMarshaller.Instance;
options.ResponseUnmarshaller = DescribeMLModelsResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Returns a list of MLModel that match the search criteria in the request.
///
/// Container for the necessary parameters to execute the DescribeMLModels service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DescribeMLModels service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
/// REST API Reference for DescribeMLModels Operation
public virtual Task<DescribeMLModelsResponse> DescribeMLModelsAsync(DescribeMLModelsRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = DescribeMLModelsRequestMarshaller.Instance;
options.ResponseUnmarshaller = DescribeMLModelsResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
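// Illustrative usage sketch (same assumptions as the other Describe examples above):
//
//   var models = client.DescribeMLModels(new DescribeMLModelsRequest { Limit = 10 });
//   foreach (var model in models.Results)
//       Console.WriteLine(model.Name + " " + model.Status);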
#region DescribeTags
///
/// Describes one or more of the tags for your Amazon ML object.
///
/// Container for the necessary parameters to execute the DescribeTags service method.
///
/// The response from the DescribeTags service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DescribeTags Operation
public virtual DescribeTagsResponse DescribeTags(DescribeTagsRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = DescribeTagsRequestMarshaller.Instance;
options.ResponseUnmarshaller = DescribeTagsResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Describes one or more of the tags for your Amazon ML object.
///
/// Container for the necessary parameters to execute the DescribeTags service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the DescribeTags service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for DescribeTags Operation
public virtual Task<DescribeTagsResponse> DescribeTagsAsync(DescribeTagsRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = DescribeTagsRequestMarshaller.Instance;
options.ResponseUnmarshaller = DescribeTagsResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
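// Illustrative usage sketch (the resource ID and the Tags collection on the response are
// assumptions): listing the tags attached to a DataSource.
//
//   var tagsResponse = client.DescribeTags(new DescribeTagsRequest
//   {
//       ResourceId = "example-datasource-id",
//       ResourceType = TaggableResourceType.DataSource
//   });
//   foreach (var tag in tagsResponse.Tags)
//       Console.WriteLine(tag.Key + "=" + tag.Value);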
#region GetBatchPrediction
///
/// Returns a BatchPrediction that includes detailed metadata, status, and
/// data file information for a Batch Prediction request.
///
/// An ID assigned to the BatchPrediction at creation.
///
/// The response from the GetBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetBatchPrediction Operation
public virtual GetBatchPredictionResponse GetBatchPrediction(string batchPredictionId)
{
var request = new GetBatchPredictionRequest();
request.BatchPredictionId = batchPredictionId;
return GetBatchPrediction(request);
}
///
/// Returns a BatchPrediction that includes detailed metadata, status, and
/// data file information for a Batch Prediction request.
///
/// Container for the necessary parameters to execute the GetBatchPrediction service method.
///
/// The response from the GetBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetBatchPrediction Operation
public virtual GetBatchPredictionResponse GetBatchPrediction(GetBatchPredictionRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = GetBatchPredictionRequestMarshaller.Instance;
options.ResponseUnmarshaller = GetBatchPredictionResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Returns a BatchPrediction that includes detailed metadata, status, and
/// data file information for a Batch Prediction request.
///
/// An ID assigned to the BatchPrediction at creation.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the GetBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetBatchPrediction Operation
public virtual Task<GetBatchPredictionResponse> GetBatchPredictionAsync(string batchPredictionId, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new GetBatchPredictionRequest();
request.BatchPredictionId = batchPredictionId;
return GetBatchPredictionAsync(request, cancellationToken);
}
///
/// Returns a BatchPrediction that includes detailed metadata, status, and
/// data file information for a Batch Prediction request.
///
/// Container for the necessary parameters to execute the GetBatchPrediction service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the GetBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetBatchPrediction Operation
public virtual Task<GetBatchPredictionResponse> GetBatchPredictionAsync(GetBatchPredictionRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = GetBatchPredictionRequestMarshaller.Instance;
options.ResponseUnmarshaller = GetBatchPredictionResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
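// Illustrative usage sketch (the batch prediction ID and the OutputUri property are
// assumptions): checking the status and output location of a BatchPrediction.
//
//   var batchPrediction = client.GetBatchPrediction("example-batchprediction-id");
//   Console.WriteLine(batchPrediction.Status);
//   Console.WriteLine(batchPrediction.OutputUri);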
#region GetDataSource
///
/// Returns a DataSource that includes metadata and data file information,
/// as well as the current status of the DataSource.
///
///
///
/// GetDataSource provides results in normal or verbose format. The verbose
/// format adds the schema description and the list of files pointed to by the DataSource
/// to the normal format.
///
///
/// The ID assigned to the DataSource at creation.
///
/// The response from the GetDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetDataSource Operation
public virtual GetDataSourceResponse GetDataSource(string dataSourceId)
{
var request = new GetDataSourceRequest();
request.DataSourceId = dataSourceId;
return GetDataSource(request);
}
///
/// Returns a DataSource that includes metadata and data file information,
/// as well as the current status of the DataSource.
///
///
///
/// GetDataSource provides results in normal or verbose format. The verbose
/// format adds the schema description and the list of files pointed to by the DataSource
/// to the normal format.
///
///
/// The ID assigned to the DataSource at creation.
/// Specifies whether the GetDataSource operation should return DataSourceSchema. If true, DataSourceSchema is returned. If false, DataSourceSchema is not returned.
///
/// The response from the GetDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetDataSource Operation
public virtual GetDataSourceResponse GetDataSource(string dataSourceId, bool verbose)
{
var request = new GetDataSourceRequest();
request.DataSourceId = dataSourceId;
request.Verbose = verbose;
return GetDataSource(request);
}
///
/// Returns a DataSource that includes metadata and data file information,
/// as well as the current status of the DataSource.
///
///
///
/// GetDataSource provides results in normal or verbose format. The verbose
/// format adds the schema description and the list of files pointed to by the DataSource
/// to the normal format.
///
///
/// Container for the necessary parameters to execute the GetDataSource service method.
///
/// The response from the GetDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetDataSource Operation
public virtual GetDataSourceResponse GetDataSource(GetDataSourceRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = GetDataSourceRequestMarshaller.Instance;
options.ResponseUnmarshaller = GetDataSourceResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Returns a DataSource that includes metadata and data file information,
/// as well as the current status of the DataSource.
///
///
///
/// GetDataSource provides results in normal or verbose format. The verbose
/// format adds the schema description and the list of files pointed to by the DataSource
/// to the normal format.
///
///
/// The ID assigned to the DataSource at creation.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the GetDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetDataSource Operation
public virtual Task<GetDataSourceResponse> GetDataSourceAsync(string dataSourceId, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new GetDataSourceRequest();
request.DataSourceId = dataSourceId;
return GetDataSourceAsync(request, cancellationToken);
}
///
/// Returns a DataSource that includes metadata and data file information,
/// as well as the current status of the DataSource.
///
///
///
/// GetDataSource provides results in normal or verbose format. The verbose
/// format adds the schema description and the list of files pointed to by the DataSource
/// to the normal format.
///
///
/// The ID assigned to the DataSource at creation.
/// Specifies whether the GetDataSource operation should return DataSourceSchema. If true, DataSourceSchema is returned. If false, DataSourceSchema is not returned.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the GetDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetDataSource Operation
public virtual Task<GetDataSourceResponse> GetDataSourceAsync(string dataSourceId, bool verbose, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new GetDataSourceRequest();
request.DataSourceId = dataSourceId;
request.Verbose = verbose;
return GetDataSourceAsync(request, cancellationToken);
}
///
/// Returns a DataSource that includes metadata and data file information,
/// as well as the current status of the DataSource.
///
///
///
/// GetDataSource provides results in normal or verbose format. The verbose
/// format adds the schema description and the list of files pointed to by the DataSource
/// to the normal format.
///
///
/// Container for the necessary parameters to execute the GetDataSource service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the GetDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetDataSource Operation
public virtual Task<GetDataSourceResponse> GetDataSourceAsync(GetDataSourceRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = GetDataSourceRequestMarshaller.Instance;
options.ResponseUnmarshaller = GetDataSourceResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
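// Illustrative usage sketch (the data source ID and the DataSourceSchema property are
// assumptions): requesting the verbose format so the schema is included, per the remarks above.
//
//   var dataSource = client.GetDataSource("example-datasource-id", verbose: true);
//   Console.WriteLine(dataSource.Status);
//   Console.WriteLine(dataSource.DataSourceSchema);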
#region GetEvaluation
///
/// Returns an Evaluation that includes metadata as well as the current status
/// of the Evaluation.
///
/// The ID of the Evaluation to retrieve. The evaluation of each MLModel is recorded and cataloged. The ID provides the means to access the information.
///
/// The response from the GetEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetEvaluation Operation
public virtual GetEvaluationResponse GetEvaluation(string evaluationId)
{
var request = new GetEvaluationRequest();
request.EvaluationId = evaluationId;
return GetEvaluation(request);
}
///
/// Returns an Evaluation that includes metadata as well as the current status
/// of the Evaluation.
///
/// Container for the necessary parameters to execute the GetEvaluation service method.
///
/// The response from the GetEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetEvaluation Operation
public virtual GetEvaluationResponse GetEvaluation(GetEvaluationRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = GetEvaluationRequestMarshaller.Instance;
options.ResponseUnmarshaller = GetEvaluationResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Returns an Evaluation that includes metadata as well as the current status
/// of the Evaluation.
///
/// The ID of the Evaluation to retrieve. The evaluation of each MLModel is recorded and cataloged. The ID provides the means to access the information.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the GetEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetEvaluation Operation
public virtual Task<GetEvaluationResponse> GetEvaluationAsync(string evaluationId, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new GetEvaluationRequest();
request.EvaluationId = evaluationId;
return GetEvaluationAsync(request, cancellationToken);
}
///
/// Returns an Evaluation that includes metadata as well as the current status
/// of the Evaluation.
///
/// Container for the necessary parameters to execute the GetEvaluation service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the GetEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetEvaluation Operation
public virtual Task<GetEvaluationResponse> GetEvaluationAsync(GetEvaluationRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = GetEvaluationRequestMarshaller.Instance;
options.ResponseUnmarshaller = GetEvaluationResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
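// Illustrative usage sketch (the evaluation ID is an assumption): retrieving an Evaluation
// and reporting its current status.
//
//   var evaluation = client.GetEvaluation("example-evaluation-id");
//   Console.WriteLine(evaluation.Status);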
#region GetMLModel
///
/// Returns an MLModel that includes detailed metadata, data source information,
/// and the current status of the MLModel.
///
///
///
/// GetMLModel provides results in normal or verbose format.
///
///
/// The ID assigned to the MLModel at creation.
///
/// The response from the GetMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetMLModel Operation
public virtual GetMLModelResponse GetMLModel(string mlModelId)
{
var request = new GetMLModelRequest();
request.MLModelId = mlModelId;
return GetMLModel(request);
}
///
/// Returns an MLModel that includes detailed metadata, data source information,
/// and the current status of the MLModel.
///
///
///
/// GetMLModel provides results in normal or verbose format.
///
///
/// The ID assigned to the MLModel at creation.
/// Specifies whether the GetMLModel operation should return Recipe. If true, Recipe is returned. If false, Recipe is not returned.
///
/// The response from the GetMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetMLModel Operation
public virtual GetMLModelResponse GetMLModel(string mlModelId, bool verbose)
{
var request = new GetMLModelRequest();
request.MLModelId = mlModelId;
request.Verbose = verbose;
return GetMLModel(request);
}
///
/// Returns an MLModel that includes detailed metadata, data source information,
/// and the current status of the MLModel.
///
///
///
/// GetMLModel provides results in normal or verbose format.
///
///
/// Container for the necessary parameters to execute the GetMLModel service method.
///
/// The response from the GetMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetMLModel Operation
public virtual GetMLModelResponse GetMLModel(GetMLModelRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = GetMLModelRequestMarshaller.Instance;
options.ResponseUnmarshaller = GetMLModelResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Returns an MLModel that includes detailed metadata, data source information,
/// and the current status of the MLModel.
///
///
///
/// GetMLModel provides results in normal or verbose format.
///
///
/// The ID assigned to the MLModel at creation.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the GetMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetMLModel Operation
public virtual Task<GetMLModelResponse> GetMLModelAsync(string mlModelId, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new GetMLModelRequest();
request.MLModelId = mlModelId;
return GetMLModelAsync(request, cancellationToken);
}
///
/// Returns an MLModel that includes detailed metadata, data source information,
/// and the current status of the MLModel.
///
///
///
/// GetMLModel provides results in normal or verbose format.
///
///
/// The ID assigned to the MLModel at creation.
/// Specifies whether the GetMLModel operation should return Recipe. If true, Recipe is returned. If false, Recipe is not returned.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the GetMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetMLModel Operation
public virtual Task<GetMLModelResponse> GetMLModelAsync(string mlModelId, bool verbose, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new GetMLModelRequest();
request.MLModelId = mlModelId;
request.Verbose = verbose;
return GetMLModelAsync(request, cancellationToken);
}
///
/// Returns an MLModel that includes detailed metadata, data source information,
/// and the current status of the MLModel.
///
///
///
/// GetMLModel provides results in normal or verbose format.
///
///
/// Container for the necessary parameters to execute the GetMLModel service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the GetMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for GetMLModel Operation
public virtual Task<GetMLModelResponse> GetMLModelAsync(GetMLModelRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = GetMLModelRequestMarshaller.Instance;
options.ResponseUnmarshaller = GetMLModelResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
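// Illustrative usage sketch (the model ID and the Recipe property are assumptions):
// requesting the verbose format so the Recipe is included, per the remarks above.
//
//   var model = client.GetMLModel("example-model-id", verbose: true);
//   Console.WriteLine(model.Status);
//   Console.WriteLine(model.Recipe);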
#region Predict
///
/// Generates a prediction for the observation using the specified ML Model.
///
///
///
/// Note: Not all response parameters will be populated. Whether a response parameter
/// is populated depends on the type of model requested.
///
///
/// A unique identifier of the MLModel.
/// A property of PredictRequest used to execute the Predict service method.
/// A property of PredictRequest used to execute the Predict service method.
///
/// The response from the Predict service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// The subscriber exceeded the maximum number of operations. This exception can occur
/// when listing objects such as DataSource.
///
///
/// The exception is thrown when a predict request is made to an unmounted MLModel.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for Predict Operation
public virtual PredictResponse Predict(string mlModelId, string predictEndpoint, Dictionary<string, string> record)
{
var request = new PredictRequest();
request.MLModelId = mlModelId;
request.PredictEndpoint = predictEndpoint;
request.Record = record;
return Predict(request);
}
///
/// Generates a prediction for the observation using the specified ML Model.
///
///
///
/// Note: Not all response parameters will be populated. Whether a response parameter
/// is populated depends on the type of model requested.
///
///
/// Container for the necessary parameters to execute the Predict service method.
///
/// The response from the Predict service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// The subscriber exceeded the maximum number of operations. This exception can occur
/// when listing objects such as DataSource.
///
///
/// The exception is thrown when a predict request is made to an unmounted MLModel.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for Predict Operation
public virtual PredictResponse Predict(PredictRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = PredictRequestMarshaller.Instance;
options.ResponseUnmarshaller = PredictResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Generates a prediction for the observation using the specified ML Model.
///
///
///
/// Note: Not all response parameters will be populated. Whether a response parameter
/// is populated depends on the type of model requested.
///
///
/// A unique identifier of the MLModel.
/// A property of PredictRequest used to execute the Predict service method.
/// A property of PredictRequest used to execute the Predict service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the Predict service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// The subscriber exceeded the maximum number of operations. This exception can occur
/// when listing objects such as DataSource.
///
///
/// The exception is thrown when a predict request is made to an unmounted MLModel.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for Predict Operation
public virtual Task<PredictResponse> PredictAsync(string mlModelId, string predictEndpoint, Dictionary<string, string> record, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new PredictRequest();
request.MLModelId = mlModelId;
request.PredictEndpoint = predictEndpoint;
request.Record = record;
return PredictAsync(request, cancellationToken);
}
///
/// Generates a prediction for the observation using the specified ML Model.
///
///
///
/// Note: Not all response parameters will be populated. Whether a response parameter
/// is populated depends on the type of model requested.
///
///
/// Container for the necessary parameters to execute the Predict service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the Predict service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// The subscriber exceeded the maximum number of operations. This exception can occur
/// when listing objects such as DataSource.
///
///
/// The exception is thrown when a predict request is made to an unmounted MLModel.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for Predict Operation
public virtual Task<PredictResponse> PredictAsync(PredictRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = PredictRequestMarshaller.Instance;
options.ResponseUnmarshaller = PredictResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
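// Illustrative usage sketch (the model ID, endpoint URL, record values, and the
// Prediction.PredictedLabel property are assumptions; which prediction fields are populated
// depends on the model type, per the note above): requesting a real-time prediction.
//
//   var record = new Dictionary<string, string> { { "feature1", "42" }, { "feature2", "red" } };
//   var result = client.Predict("example-model-id", "https://realtime.machinelearning.us-east-1.amazonaws.com", record);
//   Console.WriteLine(result.Prediction.PredictedLabel);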
#region UpdateBatchPrediction
///
/// Updates the BatchPredictionName of a BatchPrediction.
///
///
///
/// You can use the GetBatchPrediction operation to view the contents of
/// the updated data element.
///
///
/// The ID assigned to the BatchPrediction during creation.
/// A new user-supplied name or description of the BatchPrediction.
///
/// The response from the UpdateBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateBatchPrediction Operation
public virtual UpdateBatchPredictionResponse UpdateBatchPrediction(string batchPredictionId, string batchPredictionName)
{
var request = new UpdateBatchPredictionRequest();
request.BatchPredictionId = batchPredictionId;
request.BatchPredictionName = batchPredictionName;
return UpdateBatchPrediction(request);
}
///
/// Updates the BatchPredictionName of a BatchPrediction.
///
///
///
/// You can use the GetBatchPrediction operation to view the contents of
/// the updated data element.
///
///
/// Container for the necessary parameters to execute the UpdateBatchPrediction service method.
///
/// The response from the UpdateBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateBatchPrediction Operation
public virtual UpdateBatchPredictionResponse UpdateBatchPrediction(UpdateBatchPredictionRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = UpdateBatchPredictionRequestMarshaller.Instance;
options.ResponseUnmarshaller = UpdateBatchPredictionResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Updates the BatchPredictionName of a BatchPrediction.
///
///
///
/// You can use the GetBatchPrediction operation to view the contents of
/// the updated data element.
///
///
/// The ID assigned to the BatchPrediction during creation.
/// A new user-supplied name or description of the BatchPrediction.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the UpdateBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateBatchPrediction Operation
public virtual Task<UpdateBatchPredictionResponse> UpdateBatchPredictionAsync(string batchPredictionId, string batchPredictionName, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new UpdateBatchPredictionRequest();
request.BatchPredictionId = batchPredictionId;
request.BatchPredictionName = batchPredictionName;
return UpdateBatchPredictionAsync(request, cancellationToken);
}
///
/// Updates the BatchPredictionName of a BatchPrediction.
///
///
///
/// You can use the GetBatchPrediction operation to view the contents of
/// the updated data element.
///
///
/// Container for the necessary parameters to execute the UpdateBatchPrediction service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the UpdateBatchPrediction service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateBatchPrediction Operation
public virtual Task<UpdateBatchPredictionResponse> UpdateBatchPredictionAsync(UpdateBatchPredictionRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = UpdateBatchPredictionRequestMarshaller.Instance;
options.ResponseUnmarshaller = UpdateBatchPredictionResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
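// Illustrative usage sketch (IDs and names are assumptions): renaming a BatchPrediction and
// confirming the change with GetBatchPrediction, as the remarks above suggest.
//
//   client.UpdateBatchPrediction("example-batchprediction-id", "Nightly churn predictions");
//   Console.WriteLine(client.GetBatchPrediction("example-batchprediction-id").Name);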
#region UpdateDataSource
///
/// Updates the DataSourceName of a DataSource.
///
///
///
/// You can use the GetDataSource operation to view the contents of the updated
/// data element.
///
///
/// The ID assigned to the DataSource during creation.
/// A new user-supplied name or description of the DataSource that will replace the current description.
///
/// The response from the UpdateDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateDataSource Operation
public virtual UpdateDataSourceResponse UpdateDataSource(string dataSourceId, string dataSourceName)
{
var request = new UpdateDataSourceRequest();
request.DataSourceId = dataSourceId;
request.DataSourceName = dataSourceName;
return UpdateDataSource(request);
}
///
/// Updates the DataSourceName of a DataSource.
///
///
///
/// You can use the GetDataSource operation to view the contents of the updated
/// data element.
///
///
/// Container for the necessary parameters to execute the UpdateDataSource service method.
///
/// The response from the UpdateDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateDataSource Operation
public virtual UpdateDataSourceResponse UpdateDataSource(UpdateDataSourceRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = UpdateDataSourceRequestMarshaller.Instance;
options.ResponseUnmarshaller = UpdateDataSourceResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Updates the DataSourceName of a DataSource.
///
///
///
/// You can use the GetDataSource operation to view the contents of the updated
/// data element.
///
///
/// The ID assigned to the DataSource during creation.
/// A new user-supplied name or description of the DataSource that will replace the current description.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the UpdateDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateDataSource Operation
public virtual Task<UpdateDataSourceResponse> UpdateDataSourceAsync(string dataSourceId, string dataSourceName, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new UpdateDataSourceRequest();
request.DataSourceId = dataSourceId;
request.DataSourceName = dataSourceName;
return UpdateDataSourceAsync(request, cancellationToken);
}
///
/// Updates the DataSourceName of a DataSource.
///
///
///
/// You can use the GetDataSource operation to view the contents of the updated
/// data element.
///
///
/// Container for the necessary parameters to execute the UpdateDataSource service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the UpdateDataSource service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateDataSource Operation
public virtual Task<UpdateDataSourceResponse> UpdateDataSourceAsync(UpdateDataSourceRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = UpdateDataSourceRequestMarshaller.Instance;
options.ResponseUnmarshaller = UpdateDataSourceResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
#endregion
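// Illustrative usage sketch (IDs and names are assumptions): renaming a DataSource and
// confirming the change with GetDataSource.
//
//   client.UpdateDataSource("example-datasource-id", "Training data (January)");
//   Console.WriteLine(client.GetDataSource("example-datasource-id").Name);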
#region UpdateEvaluation
///
/// Updates the EvaluationName of an Evaluation.
///
///
///
/// You can use the GetEvaluation operation to view the contents of the updated
/// data element.
///
///
/// The ID assigned to the Evaluation during creation.
/// A new user-supplied name or description of the Evaluation that will replace the current content.
///
/// The response from the UpdateEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateEvaluation Operation
public virtual UpdateEvaluationResponse UpdateEvaluation(string evaluationId, string evaluationName)
{
var request = new UpdateEvaluationRequest();
request.EvaluationId = evaluationId;
request.EvaluationName = evaluationName;
return UpdateEvaluation(request);
}
///
/// Updates the EvaluationName of an Evaluation.
///
///
///
/// You can use the GetEvaluation operation to view the contents of the updated
/// data element.
///
///
/// Container for the necessary parameters to execute the UpdateEvaluation service method.
///
/// The response from the UpdateEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateEvaluation Operation
public virtual UpdateEvaluationResponse UpdateEvaluation(UpdateEvaluationRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = UpdateEvaluationRequestMarshaller.Instance;
options.ResponseUnmarshaller = UpdateEvaluationResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Updates the EvaluationName of an Evaluation.
///
///
///
/// You can use the GetEvaluation operation to view the contents of the updated
/// data element.
///
///
/// The ID assigned to the Evaluation during creation.
/// A new user-supplied name or description of the Evaluation that will replace the current content.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the UpdateEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateEvaluation Operation
public virtual Task<UpdateEvaluationResponse> UpdateEvaluationAsync(string evaluationId, string evaluationName, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new UpdateEvaluationRequest();
request.EvaluationId = evaluationId;
request.EvaluationName = evaluationName;
return UpdateEvaluationAsync(request, cancellationToken);
}
///
/// Updates the EvaluationName of an Evaluation.
///
///
///
/// You can use the GetEvaluation operation to view the contents of the updated
/// data element.
///
///
/// Container for the necessary parameters to execute the UpdateEvaluation service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the UpdateEvaluation service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateEvaluation Operation
public virtual Task<UpdateEvaluationResponse> UpdateEvaluationAsync(UpdateEvaluationRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = UpdateEvaluationRequestMarshaller.Instance;
options.ResponseUnmarshaller = UpdateEvaluationResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
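// Illustrative usage sketch (not part of the generated client). The evaluation ID
// "ev-example-id" and the new name are hypothetical placeholders; the call shapes follow
// the overloads defined in this region.
//
//   var client = new AmazonMachineLearningClient();
//
//   // Rename an evaluation synchronously with the convenience overload.
//   UpdateEvaluationResponse renamed =
//       client.UpdateEvaluation("ev-example-id", "Renamed evaluation");
//
//   // Or asynchronously (in an async context), passing the full request object.
//   var response = await client.UpdateEvaluationAsync(new UpdateEvaluationRequest
//   {
//       EvaluationId = "ev-example-id",
//       EvaluationName = "Renamed evaluation"
//   });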
#endregion
#region UpdateMLModel
///
/// Updates the MLModelName and the ScoreThreshold of an MLModel.
///
///
///
/// You can use the GetMLModel operation to view the contents of the updated
/// data element.
///
///
/// The ID assigned to the MLModel during creation.
/// A user-supplied name or description of the MLModel.
/// The ScoreThreshold used in binary classification MLModel that marks the boundary
/// between a positive prediction and a negative prediction. Output values greater than or equal
/// to the ScoreThreshold receive a positive result from the MLModel, such as true. Output values
/// less than the ScoreThreshold receive a negative response from the MLModel, such as false.
///
/// The response from the UpdateMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateMLModel Operation
public virtual UpdateMLModelResponse UpdateMLModel(string mlModelId, string mlModelName, float scoreThreshold)
{
var request = new UpdateMLModelRequest();
request.MLModelId = mlModelId;
request.MLModelName = mlModelName;
request.ScoreThreshold = scoreThreshold;
return UpdateMLModel(request);
}
///
/// Updates the MLModelName and the ScoreThreshold of an MLModel.
///
///
///
/// You can use the GetMLModel operation to view the contents of the updated
/// data element.
///
///
/// Container for the necessary parameters to execute the UpdateMLModel service method.
///
/// The response from the UpdateMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateMLModel Operation
public virtual UpdateMLModelResponse UpdateMLModel(UpdateMLModelRequest request)
{
var options = new InvokeOptions();
options.RequestMarshaller = UpdateMLModelRequestMarshaller.Instance;
options.ResponseUnmarshaller = UpdateMLModelResponseUnmarshaller.Instance;
return Invoke(request, options);
}
///
/// Updates the MLModelName and the ScoreThreshold of an MLModel.
///
///
///
/// You can use the GetMLModel operation to view the contents of the updated
/// data element.
///
///
/// The ID assigned to the MLModel during creation.
/// A user-supplied name or description of the MLModel.
/// The ScoreThreshold used in binary classification MLModel that marks the boundary
/// between a positive prediction and a negative prediction. Output values greater than or equal
/// to the ScoreThreshold receive a positive result from the MLModel, such as true. Output values
/// less than the ScoreThreshold receive a negative response from the MLModel, such as false.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the UpdateMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateMLModel Operation
public virtual Task<UpdateMLModelResponse> UpdateMLModelAsync(string mlModelId, string mlModelName, float scoreThreshold, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var request = new UpdateMLModelRequest();
request.MLModelId = mlModelId;
request.MLModelName = mlModelName;
request.ScoreThreshold = scoreThreshold;
return UpdateMLModelAsync(request, cancellationToken);
}
///
/// Updates the MLModelName and the ScoreThreshold of an MLModel.
///
///
///
/// You can use the GetMLModel operation to view the contents of the updated
/// data element.
///
///
/// Container for the necessary parameters to execute the UpdateMLModel service method.
///
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
///
///
/// The response from the UpdateMLModel service method, as returned by MachineLearning.
///
/// An error on the server occurred when trying to process a request.
///
///
/// An error on the client occurred. Typically, the cause is an invalid input value.
///
///
/// A specified resource cannot be located.
///
/// REST API Reference for UpdateMLModel Operation
public virtual Task<UpdateMLModelResponse> UpdateMLModelAsync(UpdateMLModelRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
var options = new InvokeOptions();
options.RequestMarshaller = UpdateMLModelRequestMarshaller.Instance;
options.ResponseUnmarshaller = UpdateMLModelResponseUnmarshaller.Instance;
return InvokeAsync(request, options, cancellationToken);
}
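// Illustrative usage sketch (not part of the generated client). The model ID
// "ml-example-id", the new name, and the 0.75f threshold are hypothetical placeholders;
// the call shapes follow the overloads defined in this region.
//
//   var client = new AmazonMachineLearningClient();
//
//   // Rename a model and adjust its score threshold synchronously.
//   UpdateMLModelResponse updated =
//       client.UpdateMLModel("ml-example-id", "Renamed model", 0.75f);
//
//   // Or asynchronously (in an async context), passing the full request object.
//   var response = await client.UpdateMLModelAsync(new UpdateMLModelRequest
//   {
//       MLModelId = "ml-example-id",
//       MLModelName = "Renamed model",
//       ScoreThreshold = 0.75f
//   });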
#endregion
}
}