/*
* Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.sagemaker.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
 * @see AWS API Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribeTrainingJobResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {
    /**
     * Name of the model training job.
     */
    private String trainingJobName;
    /**
     * The Amazon Resource Name (ARN) of the training job.
     */
    private String trainingJobArn;
    /**
     * The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the training job was launched by
     * a hyperparameter tuning job.
     */
    private String tuningJobArn;
    /**
     * The Amazon Resource Name (ARN) of the SageMaker Ground Truth labeling job that created the transform or
     * training job.
     */
    private String labelingJobArn;
    /**
     * The Amazon Resource Name (ARN) of an AutoML job.
     */
    private String autoMLJobArn;
    /**
     * Information about the Amazon S3 location that is configured for storing model artifacts.
     */
    private ModelArtifacts modelArtifacts;
    /**
     * The status of the training job. SageMaker provides the following training job statuses:
     * <code>InProgress</code>, <code>Completed</code>, <code>Failed</code>, <code>Stopping</code>, and
     * <code>Stopped</code>. For more detailed information, see <code>SecondaryStatus</code> and
     * {@link #getTrainingJobStatus()}.
     */
    private String trainingJobStatus;
    /**
     * Provides detailed information about the state of the training job. For detailed information on the secondary
     * status of the training job, see <code>StatusMessage</code> under <code>SecondaryStatusTransition</code>.
     */
private String secondaryStatus;
    /**
     * If the training job failed, the reason it failed.
     */
    private String failureReason;
    /**
     * Algorithm-specific parameters.
     */
    private java.util.Map<String, String> hyperParameters;
    /**
     * Information about the algorithm used for training, and algorithm metadata.
     */
    private AlgorithmSpecification algorithmSpecification;
    /**
     * The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.
     */
    private String roleArn;
    /**
     * An array of <code>Channel</code> objects that describes each data input channel.
     */
    private java.util.List<Channel> inputDataConfig;
    /**
     * The S3 path where model artifacts that you configured when creating the job are stored. SageMaker creates
     * subfolders for model artifacts.
     */
    private OutputDataConfig outputDataConfig;
    /**
     * Resources, including ML compute instances and ML storage volumes, that are configured for model training.
     */
    private ResourceConfig resourceConfig;
    /**
     * A VpcConfig object that specifies the VPC that this training job has access to. For more information, see
     * Protect Training Jobs by Using an Amazon Virtual Private Cloud.
     */
    private VpcConfig vpcConfig;
    /**
     * Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training
     * job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap
     * model training costs.
     */
    private StoppingCondition stoppingCondition;
    /**
     * A timestamp that indicates when the training job was created.
     */
    private java.util.Date creationTime;
    /**
     * Indicates the time when the training job starts on training instances.
     */
    private java.util.Date trainingStartTime;
    /**
     * Indicates the time when the training job ends on training instances.
     */
    private java.util.Date trainingEndTime;
    /**
     * A timestamp that indicates when the status of the training job was last modified.
     */
    private java.util.Date lastModifiedTime;
    /**
     * A history of all of the secondary statuses that the training job has transitioned through.
     */
    private java.util.List<SecondaryStatusTransition> secondaryStatusTransitions;
    /**
     * A collection of <code>MetricData</code> objects that specify the names, values, and dates and times that the
     * training algorithm emitted to Amazon CloudWatch.
     */
    private java.util.List<MetricData> finalMetricDataList;
    /**
     * If you want to allow inbound or outbound network calls, except for calls between peers within a training
     * cluster for distributed training, choose <code>True</code>.
     */
    private Boolean enableNetworkIsolation;
    /**
     * To encrypt all communications between ML compute instances in distributed training, choose <code>True</code>.
     */
    private Boolean enableInterContainerTrafficEncryption;
    /**
     * A Boolean indicating whether managed spot training is enabled (<code>True</code>) or not (<code>False</code>).
     */
    private Boolean enableManagedSpotTraining;

    private CheckpointConfig checkpointConfig;
    /**
     * The training time in seconds.
     */
    private Integer trainingTimeInSeconds;
    /**
     * The billable time in seconds. Billable time refers to the absolute wall-clock time.
     */
    private Integer billableTimeInSeconds;

    private DebugHookConfig debugHookConfig;

    private ExperimentConfig experimentConfig;
    /**
     * Configuration information for Amazon SageMaker Debugger rules for debugging output tensors.
     */
    private java.util.List<DebugRuleConfiguration> debugRuleConfigurations;
    /**
     * Evaluation status of Amazon SageMaker Debugger rules for debugging on a training job.
     */
    private java.util.List<DebugRuleEvaluationStatus> debugRuleEvaluationStatuses;
    /**
     * Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics.
     */
    private java.util.List<ProfilerRuleConfiguration> profilerRuleConfigurations;
    /**
     * Evaluation status of Amazon SageMaker Debugger rules for profiling on a training job.
     */
    private java.util.List<ProfilerRuleEvaluationStatus> profilerRuleEvaluationStatuses;
    /**
     * Profiling status of a training job.
     */
    private String profilingStatus;
    /**
     * The number of times to retry the job when the job fails due to an <code>InternalServerError</code>.
     */
    private RetryStrategy retryStrategy;
    /**
     * The environment variables to set in the Docker container.
     */
    private java.util.Map<String, String> environment;
    /**
     * The status of the warm pool associated with the training job.
     */
    private WarmPoolStatus warmPoolStatus;

    /**
     * The status of the training job.
     * <p>
     * SageMaker provides the following training job statuses:
     * </p>
     * <ul>
     * <li><code>InProgress</code> - The training is in progress.</li>
     * <li><code>Completed</code> - The training job has completed.</li>
     * <li><code>Failed</code> - The training job has failed. To see the reason for the failure, see the
     * <code>FailureReason</code> field in the response to a <code>DescribeTrainingJobResponse</code> call.</li>
     * <li><code>Stopping</code> - The training job is stopping.</li>
     * <li><code>Stopped</code> - The training job has stopped.</li>
     * </ul>
     * <p>
     * For more detailed information, see <code>SecondaryStatus</code>.
     * </p>
     *
     * @param trainingJobStatus the status of the training job
     * @see TrainingJobStatus
     */
public void setTrainingJobStatus(String trainingJobStatus) {
this.trainingJobStatus = trainingJobStatus;
}
    /**
     * The status of the training job.
     * <p>
     * SageMaker provides the following training job statuses:
     * </p>
     * <ul>
     * <li><code>InProgress</code> - The training is in progress.</li>
     * <li><code>Completed</code> - The training job has completed.</li>
     * <li><code>Failed</code> - The training job has failed. To see the reason for the failure, see the
     * <code>FailureReason</code> field in the response to a <code>DescribeTrainingJobResponse</code> call.</li>
     * <li><code>Stopping</code> - The training job is stopping.</li>
     * <li><code>Stopped</code> - The training job has stopped.</li>
     * </ul>
     * <p>
     * For more detailed information, see <code>SecondaryStatus</code>.
     * </p>
     *
     * @return the status of the training job
     * @see TrainingJobStatus
     */
public String getTrainingJobStatus() {
return this.trainingJobStatus;
}
    /**
     * The status of the training job. SageMaker provides the following training job statuses:
     * <code>InProgress</code>, <code>Completed</code>, <code>Failed</code>, <code>Stopping</code>, and
     * <code>Stopped</code>. For more detailed information, see <code>SecondaryStatus</code> and
     * {@link #getTrainingJobStatus()}.
     *
     * @param trainingJobStatus the status of the training job
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see TrainingJobStatus
     */
public DescribeTrainingJobResult withTrainingJobStatus(String trainingJobStatus) {
setTrainingJobStatus(trainingJobStatus);
return this;
}
    /**
     * The status of the training job. SageMaker provides the following training job statuses:
     * <code>InProgress</code>, <code>Completed</code>, <code>Failed</code>, <code>Stopping</code>, and
     * <code>Stopped</code>. For more detailed information, see <code>SecondaryStatus</code> and
     * {@link #getTrainingJobStatus()}.
     *
     * @param trainingJobStatus the status of the training job
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see TrainingJobStatus
     */
public DescribeTrainingJobResult withTrainingJobStatus(TrainingJobStatus trainingJobStatus) {
this.trainingJobStatus = trainingJobStatus.toString();
return this;
}
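    /*
     * Usage sketch (illustrative, not part of this generated model): polling the job status with an
     * AmazonSageMaker client. The "sageMaker" client variable, the request type, and the job name are
     * assumptions made for the example.
     *
     * DescribeTrainingJobResult result = sageMaker.describeTrainingJob(
     *         new DescribeTrainingJobRequest().withTrainingJobName("my-training-job"));
     * TrainingJobStatus status = TrainingJobStatus.fromValue(result.getTrainingJobStatus());
     * if (status == TrainingJobStatus.Failed) {
     *     System.out.println("Job failed: " + result.getFailureReason());
     * }
     */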
    /**
     * Provides detailed information about the state of the training job. For detailed information on the secondary
     * status of the training job, see <code>StatusMessage</code> under <code>SecondaryStatusTransition</code>.
     * <p>
     * SageMaker provides primary statuses and secondary statuses that apply to each of them:
     * </p>
     * <ul>
     * <li><code>Starting</code> - Starting the training job.</li>
     * <li><code>Downloading</code> - An optional stage for algorithms that support <code>File</code> training input
     * mode. It indicates that data is being downloaded to the ML storage volumes.</li>
     * <li><code>Training</code> - Training is in progress.</li>
     * <li><code>Interrupted</code> - The job stopped because the managed spot training instances were
     * interrupted.</li>
     * <li><code>Uploading</code> - Training is complete and the model artifacts are being uploaded to the S3
     * location.</li>
     * <li><code>Completed</code> - The training job has completed.</li>
     * <li><code>Failed</code> - The training job has failed. The reason for the failure is returned in the
     * <code>FailureReason</code> field of <code>DescribeTrainingJobResponse</code>.</li>
     * <li><code>MaxRuntimeExceeded</code> - The job stopped because it exceeded the maximum allowed runtime.</li>
     * <li><code>MaxWaitTimeExceeded</code> - The job stopped because it exceeded the maximum allowed wait time.</li>
     * <li><code>Stopped</code> - The training job has stopped.</li>
     * <li><code>Stopping</code> - Stopping the training job.</li>
     * </ul>
     * <p>
     * Valid values for <code>SecondaryStatus</code> are subject to change.
     * </p>
     * <p>
     * We no longer support the following secondary statuses: <code>LaunchingMLInstances</code>,
     * <code>PreparingTraining</code>, and <code>DownloadingTrainingImage</code>.
     * </p>
     *
     * @param secondaryStatus the secondary status of the training job; see the list of statuses above
     * @see SecondaryStatus
     */
    public void setSecondaryStatus(String secondaryStatus) {
        this.secondaryStatus = secondaryStatus;
    }

    /**
     * Provides detailed information about the state of the training job. For detailed information on the secondary
     * status of the training job, see <code>StatusMessage</code> under <code>SecondaryStatusTransition</code> and the
     * list of statuses on {@link #setSecondaryStatus(String)}.
     *
     * @return the secondary status of the training job
     * @see SecondaryStatus
     */
    public String getSecondaryStatus() {
        return this.secondaryStatus;
    }

    /**
     * Provides detailed information about the state of the training job. For detailed information on the secondary
     * status of the training job, see <code>StatusMessage</code> under <code>SecondaryStatusTransition</code> and the
     * list of statuses on {@link #setSecondaryStatus(String)}.
     *
     * @param secondaryStatus the secondary status of the training job
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see SecondaryStatus
     */
    public DescribeTrainingJobResult withSecondaryStatus(String secondaryStatus) {
        setSecondaryStatus(secondaryStatus);
        return this;
    }

    /**
     * Provides detailed information about the state of the training job. For detailed information on the secondary
     * status of the training job, see <code>StatusMessage</code> under <code>SecondaryStatusTransition</code> and the
     * list of statuses on {@link #setSecondaryStatus(String)}.
     *
     * @param secondaryStatus the secondary status of the training job
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see SecondaryStatus
     */
    public DescribeTrainingJobResult withSecondaryStatus(SecondaryStatus secondaryStatus) {
        this.secondaryStatus = secondaryStatus.toString();
        return this;
    }
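    /*
     * Usage sketch (illustrative, not part of this generated model): inspecting the secondary status and
     * its transition history. A DescribeTrainingJobResult named "result" is assumed for the example.
     *
     * System.out.println("Secondary status: " + result.getSecondaryStatus());
     * for (SecondaryStatusTransition transition : result.getSecondaryStatusTransitions()) {
     *     System.out.println(transition.getStatus() + ": " + transition.getStatusMessage());
     * }
     */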
    /**
     * If the training job failed, the reason it failed.
     *
     * @param failureReason the reason the training job failed, if it failed
     */
    public void setFailureReason(String failureReason) {
        this.failureReason = failureReason;
    }

    /**
     * If the training job failed, the reason it failed.
     *
     * @return the reason the training job failed, if it failed
     */
    public String getFailureReason() {
        return this.failureReason;
    }

    /**
     * If the training job failed, the reason it failed.
     *
     * @param failureReason the reason the training job failed, if it failed
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withFailureReason(String failureReason) {
        setFailureReason(failureReason);
        return this;
    }

    /**
     * Algorithm-specific parameters.
     *
     * @return the algorithm-specific parameters
     */
    public java.util.Map<String, String> getHyperParameters() {
        return hyperParameters;
    }

    /**
     * Algorithm-specific parameters.
     *
     * @param hyperParameters the algorithm-specific parameters
     */
    public void setHyperParameters(java.util.Map<String, String> hyperParameters) {
        this.hyperParameters = hyperParameters;
    }

    /**
     * Algorithm-specific parameters.
     *
     * @param hyperParameters the algorithm-specific parameters
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withHyperParameters(java.util.Map<String, String> hyperParameters) {
        setHyperParameters(hyperParameters);
        return this;
    }

    /**
     * Information about the algorithm used for training, and algorithm metadata.
     *
     * @param algorithmSpecification the algorithm used for training, and algorithm metadata
     */
    public void setAlgorithmSpecification(AlgorithmSpecification algorithmSpecification) {
        this.algorithmSpecification = algorithmSpecification;
    }

    /**
     * Information about the algorithm used for training, and algorithm metadata.
     *
     * @return the algorithm used for training, and algorithm metadata
     */
    public AlgorithmSpecification getAlgorithmSpecification() {
        return this.algorithmSpecification;
    }

    /**
     * Information about the algorithm used for training, and algorithm metadata.
     *
     * @param algorithmSpecification the algorithm used for training, and algorithm metadata
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withAlgorithmSpecification(AlgorithmSpecification algorithmSpecification) {
        setAlgorithmSpecification(algorithmSpecification);
        return this;
    }

    /**
     * The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.
     *
     * @param roleArn the IAM role configured for the training job
     */
    public void setRoleArn(String roleArn) {
        this.roleArn = roleArn;
    }

    /**
     * The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.
     *
     * @return the IAM role configured for the training job
     */
    public String getRoleArn() {
        return this.roleArn;
    }

    /**
     * The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.
     *
     * @param roleArn the IAM role configured for the training job
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withRoleArn(String roleArn) {
        setRoleArn(roleArn);
        return this;
    }
    /**
     * An array of <code>Channel</code> objects that describes each data input channel.
     *
     * @return the data input channels
     */
    public java.util.List<Channel> getInputDataConfig() {
        return inputDataConfig;
    }

    /**
     * An array of <code>Channel</code> objects that describes each data input channel.
     *
     * @param inputDataConfig the data input channels
     */
    public void setInputDataConfig(java.util.Collection<Channel> inputDataConfig) {
        if (inputDataConfig == null) {
            this.inputDataConfig = null;
            return;
        }
        this.inputDataConfig = new java.util.ArrayList<Channel>(inputDataConfig);
    }

    /**
     * An array of <code>Channel</code> objects that describes each data input channel.
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setInputDataConfig(java.util.Collection)} or {@link #withInputDataConfig(java.util.Collection)} if you
     * want to override the existing values.
     * </p>
     *
     * @param inputDataConfig the data input channels
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withInputDataConfig(Channel... inputDataConfig) {
        if (this.inputDataConfig == null) {
            setInputDataConfig(new java.util.ArrayList<Channel>(inputDataConfig.length));
        }
        for (Channel ele : inputDataConfig) {
            this.inputDataConfig.add(ele);
        }
        return this;
    }

    /**
     * An array of <code>Channel</code> objects that describes each data input channel.
     *
     * @param inputDataConfig the data input channels
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withInputDataConfig(java.util.Collection<Channel> inputDataConfig) {
        setInputDataConfig(inputDataConfig);
        return this;
    }

    /**
     * The S3 path where model artifacts that you configured when creating the job are stored. SageMaker creates
     * subfolders for model artifacts.
     *
     * @param outputDataConfig the output data configuration
     */
    public void setOutputDataConfig(OutputDataConfig outputDataConfig) {
        this.outputDataConfig = outputDataConfig;
    }

    /**
     * The S3 path where model artifacts that you configured when creating the job are stored. SageMaker creates
     * subfolders for model artifacts.
     *
     * @return the output data configuration
     */
    public OutputDataConfig getOutputDataConfig() {
        return this.outputDataConfig;
    }

    /**
     * The S3 path where model artifacts that you configured when creating the job are stored. SageMaker creates
     * subfolders for model artifacts.
     *
     * @param outputDataConfig the output data configuration
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withOutputDataConfig(OutputDataConfig outputDataConfig) {
        setOutputDataConfig(outputDataConfig);
        return this;
    }

    /**
     * Resources, including ML compute instances and ML storage volumes, that are configured for model training.
     *
     * @param resourceConfig the resources configured for model training
     */
    public void setResourceConfig(ResourceConfig resourceConfig) {
        this.resourceConfig = resourceConfig;
    }

    /**
     * Resources, including ML compute instances and ML storage volumes, that are configured for model training.
     *
     * @return the resources configured for model training
     */
    public ResourceConfig getResourceConfig() {
        return this.resourceConfig;
    }

    /**
     * Resources, including ML compute instances and ML storage volumes, that are configured for model training.
     *
     * @param resourceConfig the resources configured for model training
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withResourceConfig(ResourceConfig resourceConfig) {
        setResourceConfig(resourceConfig);
        return this;
    }
    /**
     * A VpcConfig object that specifies the VPC that this training job has access to. For more information, see
     * Protect Training Jobs by Using an Amazon Virtual Private Cloud.
     *
     * @param vpcConfig the VPC configuration for this training job
     */
    public void setVpcConfig(VpcConfig vpcConfig) {
        this.vpcConfig = vpcConfig;
    }

    /**
     * A VpcConfig object that specifies the VPC that this training job has access to. For more information, see
     * Protect Training Jobs by Using an Amazon Virtual Private Cloud.
     *
     * @return the VPC configuration for this training job
     */
    public VpcConfig getVpcConfig() {
        return this.vpcConfig;
    }

    /**
     * A VpcConfig object that specifies the VPC that this training job has access to. For more information, see
     * Protect Training Jobs by Using an Amazon Virtual Private Cloud.
     *
     * @param vpcConfig the VPC configuration for this training job
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withVpcConfig(VpcConfig vpcConfig) {
        setVpcConfig(vpcConfig);
        return this;
    }

    /**
     * Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training
     * job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap
     * model training costs.
     * <p>
     * To stop a job, SageMaker sends the algorithm the <code>SIGTERM</code> signal, which delays job termination for
     * 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training
     * are not lost.
     * </p>
     *
     * @param stoppingCondition the stopping condition for the training job
     */
public void setStoppingCondition(StoppingCondition stoppingCondition) {
this.stoppingCondition = stoppingCondition;
}
    /**
     * Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training
     * job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap
     * model training costs.
     * <p>
     * To stop a job, SageMaker sends the algorithm the <code>SIGTERM</code> signal, which delays job termination for
     * 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training
     * are not lost.
     * </p>
     *
     * @return the stopping condition for the training job
     */
public StoppingCondition getStoppingCondition() {
return this.stoppingCondition;
}
    /**
     * Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training
     * job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap
     * model training costs.
     * <p>
     * To stop a job, SageMaker sends the algorithm the <code>SIGTERM</code> signal, which delays job termination for
     * 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training
     * are not lost.
     * </p>
     *
     * @param stoppingCondition the stopping condition for the training job
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public DescribeTrainingJobResult withStoppingCondition(StoppingCondition stoppingCondition) {
setStoppingCondition(stoppingCondition);
return this;
}
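    /*
     * Illustrative sketch (an assumption, not SageMaker-provided code): a training container can react to
     * the SIGTERM that SageMaker sends when a job is stopped, using the roughly 120-second window to
     * persist model artifacts before termination. A JVM-based algorithm might register a shutdown hook;
     * saveModelArtifacts() is a hypothetical helper.
     *
     * Runtime.getRuntime().addShutdownHook(new Thread(() -> {
     *     // persist checkpoints/model artifacts to the configured output location before the JVM exits
     *     saveModelArtifacts();
     * }));
     */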
    /**
     * A timestamp that indicates when the training job was created.
     *
     * @param creationTime the time the training job was created
     */
    public void setCreationTime(java.util.Date creationTime) {
        this.creationTime = creationTime;
    }

    /**
     * A timestamp that indicates when the training job was created.
     *
     * @return the time the training job was created
     */
    public java.util.Date getCreationTime() {
        return this.creationTime;
    }

    /**
     * A timestamp that indicates when the training job was created.
     *
     * @param creationTime the time the training job was created
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withCreationTime(java.util.Date creationTime) {
        setCreationTime(creationTime);
        return this;
    }

    /**
     * Indicates the time when the training job starts on training instances. You are billed for the time interval
     * between this time and the value of <code>TrainingEndTime</code>. The start time in CloudWatch Logs might be
     * later than this time. The difference is due to the time it takes to download the training data and to the size
     * of the training container.
     *
     * @param trainingStartTime the time the training job started on training instances
     */
public void setTrainingStartTime(java.util.Date trainingStartTime) {
this.trainingStartTime = trainingStartTime;
}
    /**
     * Indicates the time when the training job starts on training instances. You are billed for the time interval
     * between this time and the value of <code>TrainingEndTime</code>. The start time in CloudWatch Logs might be
     * later than this time. The difference is due to the time it takes to download the training data and to the size
     * of the training container.
     *
     * @return the time the training job started on training instances
     */
public java.util.Date getTrainingStartTime() {
return this.trainingStartTime;
}
    /**
     * Indicates the time when the training job starts on training instances. You are billed for the time interval
     * between this time and the value of <code>TrainingEndTime</code>. The start time in CloudWatch Logs might be
     * later than this time. The difference is due to the time it takes to download the training data and to the size
     * of the training container.
     *
     * @param trainingStartTime the time the training job started on training instances
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public DescribeTrainingJobResult withTrainingStartTime(java.util.Date trainingStartTime) {
setTrainingStartTime(trainingStartTime);
return this;
}
    /**
     * Indicates the time when the training job ends on training instances. You are billed for the time interval
     * between the value of <code>TrainingStartTime</code> and this time. For successful jobs and stopped jobs, this
     * is the time after model artifacts are uploaded. For failed jobs, this is the time when SageMaker detects a job
     * failure.
     *
     * @param trainingEndTime the time the training job ended on training instances
     */
public void setTrainingEndTime(java.util.Date trainingEndTime) {
this.trainingEndTime = trainingEndTime;
}
    /**
     * Indicates the time when the training job ends on training instances. You are billed for the time interval
     * between the value of <code>TrainingStartTime</code> and this time. For successful jobs and stopped jobs, this
     * is the time after model artifacts are uploaded. For failed jobs, this is the time when SageMaker detects a job
     * failure.
     *
     * @return the time the training job ended on training instances
     */
public java.util.Date getTrainingEndTime() {
return this.trainingEndTime;
}
    /**
     * Indicates the time when the training job ends on training instances. You are billed for the time interval
     * between the value of <code>TrainingStartTime</code> and this time. For successful jobs and stopped jobs, this
     * is the time after model artifacts are uploaded. For failed jobs, this is the time when SageMaker detects a job
     * failure.
     *
     * @param trainingEndTime the time the training job ended on training instances
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public DescribeTrainingJobResult withTrainingEndTime(java.util.Date trainingEndTime) {
setTrainingEndTime(trainingEndTime);
return this;
}
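    /*
     * Usage sketch (illustrative): the billed interval runs from TrainingStartTime to TrainingEndTime.
     * A DescribeTrainingJobResult named "result" with both timestamps populated is assumed.
     *
     * long billedSeconds = java.time.Duration.between(
     *         result.getTrainingStartTime().toInstant(),
     *         result.getTrainingEndTime().toInstant()).getSeconds();
     * System.out.println("Billed interval (seconds): " + billedSeconds);
     */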
    /**
     * A timestamp that indicates when the status of the training job was last modified.
     *
     * @param lastModifiedTime the time the status of the training job was last modified
     */
    public void setLastModifiedTime(java.util.Date lastModifiedTime) {
        this.lastModifiedTime = lastModifiedTime;
    }

    /**
     * A timestamp that indicates when the status of the training job was last modified.
     *
     * @return the time the status of the training job was last modified
     */
    public java.util.Date getLastModifiedTime() {
        return this.lastModifiedTime;
    }

    /**
     * A timestamp that indicates when the status of the training job was last modified.
     *
     * @param lastModifiedTime the time the status of the training job was last modified
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withLastModifiedTime(java.util.Date lastModifiedTime) {
        setLastModifiedTime(lastModifiedTime);
        return this;
    }

    /**
     * A history of all of the secondary statuses that the training job has transitioned through.
     *
     * @return the secondary status transition history
     */
    public java.util.List<SecondaryStatusTransition> getSecondaryStatusTransitions() {
        return secondaryStatusTransitions;
    }

    /**
     * A history of all of the secondary statuses that the training job has transitioned through.
     *
     * @param secondaryStatusTransitions the secondary status transition history
     */
    public void setSecondaryStatusTransitions(java.util.Collection<SecondaryStatusTransition> secondaryStatusTransitions) {
        if (secondaryStatusTransitions == null) {
            this.secondaryStatusTransitions = null;
            return;
        }
        this.secondaryStatusTransitions = new java.util.ArrayList<SecondaryStatusTransition>(secondaryStatusTransitions);
    }

    /**
     * A history of all of the secondary statuses that the training job has transitioned through.
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setSecondaryStatusTransitions(java.util.Collection)} or
     * {@link #withSecondaryStatusTransitions(java.util.Collection)} if you want to override the existing values.
     * </p>
     *
     * @param secondaryStatusTransitions the secondary status transition history
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withSecondaryStatusTransitions(SecondaryStatusTransition... secondaryStatusTransitions) {
        if (this.secondaryStatusTransitions == null) {
            setSecondaryStatusTransitions(new java.util.ArrayList<SecondaryStatusTransition>(secondaryStatusTransitions.length));
        }
        for (SecondaryStatusTransition ele : secondaryStatusTransitions) {
            this.secondaryStatusTransitions.add(ele);
        }
        return this;
    }

    /**
     * A history of all of the secondary statuses that the training job has transitioned through.
     *
     * @param secondaryStatusTransitions the secondary status transition history
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withSecondaryStatusTransitions(java.util.Collection<SecondaryStatusTransition> secondaryStatusTransitions) {
        setSecondaryStatusTransitions(secondaryStatusTransitions);
        return this;
    }
    /**
     * A collection of <code>MetricData</code> objects that specify the names, values, and dates and times that the
     * training algorithm emitted to Amazon CloudWatch.
     *
     * @return the final metric data emitted to Amazon CloudWatch
     */
    public java.util.List<MetricData> getFinalMetricDataList() {
        return finalMetricDataList;
    }

    /**
     * A collection of <code>MetricData</code> objects that specify the names, values, and dates and times that the
     * training algorithm emitted to Amazon CloudWatch.
     *
     * @param finalMetricDataList the final metric data emitted to Amazon CloudWatch
     */
    public void setFinalMetricDataList(java.util.Collection<MetricData> finalMetricDataList) {
        if (finalMetricDataList == null) {
            this.finalMetricDataList = null;
            return;
        }
        this.finalMetricDataList = new java.util.ArrayList<MetricData>(finalMetricDataList);
    }

    /**
     * A collection of <code>MetricData</code> objects that specify the names, values, and dates and times that the
     * training algorithm emitted to Amazon CloudWatch.
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setFinalMetricDataList(java.util.Collection)} or {@link #withFinalMetricDataList(java.util.Collection)}
     * if you want to override the existing values.
     * </p>
     *
     * @param finalMetricDataList the final metric data emitted to Amazon CloudWatch
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withFinalMetricDataList(MetricData... finalMetricDataList) {
        if (this.finalMetricDataList == null) {
            setFinalMetricDataList(new java.util.ArrayList<MetricData>(finalMetricDataList.length));
        }
        for (MetricData ele : finalMetricDataList) {
            this.finalMetricDataList.add(ele);
        }
        return this;
    }

    /**
     * A collection of <code>MetricData</code> objects that specify the names, values, and dates and times that the
     * training algorithm emitted to Amazon CloudWatch.
     *
     * @param finalMetricDataList the final metric data emitted to Amazon CloudWatch
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withFinalMetricDataList(java.util.Collection<MetricData> finalMetricDataList) {
        setFinalMetricDataList(finalMetricDataList);
        return this;
    }
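    /*
     * Usage sketch (illustrative): printing the final metrics the algorithm emitted to CloudWatch,
     * assuming a DescribeTrainingJobResult named "result".
     *
     * for (MetricData metric : result.getFinalMetricDataList()) {
     *     System.out.println(metric.getMetricName() + " = " + metric.getValue());
     * }
     */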
    /**
     * If you want to allow inbound or outbound network calls, except for calls between peers within a training
     * cluster for distributed training, choose <code>True</code>. If you enable network isolation for training jobs
     * that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through
     * the specified VPC, but the training container does not have network access.
     *
     * @param enableNetworkIsolation whether network isolation is enabled for the training job
     */
public void setEnableNetworkIsolation(Boolean enableNetworkIsolation) {
this.enableNetworkIsolation = enableNetworkIsolation;
}
    /**
     * If you want to allow inbound or outbound network calls, except for calls between peers within a training
     * cluster for distributed training, choose <code>True</code>. If you enable network isolation for training jobs
     * that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through
     * the specified VPC, but the training container does not have network access.
     *
     * @return whether network isolation is enabled for the training job
     */
public Boolean getEnableNetworkIsolation() {
return this.enableNetworkIsolation;
}
    /**
     * If you want to allow inbound or outbound network calls, except for calls between peers within a training
     * cluster for distributed training, choose <code>True</code>. If you enable network isolation for training jobs
     * that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through
     * the specified VPC, but the training container does not have network access.
     *
     * @param enableNetworkIsolation whether network isolation is enabled for the training job
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public DescribeTrainingJobResult withEnableNetworkIsolation(Boolean enableNetworkIsolation) {
setEnableNetworkIsolation(enableNetworkIsolation);
return this;
}
    /**
     * If you want to allow inbound or outbound network calls, except for calls between peers within a training
     * cluster for distributed training, choose <code>True</code>. If you enable network isolation for training jobs
     * that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through
     * the specified VPC, but the training container does not have network access.
     *
     * @return whether network isolation is enabled for the training job
     */
public Boolean isEnableNetworkIsolation() {
return this.enableNetworkIsolation;
}
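    /*
     * Usage sketch (illustrative): checking whether the job ran with network isolation, assuming a
     * DescribeTrainingJobResult named "result".
     *
     * if (Boolean.TRUE.equals(result.isEnableNetworkIsolation())) {
     *     System.out.println("Training container had no network access.");
     * }
     */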
    /**
     * To encrypt all communications between ML compute instances in distributed training, choose <code>True</code>.
     * Encryption provides greater security for distributed training, but training might take longer. How long it
     * takes depends on the amount of communication between compute instances, especially if you use deep learning
     * algorithms in distributed training.
     *
     * @param enableInterContainerTrafficEncryption whether inter-container traffic encryption is enabled
     */
public void setEnableInterContainerTrafficEncryption(Boolean enableInterContainerTrafficEncryption) {
this.enableInterContainerTrafficEncryption = enableInterContainerTrafficEncryption;
}
    /**
     * To encrypt all communications between ML compute instances in distributed training, choose <code>True</code>.
     * Encryption provides greater security for distributed training, but training might take longer. How long it
     * takes depends on the amount of communication between compute instances, especially if you use deep learning
     * algorithms in distributed training.
     *
     * @return whether inter-container traffic encryption is enabled
     */
public Boolean getEnableInterContainerTrafficEncryption() {
return this.enableInterContainerTrafficEncryption;
}
    /**
     * To encrypt all communications between ML compute instances in distributed training, choose <code>True</code>.
     * Encryption provides greater security for distributed training, but training might take longer. How long it
     * takes depends on the amount of communication between compute instances, especially if you use deep learning
     * algorithms in distributed training.
     *
     * @param enableInterContainerTrafficEncryption whether inter-container traffic encryption is enabled
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public DescribeTrainingJobResult withEnableInterContainerTrafficEncryption(Boolean enableInterContainerTrafficEncryption) {
setEnableInterContainerTrafficEncryption(enableInterContainerTrafficEncryption);
return this;
}
    /**
     * To encrypt all communications between ML compute instances in distributed training, choose <code>True</code>.
     * Encryption provides greater security for distributed training, but training might take longer. How long it
     * takes depends on the amount of communication between compute instances, especially if you use deep learning
     * algorithms in distributed training.
     *
     * @return whether inter-container traffic encryption is enabled
     */
public Boolean isEnableInterContainerTrafficEncryption() {
return this.enableInterContainerTrafficEncryption;
}
    /**
     * A Boolean indicating whether managed spot training is enabled (<code>True</code>) or not (<code>False</code>).
     *
     * @param enableManagedSpotTraining whether managed spot training is enabled
     */
public void setEnableManagedSpotTraining(Boolean enableManagedSpotTraining) {
this.enableManagedSpotTraining = enableManagedSpotTraining;
}
    /**
     * A Boolean indicating whether managed spot training is enabled (<code>True</code>) or not (<code>False</code>).
     *
     * @return whether managed spot training is enabled
     */
public Boolean getEnableManagedSpotTraining() {
return this.enableManagedSpotTraining;
}
    /**
     * A Boolean indicating whether managed spot training is enabled (<code>True</code>) or not (<code>False</code>).
     *
     * @param enableManagedSpotTraining whether managed spot training is enabled
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public DescribeTrainingJobResult withEnableManagedSpotTraining(Boolean enableManagedSpotTraining) {
setEnableManagedSpotTraining(enableManagedSpotTraining);
return this;
}
    /**
     * A Boolean indicating whether managed spot training is enabled (<code>True</code>) or not (<code>False</code>).
     *
     * @return whether managed spot training is enabled
     */
public Boolean isEnableManagedSpotTraining() {
return this.enableManagedSpotTraining;
}
/**
* @param checkpointConfig
*/
public void setCheckpointConfig(CheckpointConfig checkpointConfig) {
this.checkpointConfig = checkpointConfig;
}
/**
* @return
*/
public CheckpointConfig getCheckpointConfig() {
return this.checkpointConfig;
}
/**
* @param checkpointConfig
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DescribeTrainingJobResult withCheckpointConfig(CheckpointConfig checkpointConfig) {
setCheckpointConfig(checkpointConfig);
return this;
}
/**
     * The training time in seconds.
     *
     * @param trainingTimeInSeconds the training time in seconds
     */
    public void setTrainingTimeInSeconds(Integer trainingTimeInSeconds) {
        this.trainingTimeInSeconds = trainingTimeInSeconds;
    }

    /**
     * The training time in seconds.
     *
     * @return the training time in seconds
     */
    public Integer getTrainingTimeInSeconds() {
        return this.trainingTimeInSeconds;
    }

    /**
     * The training time in seconds.
     *
     * @param trainingTimeInSeconds the training time in seconds
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTrainingJobResult withTrainingTimeInSeconds(Integer trainingTimeInSeconds) {
        setTrainingTimeInSeconds(trainingTimeInSeconds);
        return this;
    }

    /**
     * The billable time in seconds. Billable time refers to the absolute wall-clock time.
     * <p>
     * Multiply <code>BillableTimeInSeconds</code> by the number of instances (<code>InstanceCount</code>) in your
     * training cluster to get the total compute time SageMaker bills you if you run distributed training. The
     * formula is as follows: <code>BillableTimeInSeconds * InstanceCount</code>.
     * </p>
     * <p>
     * You can calculate the savings from using managed spot training using the formula
     * <code>(1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100</code>. For example, if
     * <code>BillableTimeInSeconds</code> is 100 and <code>TrainingTimeInSeconds</code> is 500, the savings is 80%.
     * </p>
     *
     * @param billableTimeInSeconds the billable time in seconds
     */
public void setBillableTimeInSeconds(Integer billableTimeInSeconds) {
this.billableTimeInSeconds = billableTimeInSeconds;
}
    /**
     * The billable time in seconds. Billable time refers to the absolute wall-clock time.
     * <p>
     * Multiply <code>BillableTimeInSeconds</code> by the number of instances (<code>InstanceCount</code>) in your
     * training cluster to get the total compute time SageMaker bills you if you run distributed training. The
     * formula is as follows: <code>BillableTimeInSeconds * InstanceCount</code>.
     * </p>
     * <p>
     * You can calculate the savings from using managed spot training using the formula
     * <code>(1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100</code>. For example, if
     * <code>BillableTimeInSeconds</code> is 100 and <code>TrainingTimeInSeconds</code> is 500, the savings is 80%.
     * </p>
     *
     * @return the billable time in seconds
     */
public Integer getBillableTimeInSeconds() {
return this.billableTimeInSeconds;
}
    /**
     * The billable time in seconds. Billable time refers to the absolute wall-clock time.
     * <p>
     * Multiply <code>BillableTimeInSeconds</code> by the number of instances (<code>InstanceCount</code>) in your
     * training cluster to get the total compute time SageMaker bills you if you run distributed training. The
     * formula is as follows: <code>BillableTimeInSeconds * InstanceCount</code>.
     * </p>
     * <p>
     * You can calculate the savings from using managed spot training using the formula
     * <code>(1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100</code>. For example, if
     * <code>BillableTimeInSeconds</code> is 100 and <code>TrainingTimeInSeconds</code> is 500, the savings is 80%.
     * </p>
     *
     * @param billableTimeInSeconds the billable time in seconds
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public DescribeTrainingJobResult withBillableTimeInSeconds(Integer billableTimeInSeconds) {
setBillableTimeInSeconds(billableTimeInSeconds);
return this;
}
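
    /**
     * Illustrative sketch only (not part of the generated API): the managed spot training savings formula
     * described in the BillableTimeInSeconds documentation above, expressed as a small helper. The method
     * name and the null/zero guards are assumptions made for this example.
     */
    private static Double managedSpotSavingsPercent(Integer billableTimeInSeconds, Integer trainingTimeInSeconds) {
        if (billableTimeInSeconds == null || trainingTimeInSeconds == null || trainingTimeInSeconds == 0) {
            return null;
        }
        // (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100, e.g. (1 - 100 / 500) * 100 = 80%.
        return (1.0 - (double) billableTimeInSeconds / trainingTimeInSeconds) * 100.0;
    }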
/**
* @param debugHookConfig
*/
public void setDebugHookConfig(DebugHookConfig debugHookConfig) {
this.debugHookConfig = debugHookConfig;
}
/**
* @return
*/
public DebugHookConfig getDebugHookConfig() {
return this.debugHookConfig;
}
/**
* @param debugHookConfig
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DescribeTrainingJobResult withDebugHookConfig(DebugHookConfig debugHookConfig) {
setDebugHookConfig(debugHookConfig);
return this;
}
/**
* @param experimentConfig
*/
public void setExperimentConfig(ExperimentConfig experimentConfig) {
this.experimentConfig = experimentConfig;
}
/**
* @return
*/
public ExperimentConfig getExperimentConfig() {
return this.experimentConfig;
}
/**
* @param experimentConfig
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DescribeTrainingJobResult withExperimentConfig(ExperimentConfig experimentConfig) {
setExperimentConfig(experimentConfig);
return this;
}
/**
*
* Configuration information for Amazon SageMaker Debugger rules for debugging output tensors. *
* * @return Configuration information for Amazon SageMaker Debugger rules for debugging output tensors. */ public java.util.List* Configuration information for Amazon SageMaker Debugger rules for debugging output tensors. *
* * @param debugRuleConfigurations * Configuration information for Amazon SageMaker Debugger rules for debugging output tensors. */ public void setDebugRuleConfigurations(java.util.Collection* Configuration information for Amazon SageMaker Debugger rules for debugging output tensors. *
** NOTE: This method appends the values to the existing list (if any). Use * {@link #setDebugRuleConfigurations(java.util.Collection)} or * {@link #withDebugRuleConfigurations(java.util.Collection)} if you want to override the existing values. *
* * @param debugRuleConfigurations * Configuration information for Amazon SageMaker Debugger rules for debugging output tensors. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeTrainingJobResult withDebugRuleConfigurations(DebugRuleConfiguration... debugRuleConfigurations) { if (this.debugRuleConfigurations == null) { setDebugRuleConfigurations(new java.util.ArrayList* Configuration information for Amazon SageMaker Debugger rules for debugging output tensors. *
* * @param debugRuleConfigurations * Configuration information for Amazon SageMaker Debugger rules for debugging output tensors. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeTrainingJobResult withDebugRuleConfigurations(java.util.Collection* Evaluation status of Amazon SageMaker Debugger rules for debugging on a training job. *
* * @return Evaluation status of Amazon SageMaker Debugger rules for debugging on a training job. */ public java.util.List* Evaluation status of Amazon SageMaker Debugger rules for debugging on a training job. *
* * @param debugRuleEvaluationStatuses * Evaluation status of Amazon SageMaker Debugger rules for debugging on a training job. */ public void setDebugRuleEvaluationStatuses(java.util.Collection* Evaluation status of Amazon SageMaker Debugger rules for debugging on a training job. *
** NOTE: This method appends the values to the existing list (if any). Use * {@link #setDebugRuleEvaluationStatuses(java.util.Collection)} or * {@link #withDebugRuleEvaluationStatuses(java.util.Collection)} if you want to override the existing values. *
* * @param debugRuleEvaluationStatuses * Evaluation status of Amazon SageMaker Debugger rules for debugging on a training job. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeTrainingJobResult withDebugRuleEvaluationStatuses(DebugRuleEvaluationStatus... debugRuleEvaluationStatuses) { if (this.debugRuleEvaluationStatuses == null) { setDebugRuleEvaluationStatuses(new java.util.ArrayList* Evaluation status of Amazon SageMaker Debugger rules for debugging on a training job. *
* * @param debugRuleEvaluationStatuses * Evaluation status of Amazon SageMaker Debugger rules for debugging on a training job. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeTrainingJobResult withDebugRuleEvaluationStatuses(java.util.Collection* Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics. *
* * @return Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics. */ public java.util.List* Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics. *
* * @param profilerRuleConfigurations * Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics. */ public void setProfilerRuleConfigurations(java.util.Collection* Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics. *
** NOTE: This method appends the values to the existing list (if any). Use * {@link #setProfilerRuleConfigurations(java.util.Collection)} or * {@link #withProfilerRuleConfigurations(java.util.Collection)} if you want to override the existing values. *
* * @param profilerRuleConfigurations * Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeTrainingJobResult withProfilerRuleConfigurations(ProfilerRuleConfiguration... profilerRuleConfigurations) { if (this.profilerRuleConfigurations == null) { setProfilerRuleConfigurations(new java.util.ArrayList* Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics. *
* * @param profilerRuleConfigurations * Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeTrainingJobResult withProfilerRuleConfigurations(java.util.Collection* Evaluation status of Amazon SageMaker Debugger rules for profiling on a training job. *
* * @return Evaluation status of Amazon SageMaker Debugger rules for profiling on a training job. */ public java.util.List* Evaluation status of Amazon SageMaker Debugger rules for profiling on a training job. *
* * @param profilerRuleEvaluationStatuses * Evaluation status of Amazon SageMaker Debugger rules for profiling on a training job. */ public void setProfilerRuleEvaluationStatuses(java.util.Collection* Evaluation status of Amazon SageMaker Debugger rules for profiling on a training job. *
** NOTE: This method appends the values to the existing list (if any). Use * {@link #setProfilerRuleEvaluationStatuses(java.util.Collection)} or * {@link #withProfilerRuleEvaluationStatuses(java.util.Collection)} if you want to override the existing values. *
* * @param profilerRuleEvaluationStatuses * Evaluation status of Amazon SageMaker Debugger rules for profiling on a training job. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeTrainingJobResult withProfilerRuleEvaluationStatuses(ProfilerRuleEvaluationStatus... profilerRuleEvaluationStatuses) { if (this.profilerRuleEvaluationStatuses == null) { setProfilerRuleEvaluationStatuses(new java.util.ArrayList* Evaluation status of Amazon SageMaker Debugger rules for profiling on a training job. *
* * @param profilerRuleEvaluationStatuses * Evaluation status of Amazon SageMaker Debugger rules for profiling on a training job. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeTrainingJobResult withProfilerRuleEvaluationStatuses(java.util.Collection* Profiling status of a training job. *
* * @param profilingStatus * Profiling status of a training job. * @see ProfilingStatus */ public void setProfilingStatus(String profilingStatus) { this.profilingStatus = profilingStatus; } /** ** Profiling status of a training job. *
* * @return Profiling status of a training job. * @see ProfilingStatus */ public String getProfilingStatus() { return this.profilingStatus; } /** ** Profiling status of a training job. *
* * @param profilingStatus * Profiling status of a training job. * @return Returns a reference to this object so that method calls can be chained together. * @see ProfilingStatus */ public DescribeTrainingJobResult withProfilingStatus(String profilingStatus) { setProfilingStatus(profilingStatus); return this; } /** ** Profiling status of a training job. *
* * @param profilingStatus * Profiling status of a training job. * @return Returns a reference to this object so that method calls can be chained together. * @see ProfilingStatus */ public DescribeTrainingJobResult withProfilingStatus(ProfilingStatus profilingStatus) { this.profilingStatus = profilingStatus.toString(); return this; } /** *
     * The number of times to retry the job when the job fails due to an InternalServerError.
     *
     * @param retryStrategy
     *        The number of times to retry the job when the job fails due to an InternalServerError.
     */
public void setRetryStrategy(RetryStrategy retryStrategy) {
this.retryStrategy = retryStrategy;
}
/**
     * The number of times to retry the job when the job fails due to an InternalServerError.
     *
     * @return The number of times to retry the job when the job fails due to an InternalServerError.
     */
public RetryStrategy getRetryStrategy() {
return this.retryStrategy;
}
/**
     * The number of times to retry the job when the job fails due to an InternalServerError.
     *
     * @param retryStrategy
     *        The number of times to retry the job when the job fails due to an InternalServerError.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DescribeTrainingJobResult withRetryStrategy(RetryStrategy retryStrategy) {
setRetryStrategy(retryStrategy);
return this;
}
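
    /*
     * Illustrative sketch only: reading the retry setting from a describe result. RetryStrategy is assumed
     * here to expose its MaximumRetryAttempts member through a standard getter, and "result" is a
     * hypothetical DescribeTrainingJobResult instance.
     *
     *   RetryStrategy retry = result.getRetryStrategy();
     *   if (retry != null) {
     *       Integer maxAttempts = retry.getMaximumRetryAttempts();
     *       System.out.println("Job retries on InternalServerError up to " + maxAttempts + " time(s).");
     *   }
     */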
/**
* * The environment variables to set in the Docker container. *
* * @return The environment variables to set in the Docker container. */ public java.util.Map* The environment variables to set in the Docker container. *
* * @param environment * The environment variables to set in the Docker container. */ public void setEnvironment(java.util.Map* The environment variables to set in the Docker container. *
* * @param environment * The environment variables to set in the Docker container. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeTrainingJobResult withEnvironment(java.util.Map* The status of the warm pool associated with the training job. *
* * @param warmPoolStatus * The status of the warm pool associated with the training job. */ public void setWarmPoolStatus(WarmPoolStatus warmPoolStatus) { this.warmPoolStatus = warmPoolStatus; } /** ** The status of the warm pool associated with the training job. *
* * @return The status of the warm pool associated with the training job. */ public WarmPoolStatus getWarmPoolStatus() { return this.warmPoolStatus; } /** ** The status of the warm pool associated with the training job. *
* * @param warmPoolStatus * The status of the warm pool associated with the training job. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeTrainingJobResult withWarmPoolStatus(WarmPoolStatus warmPoolStatus) { setWarmPoolStatus(warmPoolStatus); return this; } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getTrainingJobName() != null) sb.append("TrainingJobName: ").append(getTrainingJobName()).append(","); if (getTrainingJobArn() != null) sb.append("TrainingJobArn: ").append(getTrainingJobArn()).append(","); if (getTuningJobArn() != null) sb.append("TuningJobArn: ").append(getTuningJobArn()).append(","); if (getLabelingJobArn() != null) sb.append("LabelingJobArn: ").append(getLabelingJobArn()).append(","); if (getAutoMLJobArn() != null) sb.append("AutoMLJobArn: ").append(getAutoMLJobArn()).append(","); if (getModelArtifacts() != null) sb.append("ModelArtifacts: ").append(getModelArtifacts()).append(","); if (getTrainingJobStatus() != null) sb.append("TrainingJobStatus: ").append(getTrainingJobStatus()).append(","); if (getSecondaryStatus() != null) sb.append("SecondaryStatus: ").append(getSecondaryStatus()).append(","); if (getFailureReason() != null) sb.append("FailureReason: ").append(getFailureReason()).append(","); if (getHyperParameters() != null) sb.append("HyperParameters: ").append(getHyperParameters()).append(","); if (getAlgorithmSpecification() != null) sb.append("AlgorithmSpecification: ").append(getAlgorithmSpecification()).append(","); if (getRoleArn() != null) sb.append("RoleArn: ").append(getRoleArn()).append(","); if (getInputDataConfig() != null) sb.append("InputDataConfig: ").append(getInputDataConfig()).append(","); if (getOutputDataConfig() != null) sb.append("OutputDataConfig: ").append(getOutputDataConfig()).append(","); if (getResourceConfig() != null) sb.append("ResourceConfig: ").append(getResourceConfig()).append(","); if (getVpcConfig() != null) sb.append("VpcConfig: ").append(getVpcConfig()).append(","); if (getStoppingCondition() != null) sb.append("StoppingCondition: ").append(getStoppingCondition()).append(","); if (getCreationTime() != null) sb.append("CreationTime: ").append(getCreationTime()).append(","); if (getTrainingStartTime() != null) sb.append("TrainingStartTime: ").append(getTrainingStartTime()).append(","); if (getTrainingEndTime() != null) sb.append("TrainingEndTime: ").append(getTrainingEndTime()).append(","); if (getLastModifiedTime() != null) sb.append("LastModifiedTime: ").append(getLastModifiedTime()).append(","); if (getSecondaryStatusTransitions() != null) sb.append("SecondaryStatusTransitions: ").append(getSecondaryStatusTransitions()).append(","); if (getFinalMetricDataList() != null) sb.append("FinalMetricDataList: ").append(getFinalMetricDataList()).append(","); if (getEnableNetworkIsolation() != null) sb.append("EnableNetworkIsolation: ").append(getEnableNetworkIsolation()).append(","); if (getEnableInterContainerTrafficEncryption() != null) sb.append("EnableInterContainerTrafficEncryption: ").append(getEnableInterContainerTrafficEncryption()).append(","); if (getEnableManagedSpotTraining() != null) sb.append("EnableManagedSpotTraining: 
").append(getEnableManagedSpotTraining()).append(","); if (getCheckpointConfig() != null) sb.append("CheckpointConfig: ").append(getCheckpointConfig()).append(","); if (getTrainingTimeInSeconds() != null) sb.append("TrainingTimeInSeconds: ").append(getTrainingTimeInSeconds()).append(","); if (getBillableTimeInSeconds() != null) sb.append("BillableTimeInSeconds: ").append(getBillableTimeInSeconds()).append(","); if (getDebugHookConfig() != null) sb.append("DebugHookConfig: ").append(getDebugHookConfig()).append(","); if (getExperimentConfig() != null) sb.append("ExperimentConfig: ").append(getExperimentConfig()).append(","); if (getDebugRuleConfigurations() != null) sb.append("DebugRuleConfigurations: ").append(getDebugRuleConfigurations()).append(","); if (getTensorBoardOutputConfig() != null) sb.append("TensorBoardOutputConfig: ").append(getTensorBoardOutputConfig()).append(","); if (getDebugRuleEvaluationStatuses() != null) sb.append("DebugRuleEvaluationStatuses: ").append(getDebugRuleEvaluationStatuses()).append(","); if (getProfilerConfig() != null) sb.append("ProfilerConfig: ").append(getProfilerConfig()).append(","); if (getProfilerRuleConfigurations() != null) sb.append("ProfilerRuleConfigurations: ").append(getProfilerRuleConfigurations()).append(","); if (getProfilerRuleEvaluationStatuses() != null) sb.append("ProfilerRuleEvaluationStatuses: ").append(getProfilerRuleEvaluationStatuses()).append(","); if (getProfilingStatus() != null) sb.append("ProfilingStatus: ").append(getProfilingStatus()).append(","); if (getRetryStrategy() != null) sb.append("RetryStrategy: ").append(getRetryStrategy()).append(","); if (getEnvironment() != null) sb.append("Environment: ").append(getEnvironment()).append(","); if (getWarmPoolStatus() != null) sb.append("WarmPoolStatus: ").append(getWarmPoolStatus()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof DescribeTrainingJobResult == false) return false; DescribeTrainingJobResult other = (DescribeTrainingJobResult) obj; if (other.getTrainingJobName() == null ^ this.getTrainingJobName() == null) return false; if (other.getTrainingJobName() != null && other.getTrainingJobName().equals(this.getTrainingJobName()) == false) return false; if (other.getTrainingJobArn() == null ^ this.getTrainingJobArn() == null) return false; if (other.getTrainingJobArn() != null && other.getTrainingJobArn().equals(this.getTrainingJobArn()) == false) return false; if (other.getTuningJobArn() == null ^ this.getTuningJobArn() == null) return false; if (other.getTuningJobArn() != null && other.getTuningJobArn().equals(this.getTuningJobArn()) == false) return false; if (other.getLabelingJobArn() == null ^ this.getLabelingJobArn() == null) return false; if (other.getLabelingJobArn() != null && other.getLabelingJobArn().equals(this.getLabelingJobArn()) == false) return false; if (other.getAutoMLJobArn() == null ^ this.getAutoMLJobArn() == null) return false; if (other.getAutoMLJobArn() != null && other.getAutoMLJobArn().equals(this.getAutoMLJobArn()) == false) return false; if (other.getModelArtifacts() == null ^ this.getModelArtifacts() == null) return false; if (other.getModelArtifacts() != null && other.getModelArtifacts().equals(this.getModelArtifacts()) == false) return false; if (other.getTrainingJobStatus() == null ^ this.getTrainingJobStatus() == null) return false; if (other.getTrainingJobStatus() != null && 
other.getTrainingJobStatus().equals(this.getTrainingJobStatus()) == false) return false; if (other.getSecondaryStatus() == null ^ this.getSecondaryStatus() == null) return false; if (other.getSecondaryStatus() != null && other.getSecondaryStatus().equals(this.getSecondaryStatus()) == false) return false; if (other.getFailureReason() == null ^ this.getFailureReason() == null) return false; if (other.getFailureReason() != null && other.getFailureReason().equals(this.getFailureReason()) == false) return false; if (other.getHyperParameters() == null ^ this.getHyperParameters() == null) return false; if (other.getHyperParameters() != null && other.getHyperParameters().equals(this.getHyperParameters()) == false) return false; if (other.getAlgorithmSpecification() == null ^ this.getAlgorithmSpecification() == null) return false; if (other.getAlgorithmSpecification() != null && other.getAlgorithmSpecification().equals(this.getAlgorithmSpecification()) == false) return false; if (other.getRoleArn() == null ^ this.getRoleArn() == null) return false; if (other.getRoleArn() != null && other.getRoleArn().equals(this.getRoleArn()) == false) return false; if (other.getInputDataConfig() == null ^ this.getInputDataConfig() == null) return false; if (other.getInputDataConfig() != null && other.getInputDataConfig().equals(this.getInputDataConfig()) == false) return false; if (other.getOutputDataConfig() == null ^ this.getOutputDataConfig() == null) return false; if (other.getOutputDataConfig() != null && other.getOutputDataConfig().equals(this.getOutputDataConfig()) == false) return false; if (other.getResourceConfig() == null ^ this.getResourceConfig() == null) return false; if (other.getResourceConfig() != null && other.getResourceConfig().equals(this.getResourceConfig()) == false) return false; if (other.getVpcConfig() == null ^ this.getVpcConfig() == null) return false; if (other.getVpcConfig() != null && other.getVpcConfig().equals(this.getVpcConfig()) == false) return false; if (other.getStoppingCondition() == null ^ this.getStoppingCondition() == null) return false; if (other.getStoppingCondition() != null && other.getStoppingCondition().equals(this.getStoppingCondition()) == false) return false; if (other.getCreationTime() == null ^ this.getCreationTime() == null) return false; if (other.getCreationTime() != null && other.getCreationTime().equals(this.getCreationTime()) == false) return false; if (other.getTrainingStartTime() == null ^ this.getTrainingStartTime() == null) return false; if (other.getTrainingStartTime() != null && other.getTrainingStartTime().equals(this.getTrainingStartTime()) == false) return false; if (other.getTrainingEndTime() == null ^ this.getTrainingEndTime() == null) return false; if (other.getTrainingEndTime() != null && other.getTrainingEndTime().equals(this.getTrainingEndTime()) == false) return false; if (other.getLastModifiedTime() == null ^ this.getLastModifiedTime() == null) return false; if (other.getLastModifiedTime() != null && other.getLastModifiedTime().equals(this.getLastModifiedTime()) == false) return false; if (other.getSecondaryStatusTransitions() == null ^ this.getSecondaryStatusTransitions() == null) return false; if (other.getSecondaryStatusTransitions() != null && other.getSecondaryStatusTransitions().equals(this.getSecondaryStatusTransitions()) == false) return false; if (other.getFinalMetricDataList() == null ^ this.getFinalMetricDataList() == null) return false; if (other.getFinalMetricDataList() != null && 
other.getFinalMetricDataList().equals(this.getFinalMetricDataList()) == false) return false; if (other.getEnableNetworkIsolation() == null ^ this.getEnableNetworkIsolation() == null) return false; if (other.getEnableNetworkIsolation() != null && other.getEnableNetworkIsolation().equals(this.getEnableNetworkIsolation()) == false) return false; if (other.getEnableInterContainerTrafficEncryption() == null ^ this.getEnableInterContainerTrafficEncryption() == null) return false; if (other.getEnableInterContainerTrafficEncryption() != null && other.getEnableInterContainerTrafficEncryption().equals(this.getEnableInterContainerTrafficEncryption()) == false) return false; if (other.getEnableManagedSpotTraining() == null ^ this.getEnableManagedSpotTraining() == null) return false; if (other.getEnableManagedSpotTraining() != null && other.getEnableManagedSpotTraining().equals(this.getEnableManagedSpotTraining()) == false) return false; if (other.getCheckpointConfig() == null ^ this.getCheckpointConfig() == null) return false; if (other.getCheckpointConfig() != null && other.getCheckpointConfig().equals(this.getCheckpointConfig()) == false) return false; if (other.getTrainingTimeInSeconds() == null ^ this.getTrainingTimeInSeconds() == null) return false; if (other.getTrainingTimeInSeconds() != null && other.getTrainingTimeInSeconds().equals(this.getTrainingTimeInSeconds()) == false) return false; if (other.getBillableTimeInSeconds() == null ^ this.getBillableTimeInSeconds() == null) return false; if (other.getBillableTimeInSeconds() != null && other.getBillableTimeInSeconds().equals(this.getBillableTimeInSeconds()) == false) return false; if (other.getDebugHookConfig() == null ^ this.getDebugHookConfig() == null) return false; if (other.getDebugHookConfig() != null && other.getDebugHookConfig().equals(this.getDebugHookConfig()) == false) return false; if (other.getExperimentConfig() == null ^ this.getExperimentConfig() == null) return false; if (other.getExperimentConfig() != null && other.getExperimentConfig().equals(this.getExperimentConfig()) == false) return false; if (other.getDebugRuleConfigurations() == null ^ this.getDebugRuleConfigurations() == null) return false; if (other.getDebugRuleConfigurations() != null && other.getDebugRuleConfigurations().equals(this.getDebugRuleConfigurations()) == false) return false; if (other.getTensorBoardOutputConfig() == null ^ this.getTensorBoardOutputConfig() == null) return false; if (other.getTensorBoardOutputConfig() != null && other.getTensorBoardOutputConfig().equals(this.getTensorBoardOutputConfig()) == false) return false; if (other.getDebugRuleEvaluationStatuses() == null ^ this.getDebugRuleEvaluationStatuses() == null) return false; if (other.getDebugRuleEvaluationStatuses() != null && other.getDebugRuleEvaluationStatuses().equals(this.getDebugRuleEvaluationStatuses()) == false) return false; if (other.getProfilerConfig() == null ^ this.getProfilerConfig() == null) return false; if (other.getProfilerConfig() != null && other.getProfilerConfig().equals(this.getProfilerConfig()) == false) return false; if (other.getProfilerRuleConfigurations() == null ^ this.getProfilerRuleConfigurations() == null) return false; if (other.getProfilerRuleConfigurations() != null && other.getProfilerRuleConfigurations().equals(this.getProfilerRuleConfigurations()) == false) return false; if (other.getProfilerRuleEvaluationStatuses() == null ^ this.getProfilerRuleEvaluationStatuses() == null) return false; if (other.getProfilerRuleEvaluationStatuses() != null && 
other.getProfilerRuleEvaluationStatuses().equals(this.getProfilerRuleEvaluationStatuses()) == false) return false; if (other.getProfilingStatus() == null ^ this.getProfilingStatus() == null) return false; if (other.getProfilingStatus() != null && other.getProfilingStatus().equals(this.getProfilingStatus()) == false) return false; if (other.getRetryStrategy() == null ^ this.getRetryStrategy() == null) return false; if (other.getRetryStrategy() != null && other.getRetryStrategy().equals(this.getRetryStrategy()) == false) return false; if (other.getEnvironment() == null ^ this.getEnvironment() == null) return false; if (other.getEnvironment() != null && other.getEnvironment().equals(this.getEnvironment()) == false) return false; if (other.getWarmPoolStatus() == null ^ this.getWarmPoolStatus() == null) return false; if (other.getWarmPoolStatus() != null && other.getWarmPoolStatus().equals(this.getWarmPoolStatus()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getTrainingJobName() == null) ? 0 : getTrainingJobName().hashCode()); hashCode = prime * hashCode + ((getTrainingJobArn() == null) ? 0 : getTrainingJobArn().hashCode()); hashCode = prime * hashCode + ((getTuningJobArn() == null) ? 0 : getTuningJobArn().hashCode()); hashCode = prime * hashCode + ((getLabelingJobArn() == null) ? 0 : getLabelingJobArn().hashCode()); hashCode = prime * hashCode + ((getAutoMLJobArn() == null) ? 0 : getAutoMLJobArn().hashCode()); hashCode = prime * hashCode + ((getModelArtifacts() == null) ? 0 : getModelArtifacts().hashCode()); hashCode = prime * hashCode + ((getTrainingJobStatus() == null) ? 0 : getTrainingJobStatus().hashCode()); hashCode = prime * hashCode + ((getSecondaryStatus() == null) ? 0 : getSecondaryStatus().hashCode()); hashCode = prime * hashCode + ((getFailureReason() == null) ? 0 : getFailureReason().hashCode()); hashCode = prime * hashCode + ((getHyperParameters() == null) ? 0 : getHyperParameters().hashCode()); hashCode = prime * hashCode + ((getAlgorithmSpecification() == null) ? 0 : getAlgorithmSpecification().hashCode()); hashCode = prime * hashCode + ((getRoleArn() == null) ? 0 : getRoleArn().hashCode()); hashCode = prime * hashCode + ((getInputDataConfig() == null) ? 0 : getInputDataConfig().hashCode()); hashCode = prime * hashCode + ((getOutputDataConfig() == null) ? 0 : getOutputDataConfig().hashCode()); hashCode = prime * hashCode + ((getResourceConfig() == null) ? 0 : getResourceConfig().hashCode()); hashCode = prime * hashCode + ((getVpcConfig() == null) ? 0 : getVpcConfig().hashCode()); hashCode = prime * hashCode + ((getStoppingCondition() == null) ? 0 : getStoppingCondition().hashCode()); hashCode = prime * hashCode + ((getCreationTime() == null) ? 0 : getCreationTime().hashCode()); hashCode = prime * hashCode + ((getTrainingStartTime() == null) ? 0 : getTrainingStartTime().hashCode()); hashCode = prime * hashCode + ((getTrainingEndTime() == null) ? 0 : getTrainingEndTime().hashCode()); hashCode = prime * hashCode + ((getLastModifiedTime() == null) ? 0 : getLastModifiedTime().hashCode()); hashCode = prime * hashCode + ((getSecondaryStatusTransitions() == null) ? 0 : getSecondaryStatusTransitions().hashCode()); hashCode = prime * hashCode + ((getFinalMetricDataList() == null) ? 0 : getFinalMetricDataList().hashCode()); hashCode = prime * hashCode + ((getEnableNetworkIsolation() == null) ? 
0 : getEnableNetworkIsolation().hashCode()); hashCode = prime * hashCode + ((getEnableInterContainerTrafficEncryption() == null) ? 0 : getEnableInterContainerTrafficEncryption().hashCode()); hashCode = prime * hashCode + ((getEnableManagedSpotTraining() == null) ? 0 : getEnableManagedSpotTraining().hashCode()); hashCode = prime * hashCode + ((getCheckpointConfig() == null) ? 0 : getCheckpointConfig().hashCode()); hashCode = prime * hashCode + ((getTrainingTimeInSeconds() == null) ? 0 : getTrainingTimeInSeconds().hashCode()); hashCode = prime * hashCode + ((getBillableTimeInSeconds() == null) ? 0 : getBillableTimeInSeconds().hashCode()); hashCode = prime * hashCode + ((getDebugHookConfig() == null) ? 0 : getDebugHookConfig().hashCode()); hashCode = prime * hashCode + ((getExperimentConfig() == null) ? 0 : getExperimentConfig().hashCode()); hashCode = prime * hashCode + ((getDebugRuleConfigurations() == null) ? 0 : getDebugRuleConfigurations().hashCode()); hashCode = prime * hashCode + ((getTensorBoardOutputConfig() == null) ? 0 : getTensorBoardOutputConfig().hashCode()); hashCode = prime * hashCode + ((getDebugRuleEvaluationStatuses() == null) ? 0 : getDebugRuleEvaluationStatuses().hashCode()); hashCode = prime * hashCode + ((getProfilerConfig() == null) ? 0 : getProfilerConfig().hashCode()); hashCode = prime * hashCode + ((getProfilerRuleConfigurations() == null) ? 0 : getProfilerRuleConfigurations().hashCode()); hashCode = prime * hashCode + ((getProfilerRuleEvaluationStatuses() == null) ? 0 : getProfilerRuleEvaluationStatuses().hashCode()); hashCode = prime * hashCode + ((getProfilingStatus() == null) ? 0 : getProfilingStatus().hashCode()); hashCode = prime * hashCode + ((getRetryStrategy() == null) ? 0 : getRetryStrategy().hashCode()); hashCode = prime * hashCode + ((getEnvironment() == null) ? 0 : getEnvironment().hashCode()); hashCode = prime * hashCode + ((getWarmPoolStatus() == null) ? 0 : getWarmPoolStatus().hashCode()); return hashCode; } @Override public DescribeTrainingJobResult clone() { try { return (DescribeTrainingJobResult) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } }
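
/*
 * Usage sketch (illustrative, not generated code): a typical way to obtain this result with the v1 SDK
 * client and read a few of the fields documented above. The client builder, the request type, and the job
 * name "my-training-job" are assumptions made for this example.
 *
 *   AmazonSageMaker sagemaker = AmazonSageMakerClientBuilder.defaultClient();
 *   DescribeTrainingJobResult result = sagemaker.describeTrainingJob(
 *           new DescribeTrainingJobRequest().withTrainingJobName("my-training-job"));
 *   System.out.println(result.getTrainingJobStatus());
 *   System.out.println(result.getBillableTimeInSeconds());
 */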