/*
* Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.glue.model;

import java.io.Serializable;
import javax.annotation.Generated;

import com.amazonaws.AmazonWebServiceRequest;

/**
* @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class StartJobRunRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {

/**
* The name of the job definition to use.
*/
private String jobName;

/**
* The ID of a previous JobRun to retry.
*/
private String jobRunId;

/**
* The job arguments associated with this run. For this job run, they replace the default arguments set in the job
* definition itself.
*
* You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself
* consumes.
*
* Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection,
* Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
*
* For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic
* in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Spark jobs, see the Special
* Parameters Used by Glue topic in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Ray jobs, see the Using job
* parameters in Ray jobs topic in the developer guide.
*/
private java.util.Map<String, String> arguments;

/**
* This field is deprecated. Use MaxCapacity instead.
*
* The number of Glue data processing units (DPUs) to allocate to this JobRun. You can allocate a minimum of 2 DPUs;
* the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and
* 16 GB of memory. For more information, see the Glue pricing page.
*/
@Deprecated
private Integer allocatedCapacity;

/**
* The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated
* and enters TIMEOUT status. This value overrides the timeout value set in the parent job.
*
* Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
*/
private Integer timeout;

/**
* For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units
* (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4
* vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
*
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the
* Number of workers.
*
* Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
*
* The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache
* Spark ETL job, or an Apache Spark streaming ETL job:
*
* When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The
* default is 0.0625 DPU.
*
* When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job
* (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot
* have a fractional DPU allocation.
*/
private Double maxCapacity;

/**
* The name of the SecurityConfiguration structure to be used with this job run.
*/
private String securityConfiguration;

/**
* Specifies configuration properties of a job run notification.
*/
private NotificationProperty notificationProperty;

/**
* The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X
* for Spark jobs. Accepts the value Z.2X for Ray jobs.
*
* For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB
* free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
* joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB
* free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
* joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB
* free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most
* demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or
* later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
* (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe
* (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
* For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately
* 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your
* most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0
* or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
*
* For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately
* 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This
* worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately
* 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*/
private String workerType;

/**
* The number of workers of a defined workerType that are allocated when a job runs.
*/
private Integer numberOfWorkers;

/**
* Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal
* for time-sensitive workloads that require fast job startup and dedicated resources.
*
* The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
*
* Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX.
* The flexible execution class is available for Spark jobs.
*/
private String executionClass;

/**
* The name of the job definition to use.
*
* @param jobName
*        The name of the job definition to use.
*/
public void setJobName(String jobName) {
this.jobName = jobName;
}

/**
* The name of the job definition to use.
*
* @return The name of the job definition to use.
*/
public String getJobName() {
return this.jobName;
}

/**
* The name of the job definition to use.
*
* @param jobName
*        The name of the job definition to use.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartJobRunRequest withJobName(String jobName) {
setJobName(jobName);
return this;
}
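// Usage sketch (illustrative; "my-etl-job" is a hypothetical job name): requests are
// typically built fluently and handed to the AWSGlue client's startJobRun method.
//
//   AWSGlue glue = AWSGlueClientBuilder.defaultClient();
//   StartJobRunRequest request = new StartJobRunRequest().withJobName("my-etl-job");
//   String runId = glue.startJobRun(request).getJobRunId();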
/**
* The ID of a previous JobRun to retry.
*
* @param jobRunId
*        The ID of a previous JobRun to retry.
*/
public void setJobRunId(String jobRunId) {
this.jobRunId = jobRunId;
}
/**
* The ID of a previous JobRun to retry.
*
* @return The ID of a previous JobRun to retry.
*/
public String getJobRunId() {
return this.jobRunId;
}
/**
* The ID of a previous JobRun to retry.
*
* @param jobRunId
*        The ID of a previous JobRun to retry.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartJobRunRequest withJobRunId(String jobRunId) {
setJobRunId(jobRunId);
return this;
}
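// Usage sketch (illustrative; identifiers are hypothetical): retrying a specific
// earlier run of the same job.
//
//   StartJobRunRequest retry = new StartJobRunRequest()
//           .withJobName("my-etl-job")
//           .withJobRunId("jr_0123456789abcdef");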
/**
* The job arguments associated with this run. For this job run, they replace the default arguments set in the job
* definition itself.
*
* You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself
* consumes.
*
* Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection,
* Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
*
* For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic
* in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Spark jobs, see the Special
* Parameters Used by Glue topic in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Ray jobs, see the Using job
* parameters in Ray jobs topic in the developer guide.
*
* @return The job arguments associated with this run. For this job run, they replace the default arguments set in the
*         job definition itself.
*
*         You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue
*         itself consumes.
*
*         Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue
*         Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
*
*         For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in
*         Python topic in the developer guide.
*
*         For information about the arguments you can provide to this field when configuring Spark jobs, see the
*         Special Parameters Used by Glue topic in the developer guide.
*
*         For information about the arguments you can provide to this field when configuring Ray jobs, see the Using
*         job parameters in Ray jobs topic in the developer guide.
*/
public java.util.Map<String, String> getArguments() {
return this.arguments;
}

/**
* The job arguments associated with this run. For this job run, they replace the default arguments set in the job
* definition itself.
*
* You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself
* consumes.
*
* Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection,
* Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
*
* For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in
* Python topic in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters
* Used by Glue topic in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray
* jobs in the developer guide.
*
* @param arguments
*        The job arguments associated with this run. For this job run, they replace the default arguments set in the
*        job definition itself.
*
*        You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue
*        itself consumes.
*
*        Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue
*        Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
*
*        For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python
*        topic in the developer guide.
*
*        For information about the arguments you can provide to this field when configuring Spark jobs, see the
*        Special Parameters Used by Glue topic in the developer guide.
*
*        For information about the arguments you can provide to this field when configuring Ray jobs, see the Using
*        job parameters in Ray jobs topic in the developer guide.
*/
public void setArguments(java.util.Map<String, String> arguments) {
this.arguments = arguments;
}

/**
* The job arguments associated with this run. For this job run, they replace the default arguments set in the job
* definition itself.
*
* You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself
* consumes.
*
* Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection,
* Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
*
* For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in
* Python topic in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters
* Used by Glue topic in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray
* jobs in the developer guide.
*
* @param arguments
*        The job arguments associated with this run. For this job run, they replace the default arguments set in the
*        job definition itself.
*
*        You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue
*        itself consumes.
*
*        Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue
*        Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
*
*        For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python
*        topic in the developer guide.
*
*        For information about the arguments you can provide to this field when configuring Spark jobs, see the
*        Special Parameters Used by Glue topic in the developer guide.
*
*        For information about the arguments you can provide to this field when configuring Ray jobs, see the Using
*        job parameters in Ray jobs topic in the developer guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartJobRunRequest withArguments(java.util.Map<String, String> arguments) {
setArguments(arguments);
return this;
}
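// Usage sketch (illustrative; the job name and argument keys are hypothetical):
// run-level arguments override the defaults stored on the job definition.
//
//   java.util.Map<String, String> args = new java.util.HashMap<>();
//   args.put("--input_path", "s3://my-bucket/input/");
//   args.put("--enable-metrics", "true");
//
//   StartJobRunRequest request = new StartJobRunRequest()
//           .withJobName("my-etl-job")
//           .withArguments(args);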
/**
* This field is deprecated. Use MaxCapacity instead.
*
* The number of Glue data processing units (DPUs) to allocate to this JobRun. You can allocate a minimum of 2 DPUs;
* the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and
* 16 GB of memory. For more information, see the Glue pricing page.
*
* @param allocatedCapacity
*        This field is deprecated. Use MaxCapacity instead.
*
*        The number of Glue data processing units (DPUs) to allocate to this JobRun. You can allocate a minimum of 2
*        DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute
*        capacity and 16 GB of memory. For more information, see the Glue pricing page.
*/
@Deprecated
public void setAllocatedCapacity(Integer allocatedCapacity) {
this.allocatedCapacity = allocatedCapacity;
}

/**
* This field is deprecated. Use MaxCapacity instead.
*
* The number of Glue data processing units (DPUs) to allocate to this JobRun. You can allocate a minimum of 2 DPUs;
* the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and
* 16 GB of memory. For more information, see the Glue pricing page.
*
* @return This field is deprecated. Use MaxCapacity instead.
*
*         The number of Glue data processing units (DPUs) to allocate to this JobRun. You can allocate a minimum of 2
*         DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute
*         capacity and 16 GB of memory. For more information, see the Glue pricing page.
*/
@Deprecated
public Integer getAllocatedCapacity() {
return this.allocatedCapacity;
}

/**
* This field is deprecated. Use MaxCapacity instead.
*
* The number of Glue data processing units (DPUs) to allocate to this JobRun. You can allocate a minimum of 2 DPUs;
* the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and
* 16 GB of memory. For more information, see the Glue pricing page.
*
* @param allocatedCapacity
*        This field is deprecated. Use MaxCapacity instead.
*
*        The number of Glue data processing units (DPUs) to allocate to this JobRun. You can allocate a minimum of 2
*        DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute
*        capacity and 16 GB of memory. For more information, see the Glue pricing page.
* @return Returns a reference to this object so that method calls can be chained together.
*/
@Deprecated
public StartJobRunRequest withAllocatedCapacity(Integer allocatedCapacity) {
setAllocatedCapacity(allocatedCapacity);
return this;
}
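// Migration sketch (illustrative): AllocatedCapacity and MaxCapacity both count DPUs,
// but only MaxCapacity accepts fractional values such as 0.0625 for Python shell jobs.
//
//   request.setMaxCapacity(10.0); // preferred over request.setAllocatedCapacity(10)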
/**
* The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated
* and enters TIMEOUT status. This value overrides the timeout value set in the parent job.
*
* Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
*
* @param timeout
*        The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is
*        terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.
*
*        Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
*/
public void setTimeout(Integer timeout) {
this.timeout = timeout;
}

/**
* The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated
* and enters TIMEOUT status. This value overrides the timeout value set in the parent job.
*
* Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
*
* @return The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is
*         terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.
*
*         Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
*/
public Integer getTimeout() {
return this.timeout;
}

/**
* The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated
* and enters TIMEOUT status. This value overrides the timeout value set in the parent job.
*
* Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
*
* @param timeout
*        The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is
*        terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.
*
*        Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartJobRunRequest withTimeout(Integer timeout) {
setTimeout(timeout);
return this;
}
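// Usage sketch (illustrative): cap this run at 60 minutes regardless of the timeout
// configured on the parent job definition.
//
//   request.withTimeout(60);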
/**
* For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units
* (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4
* vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
*
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the
* Number of workers.
*
* Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
*
* The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache
* Spark ETL job, or an Apache Spark streaming ETL job:
*
* When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The
* default is 0.0625 DPU.
*
* When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job
* (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot
* have a fractional DPU allocation.
*
* @param maxCapacity
*        For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing
*        units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that
*        consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
*
*        For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type
*        and the Number of workers.
*
*        Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
*
*        The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an
*        Apache Spark ETL job, or an Apache Spark streaming ETL job:
*
*        When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU.
*        The default is 0.0625 DPU.
*
*        When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job
*        (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type
*        cannot have a fractional DPU allocation.
*/
public void setMaxCapacity(Double maxCapacity) {
this.maxCapacity = maxCapacity;
}

/**
* For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units
* (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4
* vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
*
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the
* Number of workers.
*
* Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
*
* The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache
* Spark ETL job, or an Apache Spark streaming ETL job:
*
* When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The
* default is 0.0625 DPU.
*
* When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job
* (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot
* have a fractional DPU allocation.
*
* @return For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing
*         units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that
*         consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing
*         page.
*
*         For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type
*         and the Number of workers.
*
*         Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
*
*         The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an
*         Apache Spark ETL job, or an Apache Spark streaming ETL job:
*
*         When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1
*         DPU. The default is 0.0625 DPU.
*
*         When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job
*         (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job
*         type cannot have a fractional DPU allocation.
*/
public Double getMaxCapacity() {
return this.maxCapacity;
}

/**
* For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units
* (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4
* vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
*
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the
* Number of workers.
*
* Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
*
* The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache
* Spark ETL job, or an Apache Spark streaming ETL job:
*
* When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The
* default is 0.0625 DPU.
*
* When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job
* (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot
* have a fractional DPU allocation.
*
* @param maxCapacity
*        For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing
*        units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that
*        consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
*
*        For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type
*        and the Number of workers.
*
*        Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
*
*        The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an
*        Apache Spark ETL job, or an Apache Spark streaming ETL job:
*
*        When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU.
*        The default is 0.0625 DPU.
*
*        When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job
*        (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type
*        cannot have a fractional DPU allocation.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartJobRunRequest withMaxCapacity(Double maxCapacity) {
setMaxCapacity(maxCapacity);
return this;
}
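// Usage sketch (illustrative; the job name is hypothetical): a Python shell job
// run allocated the smallest fractional capacity this field accepts.
//
//   StartJobRunRequest pythonShellRun = new StartJobRunRequest()
//           .withJobName("my-python-shell-job")
//           .withMaxCapacity(0.0625);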
/**
* The name of the SecurityConfiguration structure to be used with this job run.
*
* @param securityConfiguration
*        The name of the SecurityConfiguration structure to be used with this job run.
*/
public void setSecurityConfiguration(String securityConfiguration) {
this.securityConfiguration = securityConfiguration;
}
/**
*
* The name of the SecurityConfiguration structure to be used with this job run.
*
* @return The name of the SecurityConfiguration structure to be used with this job run.
*/
public String getSecurityConfiguration() {
return this.securityConfiguration;
}
/**
*
* The name of the SecurityConfiguration structure to be used with this job run.
*
* @param securityConfiguration
*        The name of the SecurityConfiguration structure to be used with this job run.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartJobRunRequest withSecurityConfiguration(String securityConfiguration) {
setSecurityConfiguration(securityConfiguration);
return this;
}
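// Usage sketch (illustrative): "my-security-config" names a hypothetical
// SecurityConfiguration created separately in Glue.
//
//   request.withSecurityConfiguration("my-security-config");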
/**
* Specifies configuration properties of a job run notification.
*
* @param notificationProperty
*        Specifies configuration properties of a job run notification.
*/
public void setNotificationProperty(NotificationProperty notificationProperty) {
this.notificationProperty = notificationProperty;
}

/**
* Specifies configuration properties of a job run notification.
*
* @return Specifies configuration properties of a job run notification.
*/
public NotificationProperty getNotificationProperty() {
return this.notificationProperty;
}

/**
* Specifies configuration properties of a job run notification.
*
* @param notificationProperty
*        Specifies configuration properties of a job run notification.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartJobRunRequest withNotificationProperty(NotificationProperty notificationProperty) {
setNotificationProperty(notificationProperty);
return this;
}
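// Usage sketch (illustrative): request a delay notification if the run is still
// going after 10 minutes; NotifyDelayAfter is the property NotificationProperty
// carries in this model.
//
//   request.withNotificationProperty(new NotificationProperty().withNotifyDelayAfter(10));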
/**
* The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X
* for Spark jobs. Accepts the value Z.2X for Ray jobs.
*
* For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB
* free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
* joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB
* free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
* joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB
* free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most
* demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or
* later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
* (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe
* (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
* For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately
* 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your
* most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0
* or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
*
* For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately
* 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This
* worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately
* 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* @param workerType
*        The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X
*        or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
*
*        For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately
*        34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data
*        transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
*        For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately
*        77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data
*        transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
*        For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
*        (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
*        workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is
*        available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US
*        East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia
*        Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
*        For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk
*        (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
*        workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is
*        available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as
*        supported for the G.4X worker type.
*
*        For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk
*        (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume
*        streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
*        For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk
*        (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*/
public void setWorkerType(String workerType) {
this.workerType = workerType;
}

/**
* The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X
* for Spark jobs. Accepts the value Z.2X for Ray jobs.
*
* For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB
* free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
* joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB
* free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
* joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB
* free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most
* demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or
* later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
* (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe
* (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
* For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately
* 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your
* most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0
* or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
*
* For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately
* 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This
* worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately
* 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* @return The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X
*         or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
*
*         For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk
*         (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads
*         such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
*         For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk
*         (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads
*         such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
*         For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
*         (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
*         workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is
*         available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US
*         East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia
*         Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
*         For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk
*         (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
*         workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is
*         available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as
*         supported for the G.4X worker type.
*
*         For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk
*         (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume
*         streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
*         For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk
*         (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*/
public String getWorkerType() {
return this.workerType;
}

/**
* The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X
* for Spark jobs. Accepts the value Z.2X for Ray jobs.
*
* For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB
* free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
* joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB
* free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
* joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB
* free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most
* demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or
* later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
* (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe
* (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
* For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately
* 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your
* most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0
* or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
*
* For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately
* 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This
* worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately
* 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* @param workerType
*        The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X
*        or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
*
*        For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately
*        34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data
*        transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
*        For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately
*        77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data
*        transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
*        For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
*        (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
*        workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is
*        available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US
*        East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia
*        Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
*        For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk
*        (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
*        workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is
*        available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as
*        supported for the G.4X worker type.
*
*        For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk
*        (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume
*        streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
*        For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk
*        (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartJobRunRequest withWorkerType(String workerType) {
setWorkerType(workerType);
return this;
}

/**
* The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X
* for Spark jobs. Accepts the value Z.2X for Ray jobs.
*
* For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB
* free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
* joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB
* free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
* joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB
* free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most
* demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or
* later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
* (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe
* (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
* For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately
* 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your
* most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0
* or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
*
* For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately
* 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This
* worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately
* 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* @param workerType
*        The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X
*        or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
*
*        For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately
*        34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data
*        transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
*        For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately
*        77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data
*        transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
*        For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
*        (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
*        workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is
*        available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US
*        East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia
*        Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
*        For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk
*        (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
*        workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is
*        available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as
*        supported for the G.4X worker type.
*
*        For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk
*        (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume
*        streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
*        For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk
*        (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartJobRunRequest withWorkerType(WorkerType workerType) {
this.workerType = workerType.toString();
return this;
}

/**
* The number of workers of a defined workerType that are allocated when a job runs.
*
* @param numberOfWorkers
*        The number of workers of a defined workerType that are allocated when a job runs.
*/
public void setNumberOfWorkers(Integer numberOfWorkers) {
this.numberOfWorkers = numberOfWorkers;
}
/**
*
* The number of workers of a defined workerType that are allocated when a job runs.
*
* @return The number of workers of a defined workerType that are allocated when a job runs.
*/
public Integer getNumberOfWorkers() {
return this.numberOfWorkers;
}
/**
*
* The number of workers of a defined workerType that are allocated when a job runs.
*
* @param numberOfWorkers
*        The number of workers of a defined workerType that are allocated when a job runs.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartJobRunRequest withNumberOfWorkers(Integer numberOfWorkers) {
setNumberOfWorkers(numberOfWorkers);
return this;
}
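// Usage sketch (illustrative; the job name and sizing are hypothetical): for
// Glue 2.0+ jobs, capacity is expressed as a worker type plus a worker count
// rather than MaxCapacity.
//
//   StartJobRunRequest sparkRun = new StartJobRunRequest()
//           .withJobName("my-spark-job")
//           .withWorkerType(WorkerType.G2X)
//           .withNumberOfWorkers(10);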
/**
* Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal
* for time-sensitive workloads that require fast job startup and dedicated resources.
*
* The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
*
* Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX.
* The flexible execution class is available for Spark jobs.
*
* @param executionClass
*        Indicates whether the job is run with a standard or flexible execution class. The standard execution class
*        is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
*
*        The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may
*        vary.
*
*        Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to
*        FLEX. The flexible execution class is available for Spark jobs.
* @see ExecutionClass
*/
public void setExecutionClass(String executionClass) {
this.executionClass = executionClass;
}
/**
*
* Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal
* for time-sensitive workloads that require fast job startup and dedicated resources.
*
* The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
*
* Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX.
* The flexible execution class is available for Spark jobs.
*
* @return Indicates whether the job is run with a standard or flexible execution class. The standard execution class
*         is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
*
*         The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may
*         vary.
*
*         Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to
*         FLEX. The flexible execution class is available for Spark jobs.
* @see ExecutionClass
*/
public String getExecutionClass() {
return this.executionClass;
}
/**
*
* Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal
* for time-sensitive workloads that require fast job startup and dedicated resources.
*
* The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
*
* Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX.
* The flexible execution class is available for Spark jobs.
*
* @param executionClass
*        Indicates whether the job is run with a standard or flexible execution class. The standard execution class
*        is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
*
*        The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may
*        vary.
*
*        Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to
*        FLEX. The flexible execution class is available for Spark jobs.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ExecutionClass
*/
public StartJobRunRequest withExecutionClass(String executionClass) {
setExecutionClass(executionClass);
return this;
}
/**
*
* Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal
* for time-sensitive workloads that require fast job startup and dedicated resources.
*
* The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
*
* Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX.
* The flexible execution class is available for Spark jobs.
*
* @param executionClass
*        Indicates whether the job is run with a standard or flexible execution class. The standard execution class
*        is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
*
*        The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may
*        vary.
*
*        Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to
*        FLEX. The flexible execution class is available for Spark jobs.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ExecutionClass
*/
public StartJobRunRequest withExecutionClass(ExecutionClass executionClass) {
this.executionClass = executionClass.toString();
return this;
}
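// Usage sketch (illustrative; the job name is hypothetical): a Glue 3.0+ glueetl
// job opting in to the flexible execution class.
//
//   StartJobRunRequest flexRun = new StartJobRunRequest()
//           .withJobName("my-nightly-backfill")
//           .withExecutionClass(ExecutionClass.FLEX);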
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getJobName() != null)
sb.append("JobName: ").append(getJobName()).append(",");
if (getJobRunId() != null)
sb.append("JobRunId: ").append(getJobRunId()).append(",");
if (getArguments() != null)
sb.append("Arguments: ").append(getArguments()).append(",");
if (getAllocatedCapacity() != null)
sb.append("AllocatedCapacity: ").append(getAllocatedCapacity()).append(",");
if (getTimeout() != null)
sb.append("Timeout: ").append(getTimeout()).append(",");
if (getMaxCapacity() != null)
sb.append("MaxCapacity: ").append(getMaxCapacity()).append(",");
if (getSecurityConfiguration() != null)
sb.append("SecurityConfiguration: ").append(getSecurityConfiguration()).append(",");
if (getNotificationProperty() != null)
sb.append("NotificationProperty: ").append(getNotificationProperty()).append(",");
if (getWorkerType() != null)
sb.append("WorkerType: ").append(getWorkerType()).append(",");
if (getNumberOfWorkers() != null)
sb.append("NumberOfWorkers: ").append(getNumberOfWorkers()).append(",");
if (getExecutionClass() != null)
sb.append("ExecutionClass: ").append(getExecutionClass());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof StartJobRunRequest == false)
return false;
StartJobRunRequest other = (StartJobRunRequest) obj;
if (other.getJobName() == null ^ this.getJobName() == null)
return false;
if (other.getJobName() != null && other.getJobName().equals(this.getJobName()) == false)
return false;
if (other.getJobRunId() == null ^ this.getJobRunId() == null)
return false;
if (other.getJobRunId() != null && other.getJobRunId().equals(this.getJobRunId()) == false)
return false;
if (other.getArguments() == null ^ this.getArguments() == null)
return false;
if (other.getArguments() != null && other.getArguments().equals(this.getArguments()) == false)
return false;
if (other.getAllocatedCapacity() == null ^ this.getAllocatedCapacity() == null)
return false;
if (other.getAllocatedCapacity() != null && other.getAllocatedCapacity().equals(this.getAllocatedCapacity()) == false)
return false;
if (other.getTimeout() == null ^ this.getTimeout() == null)
return false;
if (other.getTimeout() != null && other.getTimeout().equals(this.getTimeout()) == false)
return false;
if (other.getMaxCapacity() == null ^ this.getMaxCapacity() == null)
return false;
if (other.getMaxCapacity() != null && other.getMaxCapacity().equals(this.getMaxCapacity()) == false)
return false;
if (other.getSecurityConfiguration() == null ^ this.getSecurityConfiguration() == null)
return false;
if (other.getSecurityConfiguration() != null && other.getSecurityConfiguration().equals(this.getSecurityConfiguration()) == false)
return false;
if (other.getNotificationProperty() == null ^ this.getNotificationProperty() == null)
return false;
if (other.getNotificationProperty() != null && other.getNotificationProperty().equals(this.getNotificationProperty()) == false)
return false;
if (other.getWorkerType() == null ^ this.getWorkerType() == null)
return false;
if (other.getWorkerType() != null && other.getWorkerType().equals(this.getWorkerType()) == false)
return false;
if (other.getNumberOfWorkers() == null ^ this.getNumberOfWorkers() == null)
return false;
if (other.getNumberOfWorkers() != null && other.getNumberOfWorkers().equals(this.getNumberOfWorkers()) == false)
return false;
if (other.getExecutionClass() == null ^ this.getExecutionClass() == null)
return false;
if (other.getExecutionClass() != null && other.getExecutionClass().equals(this.getExecutionClass()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getJobName() == null) ? 0 : getJobName().hashCode());
hashCode = prime * hashCode + ((getJobRunId() == null) ? 0 : getJobRunId().hashCode());
hashCode = prime * hashCode + ((getArguments() == null) ? 0 : getArguments().hashCode());
hashCode = prime * hashCode + ((getAllocatedCapacity() == null) ? 0 : getAllocatedCapacity().hashCode());
hashCode = prime * hashCode + ((getTimeout() == null) ? 0 : getTimeout().hashCode());
hashCode = prime * hashCode + ((getMaxCapacity() == null) ? 0 : getMaxCapacity().hashCode());
hashCode = prime * hashCode + ((getSecurityConfiguration() == null) ? 0 : getSecurityConfiguration().hashCode());
hashCode = prime * hashCode + ((getNotificationProperty() == null) ? 0 : getNotificationProperty().hashCode());
hashCode = prime * hashCode + ((getWorkerType() == null) ? 0 : getWorkerType().hashCode());
hashCode = prime * hashCode + ((getNumberOfWorkers() == null) ? 0 : getNumberOfWorkers().hashCode());
hashCode = prime * hashCode + ((getExecutionClass() == null) ? 0 : getExecutionClass().hashCode());
return hashCode;
}
@Override
public StartJobRunRequest clone() {
return (StartJobRunRequest) super.clone();
}
}