/* * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.glue.model; import java.io.Serializable; import javax.annotation.Generated; import com.amazonaws.protocol.StructuredPojo; import com.amazonaws.protocol.ProtocolMarshaller; /** *
* Specifies information used to update an existing job definition. The previous job definition is completely * overwritten by this information. *
* * @see AWS API * Documentation */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class JobUpdate implements Serializable, Cloneable, StructuredPojo { /** ** Description of the job being defined. *
*/ private String description; /** ** This field is reserved for future use. *
*/ private String logUri; /** ** The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). *
*/ private String role; /** *
* An ExecutionProperty
specifying the maximum number of concurrent runs allowed for this job.
*
* The JobCommand
that runs this job (required).
*
* The default arguments for every run of this job, specified as name-value pairs. *
** You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself * consumes. *
** Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, * Secrets Manager or other secret management mechanism if you intend to keep them within the Job. *
** For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in * Python topic in the developer guide. *
** For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters * Used by Glue topic in the developer guide. *
** For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray * jobs in the developer guide. *
*/ private java.util.Map* Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value * pairs. *
*/ private java.util.Map* The connections used for this job. *
*/ private ConnectionsList connections; /** ** The maximum number of times to retry this job if it fails. *
*/ private Integer maxRetries; /** *
* This field is deprecated. Use MaxCapacity
instead.
*
* The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of 2 DPUs; * the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity * and 16 GB of memory. For more information, see the Glue pricing * page. *
*/ @Deprecated private Integer allocatedCapacity; /** *
* The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated
* and enters TIMEOUT
status. The default is 2,880 minutes (48 hours).
*
* For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units * (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of * 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. *
*
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity
. Instead, you should specify a
* Worker type
and the Number of workers
.
*
* Do not set MaxCapacity
if using WorkerType
and NumberOfWorkers
.
*
* The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell
* job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:
*
* When you specify a Python shell job (JobCommand.Name
="pythonshell"), you can allocate either 0.0625
* or 1 DPU. The default is 0.0625 DPU.
*
* When you specify an Apache Spark ETL job (JobCommand.Name
="glueetl") or Apache Spark streaming ETL
* job (JobCommand.Name
="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs.
* This job type cannot have a fractional DPU allocation.
*
* The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or * G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. *
*
* For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such
     * as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk
* (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such
     * as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
* (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
* workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available
* only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio),
* US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
* Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
* For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk
* (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
* workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available
* only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the
* G.4X
worker type.
*
* For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume
* streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk
* (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* The number of workers of a defined workerType
that are allocated when a job runs.
*
* The name of the SecurityConfiguration
structure to be used with this job.
*
* Specifies the configuration properties of a job notification. *
*/ private NotificationProperty notificationProperty; /** *
* In Spark jobs, GlueVersion
determines the versions of Apache Spark and Python that Glue makes available in
* a job. The Python version indicates the version supported for jobs of type Spark.
*
* Ray jobs should set GlueVersion
to 4.0
or greater. However, the versions of Ray, Python
* and additional libraries available in your Ray job are determined by the Runtime
parameter of the
* Job command.
*
* For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *
** Jobs that are created without specifying a Glue version default to Glue 0.9. *
*/ private String glueVersion; /** ** The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio * code generation is based. *
*/ private java.util.Map* Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *
** The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. *
*
* Only jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set
* ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
*
* The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a * remote repository. *
*/ private SourceControlDetails sourceControlDetails; /** ** Description of the job being defined. *
* * @param description * Description of the job being defined. */ public void setDescription(String description) { this.description = description; } /** ** Description of the job being defined. *
* * @return Description of the job being defined. */ public String getDescription() { return this.description; } /** ** Description of the job being defined. *
* * @param description * Description of the job being defined. * @return Returns a reference to this object so that method calls can be chained together. */ public JobUpdate withDescription(String description) { setDescription(description); return this; } /** ** This field is reserved for future use. *
* * @param logUri * This field is reserved for future use. */ public void setLogUri(String logUri) { this.logUri = logUri; } /** ** This field is reserved for future use. *
* * @return This field is reserved for future use. */ public String getLogUri() { return this.logUri; } /** ** This field is reserved for future use. *
* * @param logUri * This field is reserved for future use. * @return Returns a reference to this object so that method calls can be chained together. */ public JobUpdate withLogUri(String logUri) { setLogUri(logUri); return this; } /** ** The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). *
* * @param role * The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). */ public void setRole(String role) { this.role = role; } /** ** The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). *
* * @return The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). */ public String getRole() { return this.role; } /** ** The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). *
* * @param role * The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). * @return Returns a reference to this object so that method calls can be chained together. */ public JobUpdate withRole(String role) { setRole(role); return this; } /** *
* An ExecutionProperty
specifying the maximum number of concurrent runs allowed for this job.
*
ExecutionProperty
specifying the maximum number of concurrent runs allowed for this job.
*/
    public void setExecutionProperty(ExecutionProperty executionProperty) {
        // Plain assignment: stores the supplied reference directly, no defensive copy is made.
        this.executionProperty = executionProperty;
    }
/**
*
* An ExecutionProperty
specifying the maximum number of concurrent runs allowed for this job.
*
ExecutionProperty
specifying the maximum number of concurrent runs allowed for this job.
*/
    public ExecutionProperty getExecutionProperty() {
        // Returns the stored reference as-is; null if the property was never set.
        return this.executionProperty;
    }
/**
*
* An ExecutionProperty
specifying the maximum number of concurrent runs allowed for this job.
*
ExecutionProperty
specifying the maximum number of concurrent runs allowed for this job.
* @return Returns a reference to this object so that method calls can be chained together.
*/
    public JobUpdate withExecutionProperty(ExecutionProperty executionProperty) {
        // Fluent variant: delegates to the setter and returns this to allow call chaining.
        setExecutionProperty(executionProperty);
        return this;
    }
/**
*
* The JobCommand
that runs this job (required).
*
JobCommand
that runs this job (required).
*/
    public void setCommand(JobCommand command) {
        // Plain assignment: stores the supplied JobCommand reference directly, no defensive copy.
        this.command = command;
    }
/**
*
* The JobCommand
that runs this job (required).
*
JobCommand
that runs this job (required).
*/
    public JobCommand getCommand() {
        // Returns the stored reference as-is; null if the command was never set.
        return this.command;
    }
/**
*
* The JobCommand
that runs this job (required).
*
JobCommand
that runs this job (required).
* @return Returns a reference to this object so that method calls can be chained together.
*/
    public JobUpdate withCommand(JobCommand command) {
        // Fluent variant: delegates to the setter and returns this to allow call chaining.
        setCommand(command);
        return this;
    }
/**
* * The default arguments for every run of this job, specified as name-value pairs. *
** You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself * consumes. *
** Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, * Secrets Manager or other secret management mechanism if you intend to keep them within the Job. *
** For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in * Python topic in the developer guide. *
** For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters * Used by Glue topic in the developer guide. *
** For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray * jobs in the developer guide. *
* * @return The default arguments for every run of this job, specified as name-value pairs. ** You can specify arguments here that your own job-execution script consumes, as well as arguments that * Glue itself consumes. *
** Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue * Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the * Job. *
** For information about how to specify and consume your own Job arguments, see the Calling Glue * APIs in Python topic in the developer guide. *
** For information about the arguments you can provide to this field when configuring Spark jobs, see the Special * Parameters Used by Glue topic in the developer guide. *
*
* For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters
* in Ray jobs in the developer guide.
*/
public java.util.Map
* The default arguments for every run of this job, specified as name-value pairs.
*
* You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself
* consumes.
*
* Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection,
* Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
*
* For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in
* Python topic in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters
* Used by Glue topic in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray
* jobs in the developer guide.
*
* You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue * itself consumes. *
** Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue * Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the * Job. *
** For information about how to specify and consume your own Job arguments, see the Calling Glue * APIs in Python topic in the developer guide. *
** For information about the arguments you can provide to this field when configuring Spark jobs, see the Special * Parameters Used by Glue topic in the developer guide. *
*
* For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters
* in Ray jobs in the developer guide.
*/
public void setDefaultArguments(java.util.Map
* The default arguments for every run of this job, specified as name-value pairs.
*
* You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself
* consumes.
*
* Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection,
* Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
*
* For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in
* Python topic in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters
* Used by Glue topic in the developer guide.
*
* For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray
* jobs in the developer guide.
*
* You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue * itself consumes. *
** Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue * Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the * Job. *
** For information about how to specify and consume your own Job arguments, see the Calling Glue * APIs in Python topic in the developer guide. *
** For information about the arguments you can provide to this field when configuring Spark jobs, see the Special * Parameters Used by Glue topic in the developer guide. *
*
* For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters
* in Ray jobs in the developer guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public JobUpdate withDefaultArguments(java.util.Map
* Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value
* pairs.
*
* Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value
* pairs.
*
* Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value
* pairs.
*
* The connections used for this job.
*
* The connections used for this job.
*
* The connections used for this job.
*
* The maximum number of times to retry this job if it fails.
*
* The maximum number of times to retry this job if it fails.
*
* The maximum number of times to retry this job if it fails.
*
* This field is deprecated. Use
* The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of 2 DPUs;
* the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity
* and 16 GB of memory. For more information, see the Glue pricing
* page.
* MaxCapacity
instead.
* MaxCapacity
instead.
* The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of 2 * DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of * compute capacity and 16 GB of memory. For more information, see the Glue pricing page. */ @Deprecated public void setAllocatedCapacity(Integer allocatedCapacity) { this.allocatedCapacity = allocatedCapacity; } /** *
* This field is deprecated. Use MaxCapacity
instead.
*
* The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of 2 DPUs; * the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity * and 16 GB of memory. For more information, see the Glue pricing * page. *
* * @return This field is deprecated. UseMaxCapacity
instead.
* * The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of 2 * DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of * compute capacity and 16 GB of memory. For more information, see the Glue pricing page. */ @Deprecated public Integer getAllocatedCapacity() { return this.allocatedCapacity; } /** *
* This field is deprecated. Use MaxCapacity
instead.
*
* The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of 2 DPUs; * the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity * and 16 GB of memory. For more information, see the Glue pricing * page. *
* * @param allocatedCapacity * This field is deprecated. UseMaxCapacity
instead.
* * The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of 2 * DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of * compute capacity and 16 GB of memory. For more information, see the Glue pricing page. * @return Returns a reference to this object so that method calls can be chained together. */ @Deprecated public JobUpdate withAllocatedCapacity(Integer allocatedCapacity) { setAllocatedCapacity(allocatedCapacity); return this; } /** *
* The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated
* and enters TIMEOUT
status. The default is 2,880 minutes (48 hours).
*
TIMEOUT
status. The default is 2,880 minutes (48 hours).
*/
    public void setTimeout(Integer timeout) {
        // Value is in minutes (per the field Javadoc); stored as-is, including null.
        this.timeout = timeout;
    }
/**
*
* The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated
* and enters TIMEOUT
status. The default is 2,880 minutes (48 hours).
*
TIMEOUT
status. The default is 2,880 minutes (48 hours).
*/
    public Integer getTimeout() {
        // Returns the stored value as-is; null if the timeout was never set.
        return this.timeout;
    }
/**
*
* The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated
* and enters TIMEOUT
status. The default is 2,880 minutes (48 hours).
*
TIMEOUT
status. The default is 2,880 minutes (48 hours).
* @return Returns a reference to this object so that method calls can be chained together.
*/
    public JobUpdate withTimeout(Integer timeout) {
        // Fluent variant: delegates to the setter and returns this to allow call chaining.
        setTimeout(timeout);
        return this;
    }
/**
* * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units * (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of * 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. *
*
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity
. Instead, you should specify a
* Worker type
and the Number of workers
.
*
* Do not set MaxCapacity
if using WorkerType
and NumberOfWorkers
.
*
* The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell
* job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:
*
* When you specify a Python shell job (JobCommand.Name
="pythonshell"), you can allocate either 0.0625
* or 1 DPU. The default is 0.0625 DPU.
*
* When you specify an Apache Spark ETL job (JobCommand.Name
="glueetl") or Apache Spark streaming ETL
* job (JobCommand.Name
="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs.
* This job type cannot have a fractional DPU allocation.
*
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity
. Instead, you should
* specify a Worker type
and the Number of workers
.
*
* Do not set MaxCapacity
if using WorkerType
and NumberOfWorkers
.
*
* The value that can be allocated for MaxCapacity
depends on whether you are running a Python
* shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:
*
* When you specify a Python shell job (JobCommand.Name
="pythonshell"), you can allocate either
* 0.0625 or 1 DPU. The default is 0.0625 DPU.
*
* When you specify an Apache Spark ETL job (JobCommand.Name
="glueetl") or Apache Spark
* streaming ETL job (JobCommand.Name
="gluestreaming"), you can allocate from 2 to 100 DPUs. The
* default is 10 DPUs. This job type cannot have a fractional DPU allocation.
*
* For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units * (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of * 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. *
*
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity
. Instead, you should specify a
* Worker type
and the Number of workers
.
*
* Do not set MaxCapacity
if using WorkerType
and NumberOfWorkers
.
*
* The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell
* job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:
*
* When you specify a Python shell job (JobCommand.Name
="pythonshell"), you can allocate either 0.0625
* or 1 DPU. The default is 0.0625 DPU.
*
* When you specify an Apache Spark ETL job (JobCommand.Name
="glueetl") or Apache Spark streaming ETL
* job (JobCommand.Name
="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs.
* This job type cannot have a fractional DPU allocation.
*
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity
. Instead, you should
* specify a Worker type
and the Number of workers
.
*
* Do not set MaxCapacity
if using WorkerType
and NumberOfWorkers
.
*
* The value that can be allocated for MaxCapacity
depends on whether you are running a Python
* shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:
*
* When you specify a Python shell job (JobCommand.Name
="pythonshell"), you can allocate either
* 0.0625 or 1 DPU. The default is 0.0625 DPU.
*
* When you specify an Apache Spark ETL job (JobCommand.Name
="glueetl") or Apache Spark
* streaming ETL job (JobCommand.Name
="gluestreaming"), you can allocate from 2 to 100 DPUs.
* The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
*
* For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units * (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of * 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. *
*
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity
. Instead, you should specify a
* Worker type
and the Number of workers
.
*
* Do not set MaxCapacity
if using WorkerType
and NumberOfWorkers
.
*
* The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell
* job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:
*
* When you specify a Python shell job (JobCommand.Name
="pythonshell"), you can allocate either 0.0625
* or 1 DPU. The default is 0.0625 DPU.
*
* When you specify an Apache Spark ETL job (JobCommand.Name
="glueetl") or Apache Spark streaming ETL
* job (JobCommand.Name
="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs.
* This job type cannot have a fractional DPU allocation.
*
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity
. Instead, you should
* specify a Worker type
and the Number of workers
.
*
* Do not set MaxCapacity
if using WorkerType
and NumberOfWorkers
.
*
* The value that can be allocated for MaxCapacity
depends on whether you are running a Python
* shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:
*
* When you specify a Python shell job (JobCommand.Name
="pythonshell"), you can allocate either
* 0.0625 or 1 DPU. The default is 0.0625 DPU.
*
* When you specify an Apache Spark ETL job (JobCommand.Name
="glueetl") or Apache Spark
* streaming ETL job (JobCommand.Name
="gluestreaming"), you can allocate from 2 to 100 DPUs. The
* default is 10 DPUs. This job type cannot have a fractional DPU allocation.
*
* The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or * G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. *
*
* For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such
     * as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk
* (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such
     * as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.
*
* For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
* (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
* workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available
* only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio),
* US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
* Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
* For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk
* (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
* workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available
* only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the
* G.4X
worker type.
*
* For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume
* streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X
worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk
* (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads
* such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
*
* For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB
* disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for
* workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run
* most jobs.
*
* For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB
* disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for
* jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker
* type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services
* Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific
* (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe
* (Stockholm).
*
* For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB
* disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for
* jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker
* type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services
* Regions as supported for the G.4X
worker type.
*
* For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB
* disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low
* volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB
* disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or * G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. *
*
* For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such
* as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
*
* For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk
* (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such
* as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
*
* For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
* (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
* workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available
* only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio),
* US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
* Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
* For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk
* (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
* workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available
* only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the
* G.4X
worker type.
*
* For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume
* streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk
* (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB
* disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for
* workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run
* most jobs.
*
* For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB
* disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for
* workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run
* most jobs.
*
* For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB
* disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for
* jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This
* worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web
* Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia
* Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and
* Europe (Stockholm).
*
* For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB
* disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for
* jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This
* worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web
* Services Regions as supported for the G.4X
worker type.
*
* For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB
* disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low
* volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB
* disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or * G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. *
*
* For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such
* as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
*
* For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk
* (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such
* as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
*
* For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
* (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
* workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available
* only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio),
* US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
* Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
* For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk
* (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
* workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available
* only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the
* G.4X
worker type.
*
* For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume
* streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk
* (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads
* such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
*
* For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB
* disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for
* workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run
* most jobs.
*
* For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB
* disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for
* jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker
* type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services
* Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific
* (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe
* (Stockholm).
*
* For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB
* disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for
* jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker
* type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services
* Regions as supported for the G.4X
worker type.
*
* For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB
* disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low
* volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB
* disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or * G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. *
*
* For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such
* as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
*
* For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk
* (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such
* as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
*
* For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
* (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
* workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available
* only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio),
* US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
* Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
*
* For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk
* (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose
* workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available
* only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the
* G.4X
worker type.
*
* For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume
* streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk
* (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
* For the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads
* such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
*
* For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB
* disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for
* workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run
* most jobs.
*
* For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB
* disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for
* jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker
* type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services
* Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific
* (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe
* (Stockholm).
*
* For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB
* disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for
* jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker
* type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services
* Regions as supported for the G.4X
worker type.
*
* For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB
* disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low
* volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
*
* For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB
* disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*
     * The number of workers of a defined <code>workerType</code> that are allocated when a job runs.
     * </p>
     * 
     * @param numberOfWorkers
     *        The number of workers of a defined <code>workerType</code> that are allocated when a job runs.
     */
    public void setNumberOfWorkers(Integer numberOfWorkers) {
        this.numberOfWorkers = numberOfWorkers;
    }
/**
*
* The number of workers of a defined workerType
that are allocated when a job runs.
*
workerType
that are allocated when a job runs.
*/
public Integer getNumberOfWorkers() {
return this.numberOfWorkers;
}
/**
*
* The number of workers of a defined workerType
that are allocated when a job runs.
*
workerType
that are allocated when a job runs.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public JobUpdate withNumberOfWorkers(Integer numberOfWorkers) {
setNumberOfWorkers(numberOfWorkers);
return this;
}
    /**
     * <p>
     * The name of the <code>SecurityConfiguration</code> structure to be used with this job.
     * </p>
     * 
     * @param securityConfiguration
     *        The name of the <code>SecurityConfiguration</code> structure to be used with this job.
     */
    public void setSecurityConfiguration(String securityConfiguration) {
        this.securityConfiguration = securityConfiguration;
    }
/**
*
* The name of the SecurityConfiguration
structure to be used with this job.
*
SecurityConfiguration
structure to be used with this job.
*/
public String getSecurityConfiguration() {
return this.securityConfiguration;
}
/**
*
* The name of the SecurityConfiguration
structure to be used with this job.
*
SecurityConfiguration
structure to be used with this job.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public JobUpdate withSecurityConfiguration(String securityConfiguration) {
setSecurityConfiguration(securityConfiguration);
return this;
}
/**
* * Specifies the configuration properties of a job notification. *
* * @param notificationProperty * Specifies the configuration properties of a job notification. */ public void setNotificationProperty(NotificationProperty notificationProperty) { this.notificationProperty = notificationProperty; } /** ** Specifies the configuration properties of a job notification. *
* * @return Specifies the configuration properties of a job notification. */ public NotificationProperty getNotificationProperty() { return this.notificationProperty; } /** ** Specifies the configuration properties of a job notification. *
* * @param notificationProperty * Specifies the configuration properties of a job notification. * @return Returns a reference to this object so that method calls can be chained together. */ public JobUpdate withNotificationProperty(NotificationProperty notificationProperty) { setNotificationProperty(notificationProperty); return this; } /** *
* In Spark jobs, GlueVersion
determines the versions of Apache Spark and Python that Glue available in
* a job. The Python version indicates the version supported for jobs of type Spark.
*
* Ray jobs should set GlueVersion
to 4.0
or greater. However, the versions of Ray, Python
* and additional libraries available in your Ray job are determined by the Runtime
parameter of the
* Job command.
*
* For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *
** Jobs that are created without specifying a Glue version default to Glue 0.9. *
* * @param glueVersion * In Spark jobs,GlueVersion
determines the versions of Apache Spark and Python that Glue
* available in a job. The Python version indicates the version supported for jobs of type Spark.
*
* Ray jobs should set GlueVersion
to 4.0
or greater. However, the versions of Ray,
* Python and additional libraries available in your Ray job are determined by the Runtime
* parameter of the Job command.
*
* For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *
** Jobs that are created without specifying a Glue version default to Glue 0.9. */ public void setGlueVersion(String glueVersion) { this.glueVersion = glueVersion; } /** *
* In Spark jobs, GlueVersion
determines the versions of Apache Spark and Python that Glue available in
* a job. The Python version indicates the version supported for jobs of type Spark.
*
* Ray jobs should set GlueVersion
to 4.0
or greater. However, the versions of Ray, Python
* and additional libraries available in your Ray job are determined by the Runtime
parameter of the
* Job command.
*
* For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *
** Jobs that are created without specifying a Glue version default to Glue 0.9. *
* * @return In Spark jobs,GlueVersion
determines the versions of Apache Spark and Python that Glue
* available in a job. The Python version indicates the version supported for jobs of type Spark.
*
* Ray jobs should set GlueVersion
to 4.0
or greater. However, the versions of
* Ray, Python and additional libraries available in your Ray job are determined by the Runtime
* parameter of the Job command.
*
* For more information about the available Glue versions and corresponding Spark and Python versions, see * Glue version in the developer * guide. *
** Jobs that are created without specifying a Glue version default to Glue 0.9. */ public String getGlueVersion() { return this.glueVersion; } /** *
* In Spark jobs, GlueVersion
determines the versions of Apache Spark and Python that Glue available in
* a job. The Python version indicates the version supported for jobs of type Spark.
*
* Ray jobs should set GlueVersion
to 4.0
or greater. However, the versions of Ray, Python
* and additional libraries available in your Ray job are determined by the Runtime
parameter of the
* Job command.
*
* For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *
** Jobs that are created without specifying a Glue version default to Glue 0.9. *
* * @param glueVersion * In Spark jobs,GlueVersion
determines the versions of Apache Spark and Python that Glue
* available in a job. The Python version indicates the version supported for jobs of type Spark.
*
* Ray jobs should set GlueVersion
to 4.0
or greater. However, the versions of Ray,
* Python and additional libraries available in your Ray job are determined by the Runtime
* parameter of the Job command.
*
* For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *
** Jobs that are created without specifying a Glue version default to Glue 0.9. * @return Returns a reference to this object so that method calls can be chained together. */ public JobUpdate withGlueVersion(String glueVersion) { setGlueVersion(glueVersion); return this; } /** *
* The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio * code generation is based. *
* * @return The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue * Studio code generation is based. */ public java.util.Map* The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio * code generation is based. *
* * @param codeGenConfigurationNodes * The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue * Studio code generation is based. */ public void setCodeGenConfigurationNodes(java.util.Map* The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio * code generation is based. *
* * @param codeGenConfigurationNodes * The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue * Studio code generation is based. * @return Returns a reference to this object so that method calls can be chained together. */ public JobUpdate withCodeGenConfigurationNodes(java.util.Map* Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *
** The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. *
*
* Only jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set
* ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
*
* The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may * vary. *
*
* Only jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set
* ExecutionClass
to FLEX
. The flexible execution class is available for Spark
* jobs.
* @see ExecutionClass
*/
public void setExecutionClass(String executionClass) {
this.executionClass = executionClass;
}
/**
*
* Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *
** The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. *
*
* Only jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set
* ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
*
* The flexible execution class is appropriate for time-insensitive jobs whose start and completion times * may vary. *
*
* Only jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set
* ExecutionClass
to FLEX
. The flexible execution class is available for Spark
* jobs.
* @see ExecutionClass
*/
public String getExecutionClass() {
return this.executionClass;
}
/**
*
* Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *
** The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. *
*
* Only jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set
* ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
*
* The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may * vary. *
*
* Only jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set
* ExecutionClass
to FLEX
. The flexible execution class is available for Spark
* jobs.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ExecutionClass
*/
public JobUpdate withExecutionClass(String executionClass) {
setExecutionClass(executionClass);
return this;
}
    /**
     * <p>
     * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is
     * ideal for time-sensitive workloads that require fast job startup and dedicated resources.
     * </p>
     * <p>
     * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may
     * vary.
     * </p>
     * <p>
     * Only jobs with Glue version 3.0 and above and command type <code>glueetl</code> will be allowed to set
     * <code>ExecutionClass</code> to <code>FLEX</code>. The flexible execution class is available for Spark jobs.
     * </p>
     * 
     * @param executionClass
     *        the execution class to use for runs of this job; must be non-null (the enum value is stored via
     *        <code>toString()</code>)
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see ExecutionClass
     */
    public JobUpdate withExecutionClass(ExecutionClass executionClass) {
        this.executionClass = executionClass.toString();
        return this;
    }
/**
*
* The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a * remote repository. *
* * @param sourceControlDetails * The details for a source control configuration for a job, allowing synchronization of job artifacts to or * from a remote repository. */ public void setSourceControlDetails(SourceControlDetails sourceControlDetails) { this.sourceControlDetails = sourceControlDetails; } /** ** The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a * remote repository. *
* * @return The details for a source control configuration for a job, allowing synchronization of job artifacts to or * from a remote repository. */ public SourceControlDetails getSourceControlDetails() { return this.sourceControlDetails; } /** ** The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a * remote repository. *
* * @param sourceControlDetails * The details for a source control configuration for a job, allowing synchronization of job artifacts to or * from a remote repository. * @return Returns a reference to this object so that method calls can be chained together. */ public JobUpdate withSourceControlDetails(SourceControlDetails sourceControlDetails) { setSourceControlDetails(sourceControlDetails); return this; } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getDescription() != null) sb.append("Description: ").append(getDescription()).append(","); if (getLogUri() != null) sb.append("LogUri: ").append(getLogUri()).append(","); if (getRole() != null) sb.append("Role: ").append(getRole()).append(","); if (getExecutionProperty() != null) sb.append("ExecutionProperty: ").append(getExecutionProperty()).append(","); if (getCommand() != null) sb.append("Command: ").append(getCommand()).append(","); if (getDefaultArguments() != null) sb.append("DefaultArguments: ").append(getDefaultArguments()).append(","); if (getNonOverridableArguments() != null) sb.append("NonOverridableArguments: ").append(getNonOverridableArguments()).append(","); if (getConnections() != null) sb.append("Connections: ").append(getConnections()).append(","); if (getMaxRetries() != null) sb.append("MaxRetries: ").append(getMaxRetries()).append(","); if (getAllocatedCapacity() != null) sb.append("AllocatedCapacity: ").append(getAllocatedCapacity()).append(","); if (getTimeout() != null) sb.append("Timeout: ").append(getTimeout()).append(","); if (getMaxCapacity() != null) sb.append("MaxCapacity: ").append(getMaxCapacity()).append(","); if (getWorkerType() != null) sb.append("WorkerType: 
").append(getWorkerType()).append(","); if (getNumberOfWorkers() != null) sb.append("NumberOfWorkers: ").append(getNumberOfWorkers()).append(","); if (getSecurityConfiguration() != null) sb.append("SecurityConfiguration: ").append(getSecurityConfiguration()).append(","); if (getNotificationProperty() != null) sb.append("NotificationProperty: ").append(getNotificationProperty()).append(","); if (getGlueVersion() != null) sb.append("GlueVersion: ").append(getGlueVersion()).append(","); if (getCodeGenConfigurationNodes() != null) sb.append("CodeGenConfigurationNodes: ").append("***Sensitive Data Redacted***").append(","); if (getExecutionClass() != null) sb.append("ExecutionClass: ").append(getExecutionClass()).append(","); if (getSourceControlDetails() != null) sb.append("SourceControlDetails: ").append(getSourceControlDetails()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof JobUpdate == false) return false; JobUpdate other = (JobUpdate) obj; if (other.getDescription() == null ^ this.getDescription() == null) return false; if (other.getDescription() != null && other.getDescription().equals(this.getDescription()) == false) return false; if (other.getLogUri() == null ^ this.getLogUri() == null) return false; if (other.getLogUri() != null && other.getLogUri().equals(this.getLogUri()) == false) return false; if (other.getRole() == null ^ this.getRole() == null) return false; if (other.getRole() != null && other.getRole().equals(this.getRole()) == false) return false; if (other.getExecutionProperty() == null ^ this.getExecutionProperty() == null) return false; if (other.getExecutionProperty() != null && other.getExecutionProperty().equals(this.getExecutionProperty()) == false) return false; if (other.getCommand() == null ^ this.getCommand() == null) return false; if (other.getCommand() != null && other.getCommand().equals(this.getCommand()) == false) 
return false; if (other.getDefaultArguments() == null ^ this.getDefaultArguments() == null) return false; if (other.getDefaultArguments() != null && other.getDefaultArguments().equals(this.getDefaultArguments()) == false) return false; if (other.getNonOverridableArguments() == null ^ this.getNonOverridableArguments() == null) return false; if (other.getNonOverridableArguments() != null && other.getNonOverridableArguments().equals(this.getNonOverridableArguments()) == false) return false; if (other.getConnections() == null ^ this.getConnections() == null) return false; if (other.getConnections() != null && other.getConnections().equals(this.getConnections()) == false) return false; if (other.getMaxRetries() == null ^ this.getMaxRetries() == null) return false; if (other.getMaxRetries() != null && other.getMaxRetries().equals(this.getMaxRetries()) == false) return false; if (other.getAllocatedCapacity() == null ^ this.getAllocatedCapacity() == null) return false; if (other.getAllocatedCapacity() != null && other.getAllocatedCapacity().equals(this.getAllocatedCapacity()) == false) return false; if (other.getTimeout() == null ^ this.getTimeout() == null) return false; if (other.getTimeout() != null && other.getTimeout().equals(this.getTimeout()) == false) return false; if (other.getMaxCapacity() == null ^ this.getMaxCapacity() == null) return false; if (other.getMaxCapacity() != null && other.getMaxCapacity().equals(this.getMaxCapacity()) == false) return false; if (other.getWorkerType() == null ^ this.getWorkerType() == null) return false; if (other.getWorkerType() != null && other.getWorkerType().equals(this.getWorkerType()) == false) return false; if (other.getNumberOfWorkers() == null ^ this.getNumberOfWorkers() == null) return false; if (other.getNumberOfWorkers() != null && other.getNumberOfWorkers().equals(this.getNumberOfWorkers()) == false) return false; if (other.getSecurityConfiguration() == null ^ this.getSecurityConfiguration() == null) return false; if 
(other.getSecurityConfiguration() != null && other.getSecurityConfiguration().equals(this.getSecurityConfiguration()) == false) return false; if (other.getNotificationProperty() == null ^ this.getNotificationProperty() == null) return false; if (other.getNotificationProperty() != null && other.getNotificationProperty().equals(this.getNotificationProperty()) == false) return false; if (other.getGlueVersion() == null ^ this.getGlueVersion() == null) return false; if (other.getGlueVersion() != null && other.getGlueVersion().equals(this.getGlueVersion()) == false) return false; if (other.getCodeGenConfigurationNodes() == null ^ this.getCodeGenConfigurationNodes() == null) return false; if (other.getCodeGenConfigurationNodes() != null && other.getCodeGenConfigurationNodes().equals(this.getCodeGenConfigurationNodes()) == false) return false; if (other.getExecutionClass() == null ^ this.getExecutionClass() == null) return false; if (other.getExecutionClass() != null && other.getExecutionClass().equals(this.getExecutionClass()) == false) return false; if (other.getSourceControlDetails() == null ^ this.getSourceControlDetails() == null) return false; if (other.getSourceControlDetails() != null && other.getSourceControlDetails().equals(this.getSourceControlDetails()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getDescription() == null) ? 0 : getDescription().hashCode()); hashCode = prime * hashCode + ((getLogUri() == null) ? 0 : getLogUri().hashCode()); hashCode = prime * hashCode + ((getRole() == null) ? 0 : getRole().hashCode()); hashCode = prime * hashCode + ((getExecutionProperty() == null) ? 0 : getExecutionProperty().hashCode()); hashCode = prime * hashCode + ((getCommand() == null) ? 0 : getCommand().hashCode()); hashCode = prime * hashCode + ((getDefaultArguments() == null) ? 
0 : getDefaultArguments().hashCode()); hashCode = prime * hashCode + ((getNonOverridableArguments() == null) ? 0 : getNonOverridableArguments().hashCode()); hashCode = prime * hashCode + ((getConnections() == null) ? 0 : getConnections().hashCode()); hashCode = prime * hashCode + ((getMaxRetries() == null) ? 0 : getMaxRetries().hashCode()); hashCode = prime * hashCode + ((getAllocatedCapacity() == null) ? 0 : getAllocatedCapacity().hashCode()); hashCode = prime * hashCode + ((getTimeout() == null) ? 0 : getTimeout().hashCode()); hashCode = prime * hashCode + ((getMaxCapacity() == null) ? 0 : getMaxCapacity().hashCode()); hashCode = prime * hashCode + ((getWorkerType() == null) ? 0 : getWorkerType().hashCode()); hashCode = prime * hashCode + ((getNumberOfWorkers() == null) ? 0 : getNumberOfWorkers().hashCode()); hashCode = prime * hashCode + ((getSecurityConfiguration() == null) ? 0 : getSecurityConfiguration().hashCode()); hashCode = prime * hashCode + ((getNotificationProperty() == null) ? 0 : getNotificationProperty().hashCode()); hashCode = prime * hashCode + ((getGlueVersion() == null) ? 0 : getGlueVersion().hashCode()); hashCode = prime * hashCode + ((getCodeGenConfigurationNodes() == null) ? 0 : getCodeGenConfigurationNodes().hashCode()); hashCode = prime * hashCode + ((getExecutionClass() == null) ? 0 : getExecutionClass().hashCode()); hashCode = prime * hashCode + ((getSourceControlDetails() == null) ? 0 : getSourceControlDetails().hashCode()); return hashCode; } @Override public JobUpdate clone() { try { return (JobUpdate) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } @com.amazonaws.annotation.SdkInternalApi @Override public void marshall(ProtocolMarshaller protocolMarshaller) { com.amazonaws.services.glue.model.transform.JobUpdateMarshaller.getInstance().marshall(this, protocolMarshaller); } }