/* * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.glue.model; import java.io.Serializable; import javax.annotation.Generated; import com.amazonaws.protocol.StructuredPojo; import com.amazonaws.protocol.ProtocolMarshaller; /** *

* Contains information about a job run. *

*
 * @see AWS API Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class JobRun implements Serializable, Cloneable, StructuredPojo {

    /** The ID of this job run. */
    private String id;

    /** The number of the attempt to run this job. */
    private Integer attempt;

    /**
     * The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
     */
    private String previousRunId;

    /** The name of the trigger that started this job run. */
    private String triggerName;

    /** The name of the job definition being used in this run. */
    private String jobName;

    /** The date and time at which this job run was started. */
    private java.util.Date startedOn;

    /** The last time that this job run was modified. */
    private java.util.Date lastModifiedOn;

    /** The date and time that this job run completed. */
    private java.util.Date completedOn;

    /**
     * The current state of the job run. For more information about the statuses of jobs that have terminated
     * abnormally, see Glue Job Run Statuses in the developer guide.
     */
    private String jobRunState;

    /**
     * The job arguments associated with this run. For this job run, they replace the default arguments set in the
     * job definition itself.
     * <p>
     * Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue
     * Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
     * </p>
     * NOTE(review): the type parameters were lost when the generated HTML was stripped; restored as
     * {@code Map<String, String>} to match the String key/value accessors below.
     */
    private java.util.Map<String, String> arguments;

    /** An error message associated with this job run. */
    private String errorMessage;

    /**
     * A list of predecessors to this job run.
     * NOTE(review): element type restored as {@code Predecessor} to match the varargs accessor below.
     */
    private java.util.List<Predecessor> predecessorRuns;

    /**
     * This field is deprecated. Use MaxCapacity instead.
     * <p>
     * The number of Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be
     * allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of
     * compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
     * </p>
     */
    @Deprecated
    private Integer allocatedCapacity;

    /** The amount of time (in seconds) that the job run consumed resources. */
    private Integer executionTime;

    /**
     * The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is
     * terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.
     * <p>
     * Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
     * </p>
     */
    private Integer timeout;

    /**
     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing
     * units (DPUs) that can be allocated when this job runs.
     * <p>
     * For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type
     * and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
     * </p>
     * <p>
     * The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an
     * Apache Spark ETL job, or an Apache Spark streaming ETL job.
     * </p>
     */
    private Double maxCapacity;

    /**
     * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X
     * or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
     */
    private String workerType;

    /** The number of workers of a defined workerType that are allocated when a job runs. */
    private Integer numberOfWorkers;

    /** The name of the SecurityConfiguration structure to be used with this job run. */
    private String securityConfiguration;

    /**
     * The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using
     * KMS. This name can be /aws-glue/jobs/, in which case the default encryption is NONE. If you add a role name
     * and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/),
     * then that security configuration is used to encrypt the log group.
     */
    private String logGroupName;

    /** Specifies configuration properties of a job run notification. */
    private NotificationProperty notificationProperty;

    /**
     * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue makes available in a
     * job. Ray jobs should set GlueVersion to 4.0 or greater; the versions of Ray, Python and additional libraries
     * are determined by the Runtime parameter of the Job command. Jobs created without specifying a Glue version
     * default to Glue 0.9.
     */
    private String glueVersion;

    /**
     * This field populates only for Auto Scaling job runs: the total time each executor ran during the lifecycle
     * of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers).
     * May be less than executionEngineRuntime * MaxCapacity, as the number of executors running at a given time
     * may be less than the MaxCapacity.
     */
    private Double dPUSeconds;

    /**
     * Indicates whether the job is run with a standard or flexible execution class. Only jobs with Glue version
     * 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX.
     */
    private String executionClass;

    /**

* <p>The ID of this job run.</p>
     *
     * @param id
     *        the ID of this job run
     */
    public void setId(String id) {
        this.id = id;
    }

    /**
     * <p>The ID of this job run.</p>
     *
     * @return the ID of this job run
     */
    public String getId() {
        return id;
    }

    /**
     * <p>The ID of this job run.</p>
     *
     * @param id
     *        the ID of this job run
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withId(String id) {
        this.id = id;
        return this;
    }

    /**
     * <p>The number of the attempt to run this job.</p>
     *
     * @param attempt
     *        the number of the attempt to run this job
     */
    public void setAttempt(Integer attempt) {
        this.attempt = attempt;
    }

    /**
     * <p>The number of the attempt to run this job.</p>
     *
     * @return the number of the attempt to run this job
     */
    public Integer getAttempt() {
        return attempt;
    }

    /**
     * <p>The number of the attempt to run this job.</p>
     *
     * @param attempt
     *        the number of the attempt to run this job
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withAttempt(Integer attempt) {
        this.attempt = attempt;
        return this;
    }

    /**

* <p>The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.</p>
     *
     * @param previousRunId
     *        the ID of the previous run of this job
     */
    public void setPreviousRunId(String previousRunId) {
        this.previousRunId = previousRunId;
    }

    /**
     * <p>The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.</p>
     *
     * @return the ID of the previous run of this job
     */
    public String getPreviousRunId() {
        return previousRunId;
    }

    /**
     * <p>The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.</p>
     *
     * @param previousRunId
     *        the ID of the previous run of this job
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withPreviousRunId(String previousRunId) {
        this.previousRunId = previousRunId;
        return this;
    }

    /**
     * <p>The name of the trigger that started this job run.</p>
     *
     * @param triggerName
     *        the name of the trigger that started this job run
     */
    public void setTriggerName(String triggerName) {
        this.triggerName = triggerName;
    }

    /**
     * <p>The name of the trigger that started this job run.</p>
     *
     * @return the name of the trigger that started this job run
     */
    public String getTriggerName() {
        return triggerName;
    }

    /**
     * <p>The name of the trigger that started this job run.</p>
     *
     * @param triggerName
     *        the name of the trigger that started this job run
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withTriggerName(String triggerName) {
        this.triggerName = triggerName;
        return this;
    }

    /**
     * <p>The name of the job definition being used in this run.</p>
     *
     * @param jobName
     *        the name of the job definition being used in this run
     */
    public void setJobName(String jobName) {
        this.jobName = jobName;
    }

    /**
     * <p>The name of the job definition being used in this run.</p>
     *
     * @return the name of the job definition being used in this run
     */
    public String getJobName() {
        return jobName;
    }

    /**
     * <p>The name of the job definition being used in this run.</p>
     *
     * @param jobName
     *        the name of the job definition being used in this run
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withJobName(String jobName) {
        this.jobName = jobName;
        return this;
    }

    /**

* <p>The date and time at which this job run was started.</p>
     *
     * @param startedOn
     *        the date and time at which this job run was started
     */
    public void setStartedOn(java.util.Date startedOn) {
        this.startedOn = startedOn;
    }

    /**
     * <p>The date and time at which this job run was started.</p>
     *
     * @return the date and time at which this job run was started
     */
    public java.util.Date getStartedOn() {
        return startedOn;
    }

    /**
     * <p>The date and time at which this job run was started.</p>
     *
     * @param startedOn
     *        the date and time at which this job run was started
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withStartedOn(java.util.Date startedOn) {
        this.startedOn = startedOn;
        return this;
    }

    /**
     * <p>The last time that this job run was modified.</p>
     *
     * @param lastModifiedOn
     *        the last time that this job run was modified
     */
    public void setLastModifiedOn(java.util.Date lastModifiedOn) {
        this.lastModifiedOn = lastModifiedOn;
    }

    /**
     * <p>The last time that this job run was modified.</p>
     *
     * @return the last time that this job run was modified
     */
    public java.util.Date getLastModifiedOn() {
        return lastModifiedOn;
    }

    /**
     * <p>The last time that this job run was modified.</p>
     *
     * @param lastModifiedOn
     *        the last time that this job run was modified
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withLastModifiedOn(java.util.Date lastModifiedOn) {
        this.lastModifiedOn = lastModifiedOn;
        return this;
    }

    /**
     * <p>The date and time that this job run completed.</p>
     *
     * @param completedOn
     *        the date and time that this job run completed
     */
    public void setCompletedOn(java.util.Date completedOn) {
        this.completedOn = completedOn;
    }

    /**
     * <p>The date and time that this job run completed.</p>
     *
     * @return the date and time that this job run completed
     */
    public java.util.Date getCompletedOn() {
        return completedOn;
    }

    /**
     * <p>The date and time that this job run completed.</p>
     *
     * @param completedOn
     *        the date and time that this job run completed
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withCompletedOn(java.util.Date completedOn) {
        this.completedOn = completedOn;
        return this;
    }

    /**

* <p>
     * The current state of the job run. For more information about the statuses of jobs that have terminated
     * abnormally, see Glue Job Run Statuses in the developer guide.
     * </p>
     *
     * @param jobRunState
     *        the current state of the job run
     * @see JobRunState
     */
    public void setJobRunState(String jobRunState) {
        this.jobRunState = jobRunState;
    }

    /**
     * <p>
     * The current state of the job run. For more information about the statuses of jobs that have terminated
     * abnormally, see Glue Job Run Statuses in the developer guide.
     * </p>
     *
     * @return the current state of the job run
     * @see JobRunState
     */
    public String getJobRunState() {
        return jobRunState;
    }

    /**
     * <p>
     * The current state of the job run. For more information about the statuses of jobs that have terminated
     * abnormally, see Glue Job Run Statuses in the developer guide.
     * </p>
     *
     * @param jobRunState
     *        the current state of the job run
     * @return this {@code JobRun} instance, so that method calls can be chained together
     * @see JobRunState
     */
    public JobRun withJobRunState(String jobRunState) {
        this.jobRunState = jobRunState;
        return this;
    }

    /**
     * <p>
     * The current state of the job run. For more information about the statuses of jobs that have terminated
     * abnormally, see Glue Job Run Statuses in the developer guide.
     * </p>
     *
     * @param jobRunState
     *        the current state of the job run, as an enum constant
     * @return this {@code JobRun} instance, so that method calls can be chained together
     * @see JobRunState
     */
    public JobRun withJobRunState(JobRunState jobRunState) {
        this.jobRunState = jobRunState.toString();
        return this;
    }

    /**

* <p>
     * The job arguments associated with this run. For this job run, they replace the default arguments set in the
     * job definition itself.
     * </p>
     * <p>
     * You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue
     * itself consumes.
     * </p>
     * <p>
     * Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue
     * Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
     * </p>
     *
     * @return the job arguments associated with this run; they replace the default arguments set in the job
     *         definition itself
     */
    public java.util.Map<String, String> getArguments() {
        // NOTE(review): restored the Map<String, String> type parameters that were lost when the generated
        // HTML documentation was stripped; the String element type is fixed by addArgumentsEntry(String, String).
        return arguments;
    }

    /**
     * <p>
     * The job arguments associated with this run. For this job run, they replace the default arguments set in the
     * job definition itself.
     * </p>
     * <p>
     * Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue
     * Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
     * </p>
     *
     * @param arguments
     *        the job arguments associated with this run
     */
    public void setArguments(java.util.Map<String, String> arguments) {
        this.arguments = arguments;
    }

    /**
     * <p>
     * The job arguments associated with this run. For this job run, they replace the default arguments set in the
     * job definition itself.
     * </p>
     * <p>
     * Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue
     * Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
     * </p>
     *
     * @param arguments
     *        the job arguments associated with this run
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withArguments(java.util.Map<String, String> arguments) {
        setArguments(arguments);
        return this;
    }

    /**
     * Add a single Arguments entry.
     *
     * @param key
     *        the argument name
     * @param value
     *        the argument value
     * @return this {@code JobRun} instance, so that method calls can be chained together
     * @throws IllegalArgumentException
     *         if an entry with the same key has already been added
     * @see JobRun#withArguments
     */
    public JobRun addArgumentsEntry(String key, String value) {
        if (null == this.arguments) {
            this.arguments = new java.util.HashMap<String, String>();
        }
        if (this.arguments.containsKey(key))
            throw new IllegalArgumentException("Duplicated keys (" + key.toString() + ") are provided.");
        this.arguments.put(key, value);
        return this;
    }

    /**
     * Removes all the entries added into Arguments.
     *
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun clearArgumentsEntries() {
        this.arguments = null;
        return this;
    }

    /**

* An error message associated with this job run. *

* * @param errorMessage * An error message associated with this job run. */ public void setErrorMessage(String errorMessage) { this.errorMessage = errorMessage; } /** *

* An error message associated with this job run. *

* * @return An error message associated with this job run. */ public String getErrorMessage() { return this.errorMessage; } /** *

* An error message associated with this job run. *

* * @param errorMessage * An error message associated with this job run. * @return Returns a reference to this object so that method calls can be chained together. */ public JobRun withErrorMessage(String errorMessage) { setErrorMessage(errorMessage); return this; } /** *

* <p>A list of predecessors to this job run.</p>
     *
     * @return a list of predecessors to this job run
     */
    public java.util.List<Predecessor> getPredecessorRuns() {
        // NOTE(review): restored the List<Predecessor> type parameter that was lost when the generated HTML
        // documentation was stripped; the element type is fixed by withPredecessorRuns(Predecessor...).
        return predecessorRuns;
    }

    /**
     * <p>A list of predecessors to this job run.</p>
     *
     * @param predecessorRuns
     *        a list of predecessors to this job run
     */
    public void setPredecessorRuns(java.util.Collection<Predecessor> predecessorRuns) {
        if (predecessorRuns == null) {
            this.predecessorRuns = null;
            return;
        }
        this.predecessorRuns = new java.util.ArrayList<Predecessor>(predecessorRuns);
    }

    /**
     * <p>A list of predecessors to this job run.</p>
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setPredecessorRuns(java.util.Collection)} or {@link #withPredecessorRuns(java.util.Collection)} if
     * you want to override the existing values.
     * </p>
     *
     * @param predecessorRuns
     *        predecessors to append to this job run's list
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withPredecessorRuns(Predecessor... predecessorRuns) {
        if (this.predecessorRuns == null) {
            setPredecessorRuns(new java.util.ArrayList<Predecessor>(predecessorRuns.length));
        }
        for (Predecessor ele : predecessorRuns) {
            this.predecessorRuns.add(ele);
        }
        return this;
    }

    /**
     * <p>A list of predecessors to this job run.</p>
     *
     * @param predecessorRuns
     *        a list of predecessors to this job run
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withPredecessorRuns(java.util.Collection<Predecessor> predecessorRuns) {
        setPredecessorRuns(predecessorRuns);
        return this;
    }

    /**

* <p>This field is deprecated. Use MaxCapacity instead.</p>
     * <p>
     * The number of Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be
     * allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of
     * compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
     * </p>
     *
     * @param allocatedCapacity
     *        the number of Glue data processing units (DPUs) allocated to this JobRun
     * @deprecated Use MaxCapacity instead.
     */
    @Deprecated
    public void setAllocatedCapacity(Integer allocatedCapacity) {
        this.allocatedCapacity = allocatedCapacity;
    }

    /**
     * <p>This field is deprecated. Use MaxCapacity instead.</p>
     * <p>
     * The number of Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be
     * allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of
     * compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
     * </p>
     *
     * @return the number of Glue data processing units (DPUs) allocated to this JobRun
     * @deprecated Use MaxCapacity instead.
     */
    @Deprecated
    public Integer getAllocatedCapacity() {
        return allocatedCapacity;
    }

    /**
     * <p>This field is deprecated. Use MaxCapacity instead.</p>
     * <p>
     * The number of Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be
     * allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of
     * compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
     * </p>
     *
     * @param allocatedCapacity
     *        the number of Glue data processing units (DPUs) allocated to this JobRun
     * @return this {@code JobRun} instance, so that method calls can be chained together
     * @deprecated Use MaxCapacity instead.
     */
    @Deprecated
    public JobRun withAllocatedCapacity(Integer allocatedCapacity) {
        this.allocatedCapacity = allocatedCapacity;
        return this;
    }

    /**

* <p>The amount of time (in seconds) that the job run consumed resources.</p>
     *
     * @param executionTime
     *        the amount of time (in seconds) that the job run consumed resources
     */
    public void setExecutionTime(Integer executionTime) {
        this.executionTime = executionTime;
    }

    /**
     * <p>The amount of time (in seconds) that the job run consumed resources.</p>
     *
     * @return the amount of time (in seconds) that the job run consumed resources
     */
    public Integer getExecutionTime() {
        return executionTime;
    }

    /**
     * <p>The amount of time (in seconds) that the job run consumed resources.</p>
     *
     * @param executionTime
     *        the amount of time (in seconds) that the job run consumed resources
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withExecutionTime(Integer executionTime) {
        this.executionTime = executionTime;
        return this;
    }

    /**

* <p>
     * The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is
     * terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.
     * </p>
     * <p>
     * Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
     * </p>
     *
     * @param timeout
     *        the JobRun timeout in minutes
     */
    public void setTimeout(Integer timeout) {
        this.timeout = timeout;
    }

    /**
     * <p>
     * The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is
     * terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.
     * </p>
     * <p>
     * Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
     * </p>
     *
     * @return the JobRun timeout in minutes
     */
    public Integer getTimeout() {
        return timeout;
    }

    /**
     * <p>
     * The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is
     * terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.
     * </p>
     * <p>
     * Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
     * </p>
     *
     * @param timeout
     *        the JobRun timeout in minutes
     * @return this {@code JobRun} instance, so that method calls can be chained together
     */
    public JobRun withTimeout(Integer timeout) {
        this.timeout = timeout;
        return this;
    }

    /**

* <p>
     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing
     * units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that
     * consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing
     * page.
     * </p>
     * <p>
     * For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type
     * and the Number of workers.
     * </p>
     * <p>
     * Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
     * </p>
     * <p>
     * The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an
     * Apache Spark ETL job, or an Apache Spark streaming ETL job:
     * </p>
     * <ul>
     * <li>
     * When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1
     * DPU. The default is 0.0625 DPU.
     * </li>
     * <li>
     * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job
     * (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job
     * type cannot have a fractional DPU allocation.
     * </li>
     * </ul>
     *
     * @param maxCapacity
     *        the number of Glue data processing units (DPUs) that can be allocated when this job runs (Glue
     *        version 1.0 or earlier, standard worker type)
     */
    public void setMaxCapacity(Double maxCapacity) {
        this.maxCapacity = maxCapacity;
    }

    /**

     * <p>
     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units
     * (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists
     * of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the
     * <a href="https://aws.amazon.com/glue/pricing/">Glue pricing page</a>.
     * </p>
     * <p>
     * For Glue version 2.0+ jobs, you cannot specify a Maximum capacity; specify a <code>Worker type</code> and the
     * <code>Number of workers</code> instead. Do not set <code>MaxCapacity</code> if using <code>WorkerType</code>
     * and <code>NumberOfWorkers</code>.
     * </p>
     * <p>
     * The value that can be allocated for <code>MaxCapacity</code> depends on the job type:
     * </p>
     * <ul>
     * <li>Python shell job (<code>JobCommand.Name</code>="pythonshell"): either 0.0625 or 1 DPU; the default is
     * 0.0625 DPU.</li>
     * <li>Apache Spark ETL job (<code>JobCommand.Name</code>="glueetl") or Apache Spark streaming ETL job
     * (<code>JobCommand.Name</code>="gluestreaming"): from 2 to 100 DPUs; the default is 10 DPUs. This job type
     * cannot have a fractional DPU allocation.</li>
     * </ul>
     *
     * @return The number of Glue data processing units (DPUs) that can be allocated when this job runs (Glue version
     *         1.0 or earlier, standard worker type).
     */
    public Double getMaxCapacity() {
        return this.maxCapacity;
    }

    /**

     * <p>
     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units
     * (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists
     * of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the
     * <a href="https://aws.amazon.com/glue/pricing/">Glue pricing page</a>.
     * </p>
     * <p>
     * For Glue version 2.0+ jobs, you cannot specify a Maximum capacity; specify a <code>Worker type</code> and the
     * <code>Number of workers</code> instead. Do not set <code>MaxCapacity</code> if using <code>WorkerType</code>
     * and <code>NumberOfWorkers</code>.
     * </p>
     * <p>
     * The value that can be allocated for <code>MaxCapacity</code> depends on the job type:
     * </p>
     * <ul>
     * <li>Python shell job (<code>JobCommand.Name</code>="pythonshell"): either 0.0625 or 1 DPU; the default is
     * 0.0625 DPU.</li>
     * <li>Apache Spark ETL job (<code>JobCommand.Name</code>="glueetl") or Apache Spark streaming ETL job
     * (<code>JobCommand.Name</code>="gluestreaming"): from 2 to 100 DPUs; the default is 10 DPUs. This job type
     * cannot have a fractional DPU allocation.</li>
     * </ul>
     *
     * @param maxCapacity
     *        The number of Glue data processing units (DPUs) that can be allocated when this job runs (Glue version
     *        1.0 or earlier, standard worker type). See the per-job-type limits above; do not set this together with
     *        <code>WorkerType</code> and <code>NumberOfWorkers</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public JobRun withMaxCapacity(Double maxCapacity) {
        setMaxCapacity(maxCapacity);
        return this;
    }

    /**

     * <p>
     * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or
     * G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
     * </p>
     * <ul>
     * <li>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk
     * (approximately 34GB free), and provides 1 executor per worker. Recommended for workloads such as data
     * transforms, joins, and queries, offering a scalable and cost effective way to run most jobs.</li>
     * <li>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk
     * (approximately 77GB free), and provides 1 executor per worker. Recommended for workloads such as data
     * transforms, joins, and queries, offering a scalable and cost effective way to run most jobs.</li>
     * <li>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB
     * disk (approximately 235GB free), and provides 1 executor per worker. Recommended for jobs whose workloads
     * contain your most demanding transforms, aggregations, joins, and queries. Available only for Glue version 3.0
     * or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US
     * West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe
     * (Frankfurt), Europe (Ireland), and Europe (Stockholm).</li>
     * <li>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB
     * disk (approximately 487GB free), and provides 1 executor per worker. Recommended for jobs whose workloads
     * contain your most demanding transforms, aggregations, joins, and queries. Available only for Glue version 3.0
     * or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the <code>G.4X</code> worker
     * type.</li>
     * <li>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB
     * disk (approximately 34GB free), and provides 1 executor per worker. Recommended for low volume streaming jobs.
     * Only available for Glue version 3.0 streaming jobs.</li>
     * <li>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB
     * disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.</li>
     * </ul>
     *
     * @param workerType
     *        The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X,
     *        G.8X or G.025X for Spark jobs; accepts the value Z.2X for Ray jobs.
     * @see WorkerType
     */
    public void setWorkerType(String workerType) {
        this.workerType = workerType;
    }

    /**

     * <p>
     * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or
     * G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
     * </p>
     * <ul>
     * <li><code>G.1X</code>: 1 DPU (4 vCPUs, 16 GB of memory), 84GB disk (approximately 34GB free), 1 executor per
     * worker; recommended for most jobs (data transforms, joins, and queries).</li>
     * <li><code>G.2X</code>: 2 DPU (8 vCPUs, 32 GB of memory), 128GB disk (approximately 77GB free), 1 executor per
     * worker; recommended for most jobs (data transforms, joins, and queries).</li>
     * <li><code>G.4X</code>: 4 DPU (16 vCPUs, 64 GB of memory), 256GB disk (approximately 235GB free), 1 executor per
     * worker; for the most demanding workloads. Glue version 3.0 or later Spark ETL jobs only, in the following
     * Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore),
     * Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe
     * (Stockholm).</li>
     * <li><code>G.8X</code>: 8 DPU (32 vCPUs, 128 GB of memory), 512GB disk (approximately 487GB free), 1 executor
     * per worker; for the most demanding workloads. Glue version 3.0 or later Spark ETL jobs only, in the same
     * Regions as supported for <code>G.4X</code>.</li>
     * <li><code>G.025X</code>: 0.25 DPU (2 vCPUs, 4 GB of memory), 84GB disk (approximately 34GB free), 1 executor
     * per worker; for low volume streaming jobs. Glue version 3.0 streaming jobs only.</li>
     * <li><code>Z.2X</code>: 2 M-DPU (8vCPUs, 64 GB of memory), 128 GB disk (approximately 120GB free); provides up
     * to 8 Ray workers based on the autoscaler.</li>
     * </ul>
     *
     * @return The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X,
     *         G.8X or G.025X for Spark jobs; accepts the value Z.2X for Ray jobs.
     * @see WorkerType
     */
    public String getWorkerType() {
        return this.workerType;
    }

    /**

     * <p>
     * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or
     * G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
     * </p>
     * <ul>
     * <li><code>G.1X</code>: 1 DPU (4 vCPUs, 16 GB of memory), 84GB disk (approximately 34GB free), 1 executor per
     * worker; recommended for most jobs (data transforms, joins, and queries).</li>
     * <li><code>G.2X</code>: 2 DPU (8 vCPUs, 32 GB of memory), 128GB disk (approximately 77GB free), 1 executor per
     * worker; recommended for most jobs (data transforms, joins, and queries).</li>
     * <li><code>G.4X</code>: 4 DPU (16 vCPUs, 64 GB of memory), 256GB disk (approximately 235GB free), 1 executor per
     * worker; for the most demanding workloads. Glue version 3.0 or later Spark ETL jobs only, in the following
     * Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore),
     * Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe
     * (Stockholm).</li>
     * <li><code>G.8X</code>: 8 DPU (32 vCPUs, 128 GB of memory), 512GB disk (approximately 487GB free), 1 executor
     * per worker; for the most demanding workloads. Glue version 3.0 or later Spark ETL jobs only, in the same
     * Regions as supported for <code>G.4X</code>.</li>
     * <li><code>G.025X</code>: 0.25 DPU (2 vCPUs, 4 GB of memory), 84GB disk (approximately 34GB free), 1 executor
     * per worker; for low volume streaming jobs. Glue version 3.0 streaming jobs only.</li>
     * <li><code>Z.2X</code>: 2 M-DPU (8vCPUs, 64 GB of memory), 128 GB disk (approximately 120GB free); provides up
     * to 8 Ray workers based on the autoscaler.</li>
     * </ul>
     *
     * @param workerType
     *        The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X,
     *        G.8X or G.025X for Spark jobs; accepts the value Z.2X for Ray jobs.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see WorkerType
     */
    public JobRun withWorkerType(String workerType) {
        setWorkerType(workerType);
        return this;
    }

    /**

     * <p>
     * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or
     * G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
     * </p>
     * <ul>
     * <li><code>G.1X</code>: 1 DPU (4 vCPUs, 16 GB of memory), 84GB disk (approximately 34GB free), 1 executor per
     * worker; recommended for most jobs (data transforms, joins, and queries).</li>
     * <li><code>G.2X</code>: 2 DPU (8 vCPUs, 32 GB of memory), 128GB disk (approximately 77GB free), 1 executor per
     * worker; recommended for most jobs (data transforms, joins, and queries).</li>
     * <li><code>G.4X</code>: 4 DPU (16 vCPUs, 64 GB of memory), 256GB disk (approximately 235GB free), 1 executor per
     * worker; for the most demanding workloads. Glue version 3.0 or later Spark ETL jobs only, in the following
     * Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore),
     * Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe
     * (Stockholm).</li>
     * <li><code>G.8X</code>: 8 DPU (32 vCPUs, 128 GB of memory), 512GB disk (approximately 487GB free), 1 executor
     * per worker; for the most demanding workloads. Glue version 3.0 or later Spark ETL jobs only, in the same
     * Regions as supported for <code>G.4X</code>.</li>
     * <li><code>G.025X</code>: 0.25 DPU (2 vCPUs, 4 GB of memory), 84GB disk (approximately 34GB free), 1 executor
     * per worker; for low volume streaming jobs. Glue version 3.0 streaming jobs only.</li>
     * <li><code>Z.2X</code>: 2 M-DPU (8vCPUs, 64 GB of memory), 128 GB disk (approximately 120GB free); provides up
     * to 8 Ray workers based on the autoscaler.</li>
     * </ul>
     *
     * @param workerType
     *        The type of predefined worker that is allocated when a job runs. Must not be <code>null</code>; this
     *        overload dereferences the enum value.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see WorkerType
     */
    public JobRun withWorkerType(WorkerType workerType) {
        // Unlike withWorkerType(String), this calls toString() on the argument,
        // so a null workerType throws NullPointerException here.
        this.workerType = workerType.toString();
        return this;
    }

    /**

     * <p>
     * The number of workers of a defined <code>workerType</code> that are allocated when a job runs.
     * </p>
     *
     * @param numberOfWorkers
     *        The number of workers of a defined <code>workerType</code> that are allocated when a job runs.
     */
    public void setNumberOfWorkers(Integer numberOfWorkers) {
        this.numberOfWorkers = numberOfWorkers;
    }

    /**

     * <p>
     * The number of workers of a defined <code>workerType</code> that are allocated when a job runs.
     * </p>
     *
     * @return The number of workers of a defined <code>workerType</code> that are allocated when a job runs.
     */
    public Integer getNumberOfWorkers() {
        return this.numberOfWorkers;
    }

    /**

     * <p>
     * The number of workers of a defined <code>workerType</code> that are allocated when a job runs.
     * </p>
     *
     * @param numberOfWorkers
     *        The number of workers of a defined <code>workerType</code> that are allocated when a job runs.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public JobRun withNumberOfWorkers(Integer numberOfWorkers) {
        setNumberOfWorkers(numberOfWorkers);
        return this;
    }

    /**

     * <p>
     * The name of the <code>SecurityConfiguration</code> structure to be used with this job run.
     * </p>
     *
     * @param securityConfiguration
     *        The name of the <code>SecurityConfiguration</code> structure to be used with this job run.
     */
    public void setSecurityConfiguration(String securityConfiguration) {
        this.securityConfiguration = securityConfiguration;
    }

    /**

     * <p>
     * The name of the <code>SecurityConfiguration</code> structure to be used with this job run.
     * </p>
     *
     * @return The name of the <code>SecurityConfiguration</code> structure to be used with this job run.
     */
    public String getSecurityConfiguration() {
        return this.securityConfiguration;
    }

    /**

     * <p>
     * The name of the <code>SecurityConfiguration</code> structure to be used with this job run.
     * </p>
     *
     * @param securityConfiguration
     *        The name of the <code>SecurityConfiguration</code> structure to be used with this job run.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public JobRun withSecurityConfiguration(String securityConfiguration) {
        setSecurityConfiguration(securityConfiguration);
        return this;
    }

    /**

     * <p>
     * The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using KMS.
     * This name can be <code>/aws-glue/jobs/</code>, in which case the default encryption is <code>NONE</code>. If
     * you add a role name and <code>SecurityConfiguration</code> name (in other words,
     * <code>/aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/</code>), then that security configuration is
     * used to encrypt the log group.
     * </p>
     *
     * @param logGroupName
     *        The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch
     *        using KMS. This name can be <code>/aws-glue/jobs/</code>, in which case the default encryption is
     *        <code>NONE</code>. If you add a role name and <code>SecurityConfiguration</code> name (in other words,
     *        <code>/aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/</code>), then that security
     *        configuration is used to encrypt the log group.
     */
    public void setLogGroupName(String logGroupName) {
        this.logGroupName = logGroupName;
    }

    /**

                * The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using KMS. * This name can be /aws-glue/jobs/, in which case the default encryption is NONE. If you * add a role name and SecurityConfiguration name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then that security configuration is * used to encrypt the log group. *

                * * @return The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using * KMS. This name can be /aws-glue/jobs/, in which case the default encryption is * NONE. If you add a role name and SecurityConfiguration name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then that security * configuration is used to encrypt the log group. */ public String getLogGroupName() { return this.logGroupName; } /** *

                * The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using KMS. * This name can be /aws-glue/jobs/, in which case the default encryption is NONE. If you * add a role name and SecurityConfiguration name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then that security configuration is * used to encrypt the log group. *

                * * @param logGroupName * The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using * KMS. This name can be /aws-glue/jobs/, in which case the default encryption is * NONE. If you add a role name and SecurityConfiguration name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then that security configuration * is used to encrypt the log group. * @return Returns a reference to this object so that method calls can be chained together. */ public JobRun withLogGroupName(String logGroupName) { setLogGroupName(logGroupName); return this; } /** *

                * Specifies configuration properties of a job run notification. *

                * * @param notificationProperty * Specifies configuration properties of a job run notification. */ public void setNotificationProperty(NotificationProperty notificationProperty) { this.notificationProperty = notificationProperty; } /** *

                * Specifies configuration properties of a job run notification. *

                * * @return Specifies configuration properties of a job run notification. */ public NotificationProperty getNotificationProperty() { return this.notificationProperty; } /** *

                * Specifies configuration properties of a job run notification. *

                * * @param notificationProperty * Specifies configuration properties of a job run notification. * @return Returns a reference to this object so that method calls can be chained together. */ public JobRun withNotificationProperty(NotificationProperty notificationProperty) { setNotificationProperty(notificationProperty); return this; } /** *

                * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in * a job. The Python version indicates the version supported for jobs of type Spark. *

                *

                * Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python * and additional libraries available in your Ray job are determined by the Runtime parameter of the * Job command. *

                *

                * For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *

                *

                * Jobs that are created without specifying a Glue version default to Glue 0.9. *

                * * @param glueVersion * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue * available in a job. The Python version indicates the version supported for jobs of type Spark.

                *

                * Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, * Python and additional libraries available in your Ray job are determined by the Runtime * parameter of the Job command. *

                *

                * For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *

                *

                * Jobs that are created without specifying a Glue version default to Glue 0.9. */ public void setGlueVersion(String glueVersion) { this.glueVersion = glueVersion; } /** *

                * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in * a job. The Python version indicates the version supported for jobs of type Spark. *

                *

                * Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python * and additional libraries available in your Ray job are determined by the Runtime parameter of the * Job command. *

                *

                * For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *

                *

                * Jobs that are created without specifying a Glue version default to Glue 0.9. *

                * * @return In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue * available in a job. The Python version indicates the version supported for jobs of type Spark.

                *

                * Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of * Ray, Python and additional libraries available in your Ray job are determined by the Runtime * parameter of the Job command. *

                *

                * For more information about the available Glue versions and corresponding Spark and Python versions, see * Glue version in the developer * guide. *

                *

                * Jobs that are created without specifying a Glue version default to Glue 0.9. */ public String getGlueVersion() { return this.glueVersion; } /** *

                * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in * a job. The Python version indicates the version supported for jobs of type Spark. *

                *

                * Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python * and additional libraries available in your Ray job are determined by the Runtime parameter of the * Job command. *

                *

                * For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *

                *

                * Jobs that are created without specifying a Glue version default to Glue 0.9. *

                * * @param glueVersion * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue * available in a job. The Python version indicates the version supported for jobs of type Spark.

                *

                * Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, * Python and additional libraries available in your Ray job are determined by the Runtime * parameter of the Job command. *

                *

                * For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *

                *

                * Jobs that are created without specifying a Glue version default to Glue 0.9. * @return Returns a reference to this object so that method calls can be chained together. */ public JobRun withGlueVersion(String glueVersion) { setGlueVersion(glueVersion); return this; } /** *

                * This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the * lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, * or 0.25 for G.025X workers). This value may be different than the * executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number * of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible * that the value of DPUSeconds is less than executionEngineRuntime * * MaxCapacity. *

                * * @param dPUSeconds * This field populates only for Auto Scaling job runs, and represents the total time each executor ran * during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for * G.2X, or 0.25 for G.025X workers). This value may be different than the * executionEngineRuntime MaxCapacity as in the case of Auto Scaling jobs, as the * number of executors running at a given time may be less than the MaxCapacity. Therefore, it * is possible that the value of DPUSeconds is less than executionEngineRuntime * * MaxCapacity. */ public void setDPUSeconds(Double dPUSeconds) { this.dPUSeconds = dPUSeconds; } /** *

                * This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the * lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, * or 0.25 for G.025X workers). This value may be different than the * executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number * of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible * that the value of DPUSeconds is less than executionEngineRuntime * * MaxCapacity. *

                * * @return This field populates only for Auto Scaling job runs, and represents the total time each executor ran * during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for * G.2X, or 0.25 for G.025X workers). This value may be different than the * executionEngineRuntime MaxCapacity as in the case of Auto Scaling jobs, as the * number of executors running at a given time may be less than the MaxCapacity. Therefore, it * is possible that the value of DPUSeconds is less than executionEngineRuntime * * MaxCapacity. */ public Double getDPUSeconds() { return this.dPUSeconds; } /** *

                * This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the * lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, * or 0.25 for G.025X workers). This value may be different than the * executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number * of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible * that the value of DPUSeconds is less than executionEngineRuntime * * MaxCapacity. *

                * * @param dPUSeconds * This field populates only for Auto Scaling job runs, and represents the total time each executor ran * during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for * G.2X, or 0.25 for G.025X workers). This value may be different than the * executionEngineRuntime MaxCapacity as in the case of Auto Scaling jobs, as the * number of executors running at a given time may be less than the MaxCapacity. Therefore, it * is possible that the value of DPUSeconds is less than executionEngineRuntime * * MaxCapacity. * @return Returns a reference to this object so that method calls can be chained together. */ public JobRun withDPUSeconds(Double dPUSeconds) { setDPUSeconds(dPUSeconds); return this; } /** *

                * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *

                *

                * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. *

                *

                * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark jobs. *

                * * @param executionClass * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class * is ideal for time-sensitive workloads that require fast job startup and dedicated resources.

                *

                * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may * vary. *

                *

                * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark * jobs. * @see ExecutionClass */ public void setExecutionClass(String executionClass) { this.executionClass = executionClass; } /** *

                * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *

                *

                * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. *

                *

                * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark jobs. *

                * * @return Indicates whether the job is run with a standard or flexible execution class. The standard * execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated * resources.

                *

                * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times * may vary. *

                *

                * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark * jobs. * @see ExecutionClass */ public String getExecutionClass() { return this.executionClass; } /** *

                * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *

                *

                * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. *

                *

                * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark jobs. *

                * * @param executionClass * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class * is ideal for time-sensitive workloads that require fast job startup and dedicated resources.

                *

                * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may * vary. *

                *

                * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark * jobs. * @return Returns a reference to this object so that method calls can be chained together. * @see ExecutionClass */ public JobRun withExecutionClass(String executionClass) { setExecutionClass(executionClass); return this; } /** *

                * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *

                *

                * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. *

                *

                * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark jobs. *

                * * @param executionClass * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class * is ideal for time-sensitive workloads that require fast job startup and dedicated resources.

                *

                * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may * vary. *

                *

                * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark * jobs. * @return Returns a reference to this object so that method calls can be chained together. * @see ExecutionClass */ public JobRun withExecutionClass(ExecutionClass executionClass) { this.executionClass = executionClass.toString(); return this; } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getId() != null) sb.append("Id: ").append(getId()).append(","); if (getAttempt() != null) sb.append("Attempt: ").append(getAttempt()).append(","); if (getPreviousRunId() != null) sb.append("PreviousRunId: ").append(getPreviousRunId()).append(","); if (getTriggerName() != null) sb.append("TriggerName: ").append(getTriggerName()).append(","); if (getJobName() != null) sb.append("JobName: ").append(getJobName()).append(","); if (getStartedOn() != null) sb.append("StartedOn: ").append(getStartedOn()).append(","); if (getLastModifiedOn() != null) sb.append("LastModifiedOn: ").append(getLastModifiedOn()).append(","); if (getCompletedOn() != null) sb.append("CompletedOn: ").append(getCompletedOn()).append(","); if (getJobRunState() != null) sb.append("JobRunState: ").append(getJobRunState()).append(","); if (getArguments() != null) sb.append("Arguments: ").append(getArguments()).append(","); if (getErrorMessage() != null) sb.append("ErrorMessage: ").append(getErrorMessage()).append(","); if (getPredecessorRuns() != null) sb.append("PredecessorRuns: ").append(getPredecessorRuns()).append(","); if (getAllocatedCapacity() != null) sb.append("AllocatedCapacity: 
").append(getAllocatedCapacity()).append(","); if (getExecutionTime() != null) sb.append("ExecutionTime: ").append(getExecutionTime()).append(","); if (getTimeout() != null) sb.append("Timeout: ").append(getTimeout()).append(","); if (getMaxCapacity() != null) sb.append("MaxCapacity: ").append(getMaxCapacity()).append(","); if (getWorkerType() != null) sb.append("WorkerType: ").append(getWorkerType()).append(","); if (getNumberOfWorkers() != null) sb.append("NumberOfWorkers: ").append(getNumberOfWorkers()).append(","); if (getSecurityConfiguration() != null) sb.append("SecurityConfiguration: ").append(getSecurityConfiguration()).append(","); if (getLogGroupName() != null) sb.append("LogGroupName: ").append(getLogGroupName()).append(","); if (getNotificationProperty() != null) sb.append("NotificationProperty: ").append(getNotificationProperty()).append(","); if (getGlueVersion() != null) sb.append("GlueVersion: ").append(getGlueVersion()).append(","); if (getDPUSeconds() != null) sb.append("DPUSeconds: ").append(getDPUSeconds()).append(","); if (getExecutionClass() != null) sb.append("ExecutionClass: ").append(getExecutionClass()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof JobRun == false) return false; JobRun other = (JobRun) obj; if (other.getId() == null ^ this.getId() == null) return false; if (other.getId() != null && other.getId().equals(this.getId()) == false) return false; if (other.getAttempt() == null ^ this.getAttempt() == null) return false; if (other.getAttempt() != null && other.getAttempt().equals(this.getAttempt()) == false) return false; if (other.getPreviousRunId() == null ^ this.getPreviousRunId() == null) return false; if (other.getPreviousRunId() != null && other.getPreviousRunId().equals(this.getPreviousRunId()) == false) return false; if (other.getTriggerName() == null ^ this.getTriggerName() == null) return false; if 
(other.getTriggerName() != null && other.getTriggerName().equals(this.getTriggerName()) == false) return false; if (other.getJobName() == null ^ this.getJobName() == null) return false; if (other.getJobName() != null && other.getJobName().equals(this.getJobName()) == false) return false; if (other.getStartedOn() == null ^ this.getStartedOn() == null) return false; if (other.getStartedOn() != null && other.getStartedOn().equals(this.getStartedOn()) == false) return false; if (other.getLastModifiedOn() == null ^ this.getLastModifiedOn() == null) return false; if (other.getLastModifiedOn() != null && other.getLastModifiedOn().equals(this.getLastModifiedOn()) == false) return false; if (other.getCompletedOn() == null ^ this.getCompletedOn() == null) return false; if (other.getCompletedOn() != null && other.getCompletedOn().equals(this.getCompletedOn()) == false) return false; if (other.getJobRunState() == null ^ this.getJobRunState() == null) return false; if (other.getJobRunState() != null && other.getJobRunState().equals(this.getJobRunState()) == false) return false; if (other.getArguments() == null ^ this.getArguments() == null) return false; if (other.getArguments() != null && other.getArguments().equals(this.getArguments()) == false) return false; if (other.getErrorMessage() == null ^ this.getErrorMessage() == null) return false; if (other.getErrorMessage() != null && other.getErrorMessage().equals(this.getErrorMessage()) == false) return false; if (other.getPredecessorRuns() == null ^ this.getPredecessorRuns() == null) return false; if (other.getPredecessorRuns() != null && other.getPredecessorRuns().equals(this.getPredecessorRuns()) == false) return false; if (other.getAllocatedCapacity() == null ^ this.getAllocatedCapacity() == null) return false; if (other.getAllocatedCapacity() != null && other.getAllocatedCapacity().equals(this.getAllocatedCapacity()) == false) return false; if (other.getExecutionTime() == null ^ this.getExecutionTime() == null) return 
false; if (other.getExecutionTime() != null && other.getExecutionTime().equals(this.getExecutionTime()) == false) return false; if (other.getTimeout() == null ^ this.getTimeout() == null) return false; if (other.getTimeout() != null && other.getTimeout().equals(this.getTimeout()) == false) return false; if (other.getMaxCapacity() == null ^ this.getMaxCapacity() == null) return false; if (other.getMaxCapacity() != null && other.getMaxCapacity().equals(this.getMaxCapacity()) == false) return false; if (other.getWorkerType() == null ^ this.getWorkerType() == null) return false; if (other.getWorkerType() != null && other.getWorkerType().equals(this.getWorkerType()) == false) return false; if (other.getNumberOfWorkers() == null ^ this.getNumberOfWorkers() == null) return false; if (other.getNumberOfWorkers() != null && other.getNumberOfWorkers().equals(this.getNumberOfWorkers()) == false) return false; if (other.getSecurityConfiguration() == null ^ this.getSecurityConfiguration() == null) return false; if (other.getSecurityConfiguration() != null && other.getSecurityConfiguration().equals(this.getSecurityConfiguration()) == false) return false; if (other.getLogGroupName() == null ^ this.getLogGroupName() == null) return false; if (other.getLogGroupName() != null && other.getLogGroupName().equals(this.getLogGroupName()) == false) return false; if (other.getNotificationProperty() == null ^ this.getNotificationProperty() == null) return false; if (other.getNotificationProperty() != null && other.getNotificationProperty().equals(this.getNotificationProperty()) == false) return false; if (other.getGlueVersion() == null ^ this.getGlueVersion() == null) return false; if (other.getGlueVersion() != null && other.getGlueVersion().equals(this.getGlueVersion()) == false) return false; if (other.getDPUSeconds() == null ^ this.getDPUSeconds() == null) return false; if (other.getDPUSeconds() != null && other.getDPUSeconds().equals(this.getDPUSeconds()) == false) return false; if 
(other.getExecutionClass() == null ^ this.getExecutionClass() == null) return false; if (other.getExecutionClass() != null && other.getExecutionClass().equals(this.getExecutionClass()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getId() == null) ? 0 : getId().hashCode()); hashCode = prime * hashCode + ((getAttempt() == null) ? 0 : getAttempt().hashCode()); hashCode = prime * hashCode + ((getPreviousRunId() == null) ? 0 : getPreviousRunId().hashCode()); hashCode = prime * hashCode + ((getTriggerName() == null) ? 0 : getTriggerName().hashCode()); hashCode = prime * hashCode + ((getJobName() == null) ? 0 : getJobName().hashCode()); hashCode = prime * hashCode + ((getStartedOn() == null) ? 0 : getStartedOn().hashCode()); hashCode = prime * hashCode + ((getLastModifiedOn() == null) ? 0 : getLastModifiedOn().hashCode()); hashCode = prime * hashCode + ((getCompletedOn() == null) ? 0 : getCompletedOn().hashCode()); hashCode = prime * hashCode + ((getJobRunState() == null) ? 0 : getJobRunState().hashCode()); hashCode = prime * hashCode + ((getArguments() == null) ? 0 : getArguments().hashCode()); hashCode = prime * hashCode + ((getErrorMessage() == null) ? 0 : getErrorMessage().hashCode()); hashCode = prime * hashCode + ((getPredecessorRuns() == null) ? 0 : getPredecessorRuns().hashCode()); hashCode = prime * hashCode + ((getAllocatedCapacity() == null) ? 0 : getAllocatedCapacity().hashCode()); hashCode = prime * hashCode + ((getExecutionTime() == null) ? 0 : getExecutionTime().hashCode()); hashCode = prime * hashCode + ((getTimeout() == null) ? 0 : getTimeout().hashCode()); hashCode = prime * hashCode + ((getMaxCapacity() == null) ? 0 : getMaxCapacity().hashCode()); hashCode = prime * hashCode + ((getWorkerType() == null) ? 0 : getWorkerType().hashCode()); hashCode = prime * hashCode + ((getNumberOfWorkers() == null) ? 
0 : getNumberOfWorkers().hashCode()); hashCode = prime * hashCode + ((getSecurityConfiguration() == null) ? 0 : getSecurityConfiguration().hashCode()); hashCode = prime * hashCode + ((getLogGroupName() == null) ? 0 : getLogGroupName().hashCode()); hashCode = prime * hashCode + ((getNotificationProperty() == null) ? 0 : getNotificationProperty().hashCode()); hashCode = prime * hashCode + ((getGlueVersion() == null) ? 0 : getGlueVersion().hashCode()); hashCode = prime * hashCode + ((getDPUSeconds() == null) ? 0 : getDPUSeconds().hashCode()); hashCode = prime * hashCode + ((getExecutionClass() == null) ? 0 : getExecutionClass().hashCode()); return hashCode; } @Override public JobRun clone() { try { return (JobRun) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } @com.amazonaws.annotation.SdkInternalApi @Override public void marshall(ProtocolMarshaller protocolMarshaller) { com.amazonaws.services.glue.model.transform.JobRunMarshaller.getInstance().marshall(this, protocolMarshaller); } }