/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/glue/Glue_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/DateTime.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/glue/model/JobRunState.h>
#include <aws/glue/model/Predecessor.h>
#include <aws/glue/model/NotificationProperty.h>
#include <aws/glue/model/WorkerType.h>
#include <aws/glue/model/ExecutionClass.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace Glue
{
namespace Model
{

  /**
   * Contains information about a job run.
   *
   * See Also: AWS API Reference
   */
  class AWS_GLUE_API JobRun
  {
  public:
    JobRun();
    JobRun(Aws::Utils::Json::JsonView jsonValue);
    JobRun& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;

    /**
     * The ID of this job run.
*/ inline const Aws::String& GetId() const{ return m_id; } /** *The ID of this job run.
*/ inline bool IdHasBeenSet() const { return m_idHasBeenSet; } /** *The ID of this job run.
*/ inline void SetId(const Aws::String& value) { m_idHasBeenSet = true; m_id = value; } /** *The ID of this job run.
*/ inline void SetId(Aws::String&& value) { m_idHasBeenSet = true; m_id = std::move(value); } /** *The ID of this job run.
*/ inline void SetId(const char* value) { m_idHasBeenSet = true; m_id.assign(value); } /** *The ID of this job run.
*/ inline JobRun& WithId(const Aws::String& value) { SetId(value); return *this;} /** *The ID of this job run.
*/ inline JobRun& WithId(Aws::String&& value) { SetId(std::move(value)); return *this;} /** *The ID of this job run.
*/ inline JobRun& WithId(const char* value) { SetId(value); return *this;} /** *The number of the attempt to run this job.
*/ inline int GetAttempt() const{ return m_attempt; } /** *The number of the attempt to run this job.
*/ inline bool AttemptHasBeenSet() const { return m_attemptHasBeenSet; } /** *The number of the attempt to run this job.
*/ inline void SetAttempt(int value) { m_attemptHasBeenSet = true; m_attempt = value; } /** *The number of the attempt to run this job.
*/ inline JobRun& WithAttempt(int value) { SetAttempt(value); return *this;}

/**
 * The ID of the previous run of this job. For example, the JobRunId specified
 * in the StartJobRun action.
 */
inline const Aws::String& GetPreviousRunId() const{ return m_previousRunId; }
inline bool PreviousRunIdHasBeenSet() const { return m_previousRunIdHasBeenSet; }
inline void SetPreviousRunId(const Aws::String& value) { m_previousRunIdHasBeenSet = true; m_previousRunId = value; }
inline void SetPreviousRunId(Aws::String&& value) { m_previousRunIdHasBeenSet = true; m_previousRunId = std::move(value); }
inline void SetPreviousRunId(const char* value) { m_previousRunIdHasBeenSet = true; m_previousRunId.assign(value); }
inline JobRun& WithPreviousRunId(const Aws::String& value) { SetPreviousRunId(value); return *this;}
inline JobRun& WithPreviousRunId(Aws::String&& value) { SetPreviousRunId(std::move(value)); return *this;}
inline JobRun& WithPreviousRunId(const char* value) { SetPreviousRunId(value); return *this;}

/**
 * The name of the trigger that started this job run.
*/ inline const Aws::String& GetTriggerName() const{ return m_triggerName; } /** *The name of the trigger that started this job run.
*/ inline bool TriggerNameHasBeenSet() const { return m_triggerNameHasBeenSet; } /** *The name of the trigger that started this job run.
*/ inline void SetTriggerName(const Aws::String& value) { m_triggerNameHasBeenSet = true; m_triggerName = value; } /** *The name of the trigger that started this job run.
*/ inline void SetTriggerName(Aws::String&& value) { m_triggerNameHasBeenSet = true; m_triggerName = std::move(value); } /** *The name of the trigger that started this job run.
*/ inline void SetTriggerName(const char* value) { m_triggerNameHasBeenSet = true; m_triggerName.assign(value); } /** *The name of the trigger that started this job run.
*/ inline JobRun& WithTriggerName(const Aws::String& value) { SetTriggerName(value); return *this;} /** *The name of the trigger that started this job run.
*/ inline JobRun& WithTriggerName(Aws::String&& value) { SetTriggerName(std::move(value)); return *this;} /** *The name of the trigger that started this job run.
*/ inline JobRun& WithTriggerName(const char* value) { SetTriggerName(value); return *this;} /** *The name of the job definition being used in this run.
*/ inline const Aws::String& GetJobName() const{ return m_jobName; } /** *The name of the job definition being used in this run.
*/ inline bool JobNameHasBeenSet() const { return m_jobNameHasBeenSet; } /** *The name of the job definition being used in this run.
*/ inline void SetJobName(const Aws::String& value) { m_jobNameHasBeenSet = true; m_jobName = value; } /** *The name of the job definition being used in this run.
*/ inline void SetJobName(Aws::String&& value) { m_jobNameHasBeenSet = true; m_jobName = std::move(value); } /** *The name of the job definition being used in this run.
*/ inline void SetJobName(const char* value) { m_jobNameHasBeenSet = true; m_jobName.assign(value); } /** *The name of the job definition being used in this run.
*/ inline JobRun& WithJobName(const Aws::String& value) { SetJobName(value); return *this;} /** *The name of the job definition being used in this run.
*/ inline JobRun& WithJobName(Aws::String&& value) { SetJobName(std::move(value)); return *this;} /** *The name of the job definition being used in this run.
*/ inline JobRun& WithJobName(const char* value) { SetJobName(value); return *this;} /** *The date and time at which this job run was started.
*/ inline const Aws::Utils::DateTime& GetStartedOn() const{ return m_startedOn; } /** *The date and time at which this job run was started.
*/ inline bool StartedOnHasBeenSet() const { return m_startedOnHasBeenSet; } /** *The date and time at which this job run was started.
*/ inline void SetStartedOn(const Aws::Utils::DateTime& value) { m_startedOnHasBeenSet = true; m_startedOn = value; } /** *The date and time at which this job run was started.
*/ inline void SetStartedOn(Aws::Utils::DateTime&& value) { m_startedOnHasBeenSet = true; m_startedOn = std::move(value); } /** *The date and time at which this job run was started.
*/ inline JobRun& WithStartedOn(const Aws::Utils::DateTime& value) { SetStartedOn(value); return *this;} /** *The date and time at which this job run was started.
*/ inline JobRun& WithStartedOn(Aws::Utils::DateTime&& value) { SetStartedOn(std::move(value)); return *this;} /** *The last time that this job run was modified.
*/ inline const Aws::Utils::DateTime& GetLastModifiedOn() const{ return m_lastModifiedOn; } /** *The last time that this job run was modified.
*/ inline bool LastModifiedOnHasBeenSet() const { return m_lastModifiedOnHasBeenSet; } /** *The last time that this job run was modified.
*/ inline void SetLastModifiedOn(const Aws::Utils::DateTime& value) { m_lastModifiedOnHasBeenSet = true; m_lastModifiedOn = value; } /** *The last time that this job run was modified.
*/ inline void SetLastModifiedOn(Aws::Utils::DateTime&& value) { m_lastModifiedOnHasBeenSet = true; m_lastModifiedOn = std::move(value); } /** *The last time that this job run was modified.
*/ inline JobRun& WithLastModifiedOn(const Aws::Utils::DateTime& value) { SetLastModifiedOn(value); return *this;} /** *The last time that this job run was modified.
*/ inline JobRun& WithLastModifiedOn(Aws::Utils::DateTime&& value) { SetLastModifiedOn(std::move(value)); return *this;} /** *The date and time that this job run completed.
*/ inline const Aws::Utils::DateTime& GetCompletedOn() const{ return m_completedOn; } /** *The date and time that this job run completed.
*/ inline bool CompletedOnHasBeenSet() const { return m_completedOnHasBeenSet; } /** *The date and time that this job run completed.
*/ inline void SetCompletedOn(const Aws::Utils::DateTime& value) { m_completedOnHasBeenSet = true; m_completedOn = value; } /** *The date and time that this job run completed.
*/ inline void SetCompletedOn(Aws::Utils::DateTime&& value) { m_completedOnHasBeenSet = true; m_completedOn = std::move(value); } /** *The date and time that this job run completed.
*/ inline JobRun& WithCompletedOn(const Aws::Utils::DateTime& value) { SetCompletedOn(value); return *this;} /** *The date and time that this job run completed.
*/ inline JobRun& WithCompletedOn(Aws::Utils::DateTime&& value) { SetCompletedOn(std::move(value)); return *this;}

/**
 * The current state of the job run. For more information about the statuses
 * of jobs that have terminated abnormally, see Glue Job Run Statuses in the
 * developer guide.
 */
inline const JobRunState& GetJobRunState() const{ return m_jobRunState; }
inline bool JobRunStateHasBeenSet() const { return m_jobRunStateHasBeenSet; }
inline void SetJobRunState(const JobRunState& value) { m_jobRunStateHasBeenSet = true; m_jobRunState = value; }
inline void SetJobRunState(JobRunState&& value) { m_jobRunStateHasBeenSet = true; m_jobRunState = std::move(value); }
inline JobRun& WithJobRunState(const JobRunState& value) { SetJobRunState(value); return *this;}
inline JobRun& WithJobRunState(JobRunState&& value) { SetJobRunState(std::move(value)); return *this;}

/**
 * The job arguments associated with this run. For this job run, they replace
 * the default arguments set in the job definition itself. You can specify
 * arguments here that your own job-execution script consumes, as well as
 * arguments that Glue itself consumes.
 *
 * Job arguments may be logged. Do not pass plaintext secrets as arguments.
 * Retrieve secrets from a Glue Connection, Secrets Manager, or another secret
 * management mechanism if you intend to keep them within the job.
 *
 * For information about how to specify and consume your own job arguments,
 * see the Calling Glue APIs in Python topic in the developer guide. For
 * information about the arguments you can provide to this field when
 * configuring Spark jobs, see the Special Parameters Used by Glue topic in
 * the developer guide. For information about the arguments you can provide
 * when configuring Ray jobs, see Using job parameters in Ray jobs in the
 * developer guide.
 */
inline const Aws::Map<Aws::String, Aws::String>& GetArguments() const{ return m_arguments; }
inline bool ArgumentsHasBeenSet() const { return m_argumentsHasBeenSet; }
inline void SetArguments(const Aws::Map<Aws::String, Aws::String>& value) { m_argumentsHasBeenSet = true; m_arguments = value; }
inline void SetArguments(Aws::Map<Aws::String, Aws::String>&& value) { m_argumentsHasBeenSet = true; m_arguments = std::move(value); }
inline JobRun& WithArguments(const Aws::Map<Aws::String, Aws::String>& value) { SetArguments(value); return *this;}
inline JobRun& WithArguments(Aws::Map<Aws::String, Aws::String>&& value) { SetArguments(std::move(value)); return *this;}
inline JobRun& AddArguments(const Aws::String& key, const Aws::String& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, value); return *this; }
inline JobRun& AddArguments(Aws::String&& key, const Aws::String& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(std::move(key), value); return *this; }
inline JobRun& AddArguments(const Aws::String& key, Aws::String&& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, std::move(value)); return *this; }
inline JobRun& AddArguments(Aws::String&& key, Aws::String&& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(std::move(key), std::move(value)); return *this; }
inline JobRun& AddArguments(const char* key, Aws::String&& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, std::move(value)); return *this; }
inline JobRun& AddArguments(Aws::String&& key, const char* value) { m_argumentsHasBeenSet = true; m_arguments.emplace(std::move(key), value); return *this; }
inline JobRun& AddArguments(const char* key, const char* value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, value); return *this; }

/**
 * An error message associated with this job run.
*/ inline const Aws::String& GetErrorMessage() const{ return m_errorMessage; } /** *An error message associated with this job run.
*/ inline bool ErrorMessageHasBeenSet() const { return m_errorMessageHasBeenSet; } /** *An error message associated with this job run.
*/ inline void SetErrorMessage(const Aws::String& value) { m_errorMessageHasBeenSet = true; m_errorMessage = value; } /** *An error message associated with this job run.
*/ inline void SetErrorMessage(Aws::String&& value) { m_errorMessageHasBeenSet = true; m_errorMessage = std::move(value); } /** *An error message associated with this job run.
*/ inline void SetErrorMessage(const char* value) { m_errorMessageHasBeenSet = true; m_errorMessage.assign(value); } /** *An error message associated with this job run.
*/ inline JobRun& WithErrorMessage(const Aws::String& value) { SetErrorMessage(value); return *this;} /** *An error message associated with this job run.
*/ inline JobRun& WithErrorMessage(Aws::String&& value) { SetErrorMessage(std::move(value)); return *this;} /** *An error message associated with this job run.
*/ inline JobRun& WithErrorMessage(const char* value) { SetErrorMessage(value); return *this;}

/**
 * A list of predecessors to this job run.
 */
inline const Aws::Vector<Predecessor>& GetPredecessorRuns() const{ return m_predecessorRuns; }
inline bool PredecessorRunsHasBeenSet() const { return m_predecessorRunsHasBeenSet; }
inline void SetPredecessorRuns(const Aws::Vector<Predecessor>& value) { m_predecessorRunsHasBeenSet = true; m_predecessorRuns = value; }
inline void SetPredecessorRuns(Aws::Vector<Predecessor>&& value) { m_predecessorRunsHasBeenSet = true; m_predecessorRuns = std::move(value); }
inline JobRun& WithPredecessorRuns(const Aws::Vector<Predecessor>& value) { SetPredecessorRuns(value); return *this;}
inline JobRun& WithPredecessorRuns(Aws::Vector<Predecessor>&& value) { SetPredecessorRuns(std::move(value)); return *this;}
inline JobRun& AddPredecessorRuns(const Predecessor& value) { m_predecessorRunsHasBeenSet = true; m_predecessorRuns.push_back(value); return *this; }
inline JobRun& AddPredecessorRuns(Predecessor&& value) { m_predecessorRunsHasBeenSet = true; m_predecessorRuns.push_back(std::move(value)); return *this; }

/**
 * The amount of time (in seconds) that the job run consumed resources.
*/ inline int GetExecutionTime() const{ return m_executionTime; } /** *The amount of time (in seconds) that the job run consumed resources.
*/ inline bool ExecutionTimeHasBeenSet() const { return m_executionTimeHasBeenSet; } /** *The amount of time (in seconds) that the job run consumed resources.
*/ inline void SetExecutionTime(int value) { m_executionTimeHasBeenSet = true; m_executionTime = value; } /** *The amount of time (in seconds) that the job run consumed resources.
*/ inline JobRun& WithExecutionTime(int value) { SetExecutionTime(value); return *this;}

/**
 * The JobRun timeout in minutes. This is the maximum time that a job run can
 * consume resources before it is terminated and enters TIMEOUT status. This
 * value overrides the timeout value set in the parent job. Streaming jobs do
 * not have a timeout. The default for non-streaming jobs is 2,880 minutes
 * (48 hours).
 */
inline int GetTimeout() const{ return m_timeout; }
inline bool TimeoutHasBeenSet() const { return m_timeoutHasBeenSet; }
inline void SetTimeout(int value) { m_timeoutHasBeenSet = true; m_timeout = value; }
inline JobRun& WithTimeout(int value) { SetTimeout(value); return *this;}

/**
 * For Glue version 1.0 or earlier jobs, using the standard worker type, the
 * number of Glue data processing units (DPUs) that can be allocated when this
 * job runs. A DPU is a relative measure of processing power that consists of
 * 4 vCPUs of compute capacity and 16 GB of memory. For more information, see
 * the Glue pricing page.
 *
 * For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead,
 * you should specify a Worker type and the Number of workers. Do not set
 * MaxCapacity if using WorkerType and NumberOfWorkers.
 *
 * The value that can be allocated for MaxCapacity depends on whether you are
 * running a Python shell job, an Apache Spark ETL job, or an Apache Spark
 * streaming ETL job:
 *
 * - When you specify a Python shell job (JobCommand.Name="pythonshell"), you
 *   can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
 * - When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or
 *   Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can
 *   allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot
 *   have a fractional DPU allocation.
 */
inline double GetMaxCapacity() const{ return m_maxCapacity; }
inline bool MaxCapacityHasBeenSet() const { return m_maxCapacityHasBeenSet; }
inline void SetMaxCapacity(double value) { m_maxCapacityHasBeenSet = true; m_maxCapacity = value; }
inline JobRun& WithMaxCapacity(double value) { SetMaxCapacity(value); return *this;}
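// Usage sketch (illustrative, not part of the generated model): the fluent
// With* mutators return JobRun&, so a value can be populated in one chained
// expression. The numbers below are hypothetical.
//
//   Aws::Glue::Model::JobRun run;
//   run.WithTimeout(120)        // minutes; overrides the parent job's timeout
//      .WithMaxCapacity(10.0);  // DPUs; do not combine with WorkerType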
/**
 * The type of predefined worker that is allocated when a job runs. Accepts a
 * value of G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the
 * value Z.2X for Ray jobs.
 *
 * For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of
 * memory) with 84 GB disk (approximately 34 GB free), and provides 1 executor
 * per worker. We recommend this worker type for workloads such as data
 * transforms, joins, and queries, offering a scalable and cost-effective way
 * to run most jobs.
 *
 * For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of
 * memory) with 128 GB disk (approximately 77 GB free), and provides 1
 * executor per worker. We recommend this worker type for workloads such as
 * data transforms, joins, and queries, offering a scalable and cost-effective
 * way to run most jobs.
 *
 * For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of
 * memory) with 256 GB disk (approximately 235 GB free), and provides 1
 * executor per worker. We recommend this worker type for jobs whose workloads
 * contain your most demanding transforms, aggregations, joins, and queries.
 * This worker type is available only for Glue version 3.0 or later Spark ETL
 * jobs in the following Amazon Web Services Regions: US East (Ohio), US East
 * (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific
 * (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt),
 * Europe (Ireland), and Europe (Stockholm).
 *
 * For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of
 * memory) with 512 GB disk (approximately 487 GB free), and provides 1
 * executor per worker. We recommend this worker type for jobs whose workloads
 * contain your most demanding transforms, aggregations, joins, and queries.
 * This worker type is available only for Glue version 3.0 or later Spark ETL
 * jobs, in the same Amazon Web Services Regions as supported for the G.4X
 * worker type.
 *
 * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of
 * memory) with 84 GB disk (approximately 34 GB free), and provides 1 executor
 * per worker. We recommend this worker type for low-volume streaming jobs.
 * This worker type is only available for Glue version 3.0 streaming jobs.
 *
 * For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of
 * memory) with 128 GB disk (approximately 120 GB free), and provides up to 8
 * Ray workers based on the autoscaler.
 */
inline const WorkerType& GetWorkerType() const{ return m_workerType; }
inline bool WorkerTypeHasBeenSet() const { return m_workerTypeHasBeenSet; }
inline void SetWorkerType(const WorkerType& value) { m_workerTypeHasBeenSet = true; m_workerType = value; }
inline void SetWorkerType(WorkerType&& value) { m_workerTypeHasBeenSet = true; m_workerType = std::move(value); }
inline JobRun& WithWorkerType(const WorkerType& value) { SetWorkerType(value); return *this;}
inline JobRun& WithWorkerType(WorkerType&& value) { SetWorkerType(std::move(value)); return *this;}
/**
 * The number of workers of a defined workerType that are allocated when a job
 * runs.
 */
inline int GetNumberOfWorkers() const{ return m_numberOfWorkers; }
inline bool NumberOfWorkersHasBeenSet() const { return m_numberOfWorkersHasBeenSet; }
inline void SetNumberOfWorkers(int value) { m_numberOfWorkersHasBeenSet = true; m_numberOfWorkers = value; }
inline JobRun& WithNumberOfWorkers(int value) { SetNumberOfWorkers(value); return *this;}
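// Usage sketch (illustrative): the generated WorkerType enum spells API
// values with underscores (for example, G.2X maps to WorkerType::G_2X, an
// assumption based on the SDK's enum naming). Per the MaxCapacity note above,
// pair a worker type with a worker count rather than with MaxCapacity.
// Assumes a JobRun value named run, as in the earlier sketch.
//
//   run.WithWorkerType(Aws::Glue::Model::WorkerType::G_2X)
//      .WithNumberOfWorkers(10);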
/**
 * The name of the SecurityConfiguration structure to be used with this job
 * run.
 */
inline const Aws::String& GetSecurityConfiguration() const{ return m_securityConfiguration; }
inline bool SecurityConfigurationHasBeenSet() const { return m_securityConfigurationHasBeenSet; }
inline void SetSecurityConfiguration(const Aws::String& value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration = value; }
inline void SetSecurityConfiguration(Aws::String&& value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration = std::move(value); }
inline void SetSecurityConfiguration(const char* value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration.assign(value); }
inline JobRun& WithSecurityConfiguration(const Aws::String& value) { SetSecurityConfiguration(value); return *this;}
inline JobRun& WithSecurityConfiguration(Aws::String&& value) { SetSecurityConfiguration(std::move(value)); return *this;}
inline JobRun& WithSecurityConfiguration(const char* value) { SetSecurityConfiguration(value); return *this;}
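// Usage sketch (illustrative; names hypothetical): a run that used the
// security configuration "my-sec-config" under role "my-role" reports the
// configuration name here, and its encrypted CloudWatch log group (see
// LogGroupName below) would be /aws-glue/jobs-my-role-my-sec-config/.
// Assumes a JobRun value named run.
//
//   const Aws::String& secConf = run.GetSecurityConfiguration();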
/**
 * The name of the log group for secure logging that can be server-side
 * encrypted in Amazon CloudWatch using KMS. This name can be /aws-glue/jobs/,
 * in which case the default encryption is NONE. If you add a role name and
 * SecurityConfiguration name (in other words,
 * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then that
 * security configuration is used to encrypt the log group.
 */
inline const Aws::String& GetLogGroupName() const{ return m_logGroupName; }
inline bool LogGroupNameHasBeenSet() const { return m_logGroupNameHasBeenSet; }
inline void SetLogGroupName(const Aws::String& value) { m_logGroupNameHasBeenSet = true; m_logGroupName = value; }
inline void SetLogGroupName(Aws::String&& value) { m_logGroupNameHasBeenSet = true; m_logGroupName = std::move(value); }
inline void SetLogGroupName(const char* value) { m_logGroupNameHasBeenSet = true; m_logGroupName.assign(value); }
inline JobRun& WithLogGroupName(const Aws::String& value) { SetLogGroupName(value); return *this;}
inline JobRun& WithLogGroupName(Aws::String&& value) { SetLogGroupName(std::move(value)); return *this;}
inline JobRun& WithLogGroupName(const char* value) { SetLogGroupName(value); return *this;}

/**
 * Specifies configuration properties of a job run notification.
*/ inline const NotificationProperty& GetNotificationProperty() const{ return m_notificationProperty; } /** *Specifies configuration properties of a job run notification.
*/ inline bool NotificationPropertyHasBeenSet() const { return m_notificationPropertyHasBeenSet; } /** *Specifies configuration properties of a job run notification.
*/ inline void SetNotificationProperty(const NotificationProperty& value) { m_notificationPropertyHasBeenSet = true; m_notificationProperty = value; } /** *Specifies configuration properties of a job run notification.
*/ inline void SetNotificationProperty(NotificationProperty&& value) { m_notificationPropertyHasBeenSet = true; m_notificationProperty = std::move(value); } /** *Specifies configuration properties of a job run notification.
*/ inline JobRun& WithNotificationProperty(const NotificationProperty& value) { SetNotificationProperty(value); return *this;} /** *Specifies configuration properties of a job run notification.
*/ inline JobRun& WithNotificationProperty(NotificationProperty&& value) { SetNotificationProperty(std::move(value)); return *this;}

/**
 * In Spark jobs, GlueVersion determines the versions of Apache Spark and
 * Python that Glue makes available in a job. The Python version indicates the
 * version supported for jobs of type Spark.
 *
 * Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of
 * Ray, Python, and additional libraries available in your Ray job are
 * determined by the Runtime parameter of the Job command.
 *
 * For more information about the available Glue versions and corresponding
 * Spark and Python versions, see Glue version in the developer guide. Jobs
 * that are created without specifying a Glue version default to Glue 0.9.
 */
inline const Aws::String& GetGlueVersion() const{ return m_glueVersion; }
inline bool GlueVersionHasBeenSet() const { return m_glueVersionHasBeenSet; }
inline void SetGlueVersion(const Aws::String& value) { m_glueVersionHasBeenSet = true; m_glueVersion = value; }
inline void SetGlueVersion(Aws::String&& value) { m_glueVersionHasBeenSet = true; m_glueVersion = std::move(value); }
inline void SetGlueVersion(const char* value) { m_glueVersionHasBeenSet = true; m_glueVersion.assign(value); }
inline JobRun& WithGlueVersion(const Aws::String& value) { SetGlueVersion(value); return *this;}
inline JobRun& WithGlueVersion(Aws::String&& value) { SetGlueVersion(std::move(value)); return *this;}
inline JobRun& WithGlueVersion(const char* value) { SetGlueVersion(value); return *this;}
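// Usage sketch (illustrative): GlueVersion is carried as a string, so version
// checks are string comparisons. A run created without an explicit version
// reports Glue 0.9, per the note above. Assumes a JobRun value named run.
//
//   if (run.GetGlueVersion() == "4.0") {
//     // Ray-capable version; the Ray/Python runtime itself is selected by
//     // the Runtime parameter of the job command.
//   }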
/**
 * This field is populated only for Auto Scaling job runs, and represents the
 * total time each executor ran during the lifecycle of a job run in seconds,
 * multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X
 * workers). This value may be different than executionEngineRuntime *
 * MaxCapacity, as in the case of Auto Scaling jobs the number of executors
 * running at a given time may be less than the MaxCapacity. Therefore, it is
 * possible that the value of DPUSeconds is less than executionEngineRuntime *
 * MaxCapacity.
 */
inline double GetDPUSeconds() const{ return m_dPUSeconds; }
inline bool DPUSecondsHasBeenSet() const { return m_dPUSecondsHasBeenSet; }
inline void SetDPUSeconds(double value) { m_dPUSecondsHasBeenSet = true; m_dPUSeconds = value; }
inline JobRun& WithDPUSeconds(double value) { SetDPUSeconds(value); return *this;}
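// Usage sketch (illustrative): for Auto Scaling runs, DPUSeconds can be
// compared against the non-scaling upper bound of execution time multiplied
// by MaxCapacity, as described above. Assumes a JobRun value named run.
//
//   double upperBound = run.GetExecutionTime() * run.GetMaxCapacity();
//   bool scaledBelowMax = run.GetDPUSeconds() < upperBound;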
/**
 * Indicates whether the job is run with a standard or flexible execution
 * class. The standard execution class is ideal for time-sensitive workloads
 * that require fast job startup and dedicated resources. The flexible
 * execution class is appropriate for time-insensitive jobs whose start and
 * completion times may vary.
 *
 * Only jobs with Glue version 3.0 and above and command type glueetl will be
 * allowed to set ExecutionClass to FLEX. The flexible execution class is
 * available for Spark jobs.
 */
inline const ExecutionClass& GetExecutionClass() const{ return m_executionClass; }
inline bool ExecutionClassHasBeenSet() const { return m_executionClassHasBeenSet; }
inline void SetExecutionClass(const ExecutionClass& value) { m_executionClassHasBeenSet = true; m_executionClass = value; }
inline void SetExecutionClass(ExecutionClass&& value) { m_executionClassHasBeenSet = true; m_executionClass = std::move(value); }
inline JobRun& WithExecutionClass(const ExecutionClass& value) { SetExecutionClass(value); return *this;}
inline JobRun& WithExecutionClass(ExecutionClass&& value) { SetExecutionClass(std::move(value)); return *this;}
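// End-to-end sketch (illustrative; assumes a configured GlueClient and an
// existing job; the job name and run ID below are hypothetical): fetch a run
// via the GetJobRun API and inspect this model.
//
//   Aws::Glue::GlueClient client;
//   Aws::Glue::Model::GetJobRunRequest request;
//   request.SetJobName("nightly-etl");
//   request.SetRunId("jr_0123456789abcdef");
//   auto outcome = client.GetJobRun(request);
//   if (outcome.IsSuccess()) {
//     const Aws::Glue::Model::JobRun& run = outcome.GetResult().GetJobRun();
//     if (run.GetJobRunState() == Aws::Glue::Model::JobRunState::FAILED) {
//       // An error message is populated for failed runs.
//       const Aws::String& message = run.GetErrorMessage();
//     }
//   }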