/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/glue/Glue_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <aws/glue/model/ExecutionProperty.h>
#include <aws/glue/model/JobCommand.h>
#include <aws/glue/model/ConnectionsList.h>
#include <aws/glue/model/WorkerType.h>
#include <aws/glue/model/NotificationProperty.h>
#include <aws/glue/model/CodeGenConfigurationNode.h>
#include <aws/glue/model/ExecutionClass.h>
#include <aws/glue/model/SourceControlDetails.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace Glue
{
namespace Model
{

  /**
   * Specifies information used to update an existing job definition. The previous
   * job definition is completely overwritten by this information.
   *
   * See Also: AWS API Reference
   */
  class AWS_GLUE_API JobUpdate
  {
  public:
    JobUpdate();
    JobUpdate(Aws::Utils::Json::JsonView jsonValue);
    JobUpdate& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;

    /**
     * Description of the job being defined.
*/ inline const Aws::String& GetDescription() const{ return m_description; } /** *Description of the job being defined.
*/ inline bool DescriptionHasBeenSet() const { return m_descriptionHasBeenSet; } /** *Description of the job being defined.
*/ inline void SetDescription(const Aws::String& value) { m_descriptionHasBeenSet = true; m_description = value; } /** *Description of the job being defined.
*/ inline void SetDescription(Aws::String&& value) { m_descriptionHasBeenSet = true; m_description = std::move(value); } /** *Description of the job being defined.
*/ inline void SetDescription(const char* value) { m_descriptionHasBeenSet = true; m_description.assign(value); } /** *Description of the job being defined.
*/ inline JobUpdate& WithDescription(const Aws::String& value) { SetDescription(value); return *this;} /** *Description of the job being defined.
*/ inline JobUpdate& WithDescription(Aws::String&& value) { SetDescription(std::move(value)); return *this;} /** *Description of the job being defined.
*/ inline JobUpdate& WithDescription(const char* value) { SetDescription(value); return *this;} /** *This field is reserved for future use.
*/ inline const Aws::String& GetLogUri() const{ return m_logUri; } /** *This field is reserved for future use.
*/ inline bool LogUriHasBeenSet() const { return m_logUriHasBeenSet; } /** *This field is reserved for future use.
*/ inline void SetLogUri(const Aws::String& value) { m_logUriHasBeenSet = true; m_logUri = value; } /** *This field is reserved for future use.
*/ inline void SetLogUri(Aws::String&& value) { m_logUriHasBeenSet = true; m_logUri = std::move(value); } /** *This field is reserved for future use.
*/ inline void SetLogUri(const char* value) { m_logUriHasBeenSet = true; m_logUri.assign(value); } /** *This field is reserved for future use.
*/ inline JobUpdate& WithLogUri(const Aws::String& value) { SetLogUri(value); return *this;} /** *This field is reserved for future use.
*/ inline JobUpdate& WithLogUri(Aws::String&& value) { SetLogUri(std::move(value)); return *this;} /** *This field is reserved for future use.
*/ inline JobUpdate& WithLogUri(const char* value) { SetLogUri(value); return *this;} /** *The name or Amazon Resource Name (ARN) of the IAM role associated with this * job (required).
*/ inline const Aws::String& GetRole() const{ return m_role; } /** *The name or Amazon Resource Name (ARN) of the IAM role associated with this * job (required).
*/ inline bool RoleHasBeenSet() const { return m_roleHasBeenSet; } /** *The name or Amazon Resource Name (ARN) of the IAM role associated with this * job (required).
*/ inline void SetRole(const Aws::String& value) { m_roleHasBeenSet = true; m_role = value; } /** *The name or Amazon Resource Name (ARN) of the IAM role associated with this * job (required).
*/ inline void SetRole(Aws::String&& value) { m_roleHasBeenSet = true; m_role = std::move(value); } /** *The name or Amazon Resource Name (ARN) of the IAM role associated with this * job (required).
*/ inline void SetRole(const char* value) { m_roleHasBeenSet = true; m_role.assign(value); } /** *The name or Amazon Resource Name (ARN) of the IAM role associated with this * job (required).
*/ inline JobUpdate& WithRole(const Aws::String& value) { SetRole(value); return *this;} /** *The name or Amazon Resource Name (ARN) of the IAM role associated with this * job (required).
*/ inline JobUpdate& WithRole(Aws::String&& value) { SetRole(std::move(value)); return *this;} /** *The name or Amazon Resource Name (ARN) of the IAM role associated with this * job (required).
*/ inline JobUpdate& WithRole(const char* value) { SetRole(value); return *this;} /** *An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
*/ inline const ExecutionProperty& GetExecutionProperty() const{ return m_executionProperty; } /** *An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
*/ inline bool ExecutionPropertyHasBeenSet() const { return m_executionPropertyHasBeenSet; } /** *An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
*/ inline void SetExecutionProperty(const ExecutionProperty& value) { m_executionPropertyHasBeenSet = true; m_executionProperty = value; } /** *An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
*/ inline void SetExecutionProperty(ExecutionProperty&& value) { m_executionPropertyHasBeenSet = true; m_executionProperty = std::move(value); } /** *An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
*/ inline JobUpdate& WithExecutionProperty(const ExecutionProperty& value) { SetExecutionProperty(value); return *this;} /** *An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
*/ inline JobUpdate& WithExecutionProperty(ExecutionProperty&& value) { SetExecutionProperty(std::move(value)); return *this;} /** *The JobCommand that runs this job (required).
*/ inline const JobCommand& GetCommand() const{ return m_command; } /** *The JobCommand that runs this job (required).
*/ inline bool CommandHasBeenSet() const { return m_commandHasBeenSet; } /** *The JobCommand that runs this job (required).
*/ inline void SetCommand(const JobCommand& value) { m_commandHasBeenSet = true; m_command = value; } /** *The JobCommand that runs this job (required).
*/ inline void SetCommand(JobCommand&& value) { m_commandHasBeenSet = true; m_command = std::move(value); } /** *The JobCommand that runs this job (required).
*/ inline JobUpdate& WithCommand(const JobCommand& value) { SetCommand(value); return *this;} /** *The JobCommand that runs this job (required).
*/ inline JobUpdate& WithCommand(JobCommand&& value) { SetCommand(std::move(value)); return *this;} /** *
The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
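* A minimal usage sketch (the argument name is a Glue special parameter and the S3 path
* is a placeholder; both are assumptions chosen for illustration):
* @code
*   JobUpdate update;
*   update.AddDefaultArguments("--TempDir", "s3://amzn-s3-demo-bucket/temporary/");
* @endcode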
*/ inline const Aws::Map<Aws::String, Aws::String>& GetDefaultArguments() const{ return m_defaultArguments; } /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline bool DefaultArgumentsHasBeenSet() const { return m_defaultArgumentsHasBeenSet; } /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline void SetDefaultArguments(const Aws::Map<Aws::String, Aws::String>& value) { m_defaultArgumentsHasBeenSet = true; m_defaultArguments = value; } /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline void SetDefaultArguments(Aws::Map<Aws::String, Aws::String>&& value) { m_defaultArgumentsHasBeenSet = true; m_defaultArguments = std::move(value); } /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline JobUpdate& WithDefaultArguments(const Aws::Map<Aws::String, Aws::String>& value) { SetDefaultArguments(value); return *this;} /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline JobUpdate& WithDefaultArguments(Aws::Map<Aws::String, Aws::String>&& value) { SetDefaultArguments(std::move(value)); return *this;} /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline JobUpdate& AddDefaultArguments(const Aws::String& key, const Aws::String& value) { m_defaultArgumentsHasBeenSet = true; m_defaultArguments.emplace(key, value); return *this; } /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline JobUpdate& AddDefaultArguments(Aws::String&& key, const Aws::String& value) { m_defaultArgumentsHasBeenSet = true; m_defaultArguments.emplace(std::move(key), value); return *this; } /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline JobUpdate& AddDefaultArguments(const Aws::String& key, Aws::String&& value) { m_defaultArgumentsHasBeenSet = true; m_defaultArguments.emplace(key, std::move(value)); return *this; } /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline JobUpdate& AddDefaultArguments(Aws::String&& key, Aws::String&& value) { m_defaultArgumentsHasBeenSet = true; m_defaultArguments.emplace(std::move(key), std::move(value)); return *this; } /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline JobUpdate& AddDefaultArguments(const char* key, Aws::String&& value) { m_defaultArgumentsHasBeenSet = true; m_defaultArguments.emplace(key, std::move(value)); return *this; } /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline JobUpdate& AddDefaultArguments(Aws::String&& key, const char* value) { m_defaultArgumentsHasBeenSet = true; m_defaultArguments.emplace(std::move(key), value); return *this; } /** *The default arguments for every run of this job, specified as name-value * pairs.
You can specify arguments here that your own job-execution script * consumes, as well as arguments that Glue itself consumes.
Job arguments * may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from * a Glue Connection, Secrets Manager or other secret management mechanism if you * intend to keep them within the Job.
For information about how to specify * and consume your own Job arguments, see the Calling * Glue APIs in Python topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.
For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
*/ inline JobUpdate& AddDefaultArguments(const char* key, const char* value) { m_defaultArgumentsHasBeenSet = true; m_defaultArguments.emplace(key, value); return *this; } /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
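* A minimal usage sketch (argument name and value are assumptions chosen for illustration):
* @code
*   JobUpdate update;
*   update.AddNonOverridableArguments("--enable-metrics", "true");
* @endcode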
*/ inline const Aws::Map<Aws::String, Aws::String>& GetNonOverridableArguments() const{ return m_nonOverridableArguments; } /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline bool NonOverridableArgumentsHasBeenSet() const { return m_nonOverridableArgumentsHasBeenSet; } /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline void SetNonOverridableArguments(const Aws::Map<Aws::String, Aws::String>& value) { m_nonOverridableArgumentsHasBeenSet = true; m_nonOverridableArguments = value; } /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline void SetNonOverridableArguments(Aws::Map<Aws::String, Aws::String>&& value) { m_nonOverridableArgumentsHasBeenSet = true; m_nonOverridableArguments = std::move(value); } /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline JobUpdate& WithNonOverridableArguments(const Aws::Map<Aws::String, Aws::String>& value) { SetNonOverridableArguments(value); return *this;} /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline JobUpdate& WithNonOverridableArguments(Aws::Map<Aws::String, Aws::String>&& value) { SetNonOverridableArguments(std::move(value)); return *this;} /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline JobUpdate& AddNonOverridableArguments(const Aws::String& key, const Aws::String& value) { m_nonOverridableArgumentsHasBeenSet = true; m_nonOverridableArguments.emplace(key, value); return *this; } /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline JobUpdate& AddNonOverridableArguments(Aws::String&& key, const Aws::String& value) { m_nonOverridableArgumentsHasBeenSet = true; m_nonOverridableArguments.emplace(std::move(key), value); return *this; } /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline JobUpdate& AddNonOverridableArguments(const Aws::String& key, Aws::String&& value) { m_nonOverridableArgumentsHasBeenSet = true; m_nonOverridableArguments.emplace(key, std::move(value)); return *this; } /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline JobUpdate& AddNonOverridableArguments(Aws::String&& key, Aws::String&& value) { m_nonOverridableArgumentsHasBeenSet = true; m_nonOverridableArguments.emplace(std::move(key), std::move(value)); return *this; } /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline JobUpdate& AddNonOverridableArguments(const char* key, Aws::String&& value) { m_nonOverridableArgumentsHasBeenSet = true; m_nonOverridableArguments.emplace(key, std::move(value)); return *this; } /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline JobUpdate& AddNonOverridableArguments(Aws::String&& key, const char* value) { m_nonOverridableArgumentsHasBeenSet = true; m_nonOverridableArguments.emplace(std::move(key), value); return *this; } /** *Arguments for this job that are not overridden when providing job arguments * in a job run, specified as name-value pairs.
*/ inline JobUpdate& AddNonOverridableArguments(const char* key, const char* value) { m_nonOverridableArgumentsHasBeenSet = true; m_nonOverridableArguments.emplace(key, value); return *this; } /** *The connections used for this job.
*/ inline const ConnectionsList& GetConnections() const{ return m_connections; } /** *The connections used for this job.
*/ inline bool ConnectionsHasBeenSet() const { return m_connectionsHasBeenSet; } /** *The connections used for this job.
*/ inline void SetConnections(const ConnectionsList& value) { m_connectionsHasBeenSet = true; m_connections = value; } /** *The connections used for this job.
*/ inline void SetConnections(ConnectionsList&& value) { m_connectionsHasBeenSet = true; m_connections = std::move(value); } /** *The connections used for this job.
*/ inline JobUpdate& WithConnections(const ConnectionsList& value) { SetConnections(value); return *this;} /** *The connections used for this job.
*/ inline JobUpdate& WithConnections(ConnectionsList&& value) { SetConnections(std::move(value)); return *this;} /** *The maximum number of times to retry this job if it fails.
*/ inline int GetMaxRetries() const{ return m_maxRetries; } /** *The maximum number of times to retry this job if it fails.
*/ inline bool MaxRetriesHasBeenSet() const { return m_maxRetriesHasBeenSet; } /** *The maximum number of times to retry this job if it fails.
*/ inline void SetMaxRetries(int value) { m_maxRetriesHasBeenSet = true; m_maxRetries = value; } /** *The maximum number of times to retry this job if it fails.
*/ inline JobUpdate& WithMaxRetries(int value) { SetMaxRetries(value); return *this;} /** *The job timeout in minutes. This is the maximum time that a job run can
* consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
*/ inline int GetTimeout() const{ return m_timeout; } /** *The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
*/ inline bool TimeoutHasBeenSet() const { return m_timeoutHasBeenSet; } /** *The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
*/ inline void SetTimeout(int value) { m_timeoutHasBeenSet = true; m_timeout = value; } /** *The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
*/ inline JobUpdate& WithTimeout(int value) { SetTimeout(value); return *this;}
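    /* Illustrative sketch (not part of the generated API surface): tuning retry and
     * timeout behavior on an update. The values shown are assumptions chosen for the
     * example, not defaults recommended by Glue.
     *
     *   JobUpdate update;
     *   update.SetMaxRetries(1);   // retry a failed run once
     *   update.SetTimeout(120);    // terminate runs that exceed 2 hours
     */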
/** *For Glue version 1.0 or earlier jobs, using the standard worker type, the
* number of Glue data processing units (DPUs) that can be allocated when this job
* runs. A DPU is a relative measure of processing power that consists of 4 vCPUs
* of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity.
* Instead, you should specify a Worker type and the Number of workers.
* Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
* The value that can be allocated for MaxCapacity depends on whether you are running
* a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:
* When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate
* either 0.0625 or 1 DPU. The default is 0.0625 DPU.
* When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark
* streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs.
* The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
*/ inline double GetMaxCapacity() const{ return m_maxCapacity; }
inline bool MaxCapacityHasBeenSet() const { return m_maxCapacityHasBeenSet; }
inline void SetMaxCapacity(double value) { m_maxCapacityHasBeenSet = true; m_maxCapacity = value; }
inline JobUpdate& WithMaxCapacity(double value) { SetMaxCapacity(value); return *this;}
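    /* Illustrative sketch (assumptions, not a normative recommendation): sizing a
     * pre-2.0 Spark ETL job by DPU count. Newer jobs should prefer WorkerType and
     * NumberOfWorkers instead of MaxCapacity.
     *
     *   JobUpdate update;
     *   update.SetGlueVersion("1.0");
     *   update.SetMaxCapacity(10.0);   // whole DPUs only for glueetl/gluestreaming
     */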
/** *The type of predefined worker that is allocated when a job runs. Accepts a
* value of G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
* For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type
* for workloads such as data transforms, joins, and queries; it offers a scalable and
* cost-effective way to run most jobs.
* For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk
* (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type
* for workloads such as data transforms, joins, and queries; it offers a scalable and
* cost-effective way to run most jobs.
* For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
* (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type
* for jobs whose workloads contain your most demanding transforms, aggregations, joins, and
* queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the
* following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon),
* Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central),
* Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
* For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk
* (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type
* for jobs whose workloads contain your most demanding transforms, aggregations, joins, and
* queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the
* same Amazon Web Services Regions as supported for the G.4X worker type.
* For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk
* (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for
* low-volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
* For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk
* (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
*/ inline const WorkerType& GetWorkerType() const{ return m_workerType; }
inline bool WorkerTypeHasBeenSet() const { return m_workerTypeHasBeenSet; }
inline void SetWorkerType(const WorkerType& value) { m_workerTypeHasBeenSet = true; m_workerType = value; }
inline void SetWorkerType(WorkerType&& value) { m_workerTypeHasBeenSet = true; m_workerType = std::move(value); }
inline JobUpdate& WithWorkerType(const WorkerType& value) { SetWorkerType(value); return *this;}
inline JobUpdate& WithWorkerType(WorkerType&& value) { SetWorkerType(std::move(value)); return *this;}
/** *The number of workers of a defined workerType that are allocated when a job runs.
*/ inline int GetNumberOfWorkers() const{ return m_numberOfWorkers; } /** *The number of workers of a defined workerType that are allocated when a job runs.
*/ inline bool NumberOfWorkersHasBeenSet() const { return m_numberOfWorkersHasBeenSet; } /** *The number of workers of a defined workerType that are allocated when a job runs.
*/ inline void SetNumberOfWorkers(int value) { m_numberOfWorkersHasBeenSet = true; m_numberOfWorkers = value; } /** *The number of workers of a defined workerType that are allocated when a job runs.
*/ inline JobUpdate& WithNumberOfWorkers(int value) { SetNumberOfWorkers(value); return *this;}
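    /* Illustrative sketch (assumptions): pairing a worker type with a worker count for
     * a Glue 3.0+ Spark job. The enum constant shown (WorkerType::G_1X) follows the
     * usual naming pattern of the generated WorkerType enum and is an assumption here.
     *
     *   JobUpdate update;
     *   update.SetWorkerType(WorkerType::G_1X);
     *   update.SetNumberOfWorkers(10);   // do not also set MaxCapacity
     */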
/** *The name of the SecurityConfiguration structure to be used with this job.
*/ inline const Aws::String& GetSecurityConfiguration() const{ return m_securityConfiguration; } /** *The name of the SecurityConfiguration structure to be used with this job.
*/ inline bool SecurityConfigurationHasBeenSet() const { return m_securityConfigurationHasBeenSet; } /** *The name of the SecurityConfiguration structure to be used with this job.
*/ inline void SetSecurityConfiguration(const Aws::String& value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration = value; } /** *The name of the SecurityConfiguration structure to be used with this job.
*/ inline void SetSecurityConfiguration(Aws::String&& value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration = std::move(value); } /** *The name of the SecurityConfiguration structure to be used with this job.
*/ inline void SetSecurityConfiguration(const char* value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration.assign(value); } /** *The name of the SecurityConfiguration structure to be used with this job.
*/ inline JobUpdate& WithSecurityConfiguration(const Aws::String& value) { SetSecurityConfiguration(value); return *this;} /** *The name of the SecurityConfiguration structure to be used with this job.
*/ inline JobUpdate& WithSecurityConfiguration(Aws::String&& value) { SetSecurityConfiguration(std::move(value)); return *this;} /** *The name of the SecurityConfiguration structure to be used with this job.
*/ inline JobUpdate& WithSecurityConfiguration(const char* value) { SetSecurityConfiguration(value); return *this;} /** *
Specifies the configuration properties of a job notification.
*/ inline const NotificationProperty& GetNotificationProperty() const{ return m_notificationProperty; } /** *Specifies the configuration properties of a job notification.
*/ inline bool NotificationPropertyHasBeenSet() const { return m_notificationPropertyHasBeenSet; } /** *Specifies the configuration properties of a job notification.
*/ inline void SetNotificationProperty(const NotificationProperty& value) { m_notificationPropertyHasBeenSet = true; m_notificationProperty = value; } /** *Specifies the configuration properties of a job notification.
*/ inline void SetNotificationProperty(NotificationProperty&& value) { m_notificationPropertyHasBeenSet = true; m_notificationProperty = std::move(value); } /** *Specifies the configuration properties of a job notification.
*/ inline JobUpdate& WithNotificationProperty(const NotificationProperty& value) { SetNotificationProperty(value); return *this;} /** *Specifies the configuration properties of a job notification.
*/ inline JobUpdate& WithNotificationProperty(NotificationProperty&& value) { SetNotificationProperty(std::move(value)); return *this;} /** *In Spark jobs, GlueVersion
determines the versions of Apache
* Spark and Python that Glue makes available in a job. The Python version indicates the
* version supported for jobs of type Spark.
Ray jobs should set
* GlueVersion
to 4.0
or greater. However, the versions
* of Ray, Python and additional libraries available in your Ray job are determined
* by the Runtime
parameter of the Job command.
For more * information about the available Glue versions and corresponding Spark and Python * versions, see Glue version * in the developer guide.
Jobs that are created without specifying a Glue * version default to Glue 0.9.
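* A minimal usage sketch (the version string is an assumption chosen for illustration):
* @code
*   JobUpdate update;
*   update.SetGlueVersion("4.0");   // explicit Spark runtime; also the minimum for Ray jobs
* @endcode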
*/ inline const Aws::String& GetGlueVersion() const{ return m_glueVersion; } /** *In Spark jobs, GlueVersion
determines the versions of Apache
* Spark and Python that Glue makes available in a job. The Python version indicates the
* version supported for jobs of type Spark.
Ray jobs should set
* GlueVersion
to 4.0
or greater. However, the versions
* of Ray, Python and additional libraries available in your Ray job are determined
* by the Runtime
parameter of the Job command.
For more * information about the available Glue versions and corresponding Spark and Python * versions, see Glue version * in the developer guide.
Jobs that are created without specifying a Glue * version default to Glue 0.9.
*/ inline bool GlueVersionHasBeenSet() const { return m_glueVersionHasBeenSet; } /** *In Spark jobs, GlueVersion
determines the versions of Apache
* Spark and Python that Glue makes available in a job. The Python version indicates the
* version supported for jobs of type Spark.
Ray jobs should set
* GlueVersion
to 4.0
or greater. However, the versions
* of Ray, Python and additional libraries available in your Ray job are determined
* by the Runtime
parameter of the Job command.
For more * information about the available Glue versions and corresponding Spark and Python * versions, see Glue version * in the developer guide.
Jobs that are created without specifying a Glue * version default to Glue 0.9.
*/ inline void SetGlueVersion(const Aws::String& value) { m_glueVersionHasBeenSet = true; m_glueVersion = value; } /** *In Spark jobs, GlueVersion
determines the versions of Apache
* Spark and Python that Glue makes available in a job. The Python version indicates the
* version supported for jobs of type Spark.
Ray jobs should set
* GlueVersion
to 4.0
or greater. However, the versions
* of Ray, Python and additional libraries available in your Ray job are determined
* by the Runtime
parameter of the Job command.
For more * information about the available Glue versions and corresponding Spark and Python * versions, see Glue version * in the developer guide.
Jobs that are created without specifying a Glue * version default to Glue 0.9.
*/ inline void SetGlueVersion(Aws::String&& value) { m_glueVersionHasBeenSet = true; m_glueVersion = std::move(value); } /** *In Spark jobs, GlueVersion
determines the versions of Apache
* Spark and Python that Glue makes available in a job. The Python version indicates the
* version supported for jobs of type Spark.
Ray jobs should set
* GlueVersion
to 4.0
or greater. However, the versions
* of Ray, Python and additional libraries available in your Ray job are determined
* by the Runtime
parameter of the Job command.
For more * information about the available Glue versions and corresponding Spark and Python * versions, see Glue version * in the developer guide.
Jobs that are created without specifying a Glue * version default to Glue 0.9.
*/ inline void SetGlueVersion(const char* value) { m_glueVersionHasBeenSet = true; m_glueVersion.assign(value); } /** *In Spark jobs, GlueVersion
determines the versions of Apache
* Spark and Python that Glue makes available in a job. The Python version indicates the
* version supported for jobs of type Spark.
Ray jobs should set
* GlueVersion
to 4.0
or greater. However, the versions
* of Ray, Python and additional libraries available in your Ray job are determined
* by the Runtime
parameter of the Job command.
For more * information about the available Glue versions and corresponding Spark and Python * versions, see Glue version * in the developer guide.
Jobs that are created without specifying a Glue * version default to Glue 0.9.
*/ inline JobUpdate& WithGlueVersion(const Aws::String& value) { SetGlueVersion(value); return *this;} /** *In Spark jobs, GlueVersion
determines the versions of Apache
* Spark and Python that Glue makes available in a job. The Python version indicates the
* version supported for jobs of type Spark.
Ray jobs should set
* GlueVersion
to 4.0
or greater. However, the versions
* of Ray, Python and additional libraries available in your Ray job are determined
* by the Runtime
parameter of the Job command.
For more * information about the available Glue versions and corresponding Spark and Python * versions, see Glue version * in the developer guide.
Jobs that are created without specifying a Glue * version default to Glue 0.9.
*/ inline JobUpdate& WithGlueVersion(Aws::String&& value) { SetGlueVersion(std::move(value)); return *this;} /** *In Spark jobs, GlueVersion
determines the versions of Apache
* Spark and Python that Glue makes available in a job. The Python version indicates the
* version supported for jobs of type Spark.
Ray jobs should set
* GlueVersion
to 4.0
or greater. However, the versions
* of Ray, Python and additional libraries available in your Ray job are determined
* by the Runtime
parameter of the Job command.
For more * information about the available Glue versions and corresponding Spark and Python * versions, see Glue version * in the developer guide.
Jobs that are created without specifying a Glue * version default to Glue 0.9.
*/ inline JobUpdate& WithGlueVersion(const char* value) { SetGlueVersion(value); return *this;} /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline const Aws::Map<Aws::String, CodeGenConfigurationNode>& GetCodeGenConfigurationNodes() const{ return m_codeGenConfigurationNodes; } /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline bool CodeGenConfigurationNodesHasBeenSet() const { return m_codeGenConfigurationNodesHasBeenSet; } /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline void SetCodeGenConfigurationNodes(const Aws::Map<Aws::String, CodeGenConfigurationNode>& value) { m_codeGenConfigurationNodesHasBeenSet = true; m_codeGenConfigurationNodes = value; } /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline void SetCodeGenConfigurationNodes(Aws::Map<Aws::String, CodeGenConfigurationNode>&& value) { m_codeGenConfigurationNodesHasBeenSet = true; m_codeGenConfigurationNodes = std::move(value); } /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline JobUpdate& WithCodeGenConfigurationNodes(const Aws::Map<Aws::String, CodeGenConfigurationNode>& value) { SetCodeGenConfigurationNodes(value); return *this;} /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline JobUpdate& WithCodeGenConfigurationNodes(Aws::Map<Aws::String, CodeGenConfigurationNode>&& value) { SetCodeGenConfigurationNodes(std::move(value)); return *this;} /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline JobUpdate& AddCodeGenConfigurationNodes(const Aws::String& key, const CodeGenConfigurationNode& value) { m_codeGenConfigurationNodesHasBeenSet = true; m_codeGenConfigurationNodes.emplace(key, value); return *this; } /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline JobUpdate& AddCodeGenConfigurationNodes(Aws::String&& key, const CodeGenConfigurationNode& value) { m_codeGenConfigurationNodesHasBeenSet = true; m_codeGenConfigurationNodes.emplace(std::move(key), value); return *this; } /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline JobUpdate& AddCodeGenConfigurationNodes(const Aws::String& key, CodeGenConfigurationNode&& value) { m_codeGenConfigurationNodesHasBeenSet = true; m_codeGenConfigurationNodes.emplace(key, std::move(value)); return *this; } /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline JobUpdate& AddCodeGenConfigurationNodes(Aws::String&& key, CodeGenConfigurationNode&& value) { m_codeGenConfigurationNodesHasBeenSet = true; m_codeGenConfigurationNodes.emplace(std::move(key), std::move(value)); return *this; } /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline JobUpdate& AddCodeGenConfigurationNodes(const char* key, CodeGenConfigurationNode&& value) { m_codeGenConfigurationNodesHasBeenSet = true; m_codeGenConfigurationNodes.emplace(key, std::move(value)); return *this; } /** *The representation of a directed acyclic graph on which both the Glue Studio * visual component and Glue Studio code generation is based.
*/ inline JobUpdate& AddCodeGenConfigurationNodes(const char* key, const CodeGenConfigurationNode& value) { m_codeGenConfigurationNodesHasBeenSet = true; m_codeGenConfigurationNodes.emplace(key, value); return *this; } /** *Indicates whether the job is run with a standard or flexible execution class. * The standard execution-class is ideal for time-sensitive workloads that require * fast job startup and dedicated resources.
The flexible execution class is * appropriate for time-insensitive jobs whose start and completion times may vary. *
Only jobs with Glue version 3.0 and above and command type
* glueetl
will be allowed to set ExecutionClass
to
* FLEX
. The flexible execution class is available for Spark jobs.
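* A minimal usage sketch (the ExecutionClass::FLEX constant follows the usual naming
* pattern of the generated enum and is an assumption here):
* @code
*   JobUpdate update;
*   update.SetExecutionClass(ExecutionClass::FLEX);   // requires Glue 3.0+ and glueetl
* @endcode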
*/ inline const ExecutionClass& GetExecutionClass() const{ return m_executionClass; }
inline bool ExecutionClassHasBeenSet() const { return m_executionClassHasBeenSet; }
inline void SetExecutionClass(const ExecutionClass& value) { m_executionClassHasBeenSet = true; m_executionClass = value; }
inline void SetExecutionClass(ExecutionClass&& value) { m_executionClassHasBeenSet = true; m_executionClass = std::move(value); }
inline JobUpdate& WithExecutionClass(const ExecutionClass& value) { SetExecutionClass(value); return *this;}
inline JobUpdate& WithExecutionClass(ExecutionClass&& value) { SetExecutionClass(std::move(value)); return *this;} /** *
The details for a source control configuration for a job, allowing * synchronization of job artifacts to or from a remote repository.
*/ inline const SourceControlDetails& GetSourceControlDetails() const{ return m_sourceControlDetails; } /** *The details for a source control configuration for a job, allowing * synchronization of job artifacts to or from a remote repository.
*/ inline bool SourceControlDetailsHasBeenSet() const { return m_sourceControlDetailsHasBeenSet; } /** *The details for a source control configuration for a job, allowing * synchronization of job artifacts to or from a remote repository.
*/ inline void SetSourceControlDetails(const SourceControlDetails& value) { m_sourceControlDetailsHasBeenSet = true; m_sourceControlDetails = value; } /** *The details for a source control configuration for a job, allowing * synchronization of job artifacts to or from a remote repository.
*/ inline void SetSourceControlDetails(SourceControlDetails&& value) { m_sourceControlDetailsHasBeenSet = true; m_sourceControlDetails = std::move(value); } /** *The details for a source control configuration for a job, allowing * synchronization of job artifacts to or from a remote repository.
*/ inline JobUpdate& WithSourceControlDetails(const SourceControlDetails& value) { SetSourceControlDetails(value); return *this;} /** *The details for a source control configuration for a job, allowing * synchronization of job artifacts to or from a remote repository.
*/ inline JobUpdate& WithSourceControlDetails(SourceControlDetails&& value) { SetSourceControlDetails(std::move(value)); return *this;}

  private:

    Aws::String m_description; bool m_descriptionHasBeenSet = false;
    Aws::String m_logUri; bool m_logUriHasBeenSet = false;
    Aws::String m_role; bool m_roleHasBeenSet = false;
    ExecutionProperty m_executionProperty; bool m_executionPropertyHasBeenSet = false;
    JobCommand m_command; bool m_commandHasBeenSet = false;
    Aws::Map<Aws::String, Aws::String> m_defaultArguments; bool m_defaultArgumentsHasBeenSet = false;
    Aws::Map<Aws::String, Aws::String> m_nonOverridableArguments; bool m_nonOverridableArgumentsHasBeenSet = false;
    ConnectionsList m_connections; bool m_connectionsHasBeenSet = false;
    int m_maxRetries; bool m_maxRetriesHasBeenSet = false;
    int m_timeout; bool m_timeoutHasBeenSet = false;
    double m_maxCapacity; bool m_maxCapacityHasBeenSet = false;
    WorkerType m_workerType; bool m_workerTypeHasBeenSet = false;
    int m_numberOfWorkers; bool m_numberOfWorkersHasBeenSet = false;
    Aws::String m_securityConfiguration; bool m_securityConfigurationHasBeenSet = false;
    NotificationProperty m_notificationProperty; bool m_notificationPropertyHasBeenSet = false;
    Aws::String m_glueVersion; bool m_glueVersionHasBeenSet = false;
    Aws::Map<Aws::String, CodeGenConfigurationNode> m_codeGenConfigurationNodes; bool m_codeGenConfigurationNodesHasBeenSet = false;
    ExecutionClass m_executionClass; bool m_executionClassHasBeenSet = false;
    SourceControlDetails m_sourceControlDetails; bool m_sourceControlDetailsHasBeenSet = false;
  };

} // namespace Model
} // namespace Glue
} // namespace Aws
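/* Usage sketch (non-normative). The client and request types shown here
 * (Aws::Glue::GlueClient, Model::UpdateJobRequest, SetJobName, SetJobUpdate) reflect
 * the usual aws-sdk-cpp pattern for this service and are assumptions, not part of
 * this header; the role ARN, script location, and job name are placeholders.
 *
 *   Aws::Glue::GlueClient glue;
 *   Aws::Glue::Model::JobUpdate update;
 *   update.SetRole("arn:aws:iam::123456789012:role/MyGlueJobRole");
 *   update.SetCommand(Aws::Glue::Model::JobCommand().WithName("glueetl")
 *                         .WithScriptLocation("s3://amzn-s3-demo-bucket/scripts/etl.py"));
 *   Aws::Glue::Model::UpdateJobRequest request;
 *   request.SetJobName("my-existing-job");
 *   request.SetJobUpdate(update);
 *   auto outcome = glue.UpdateJob(request);   // the previous definition is fully overwritten
 */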