/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#pragma once
#include <aws/glue/Glue_EXPORTS.h>
#include <aws/glue/GlueRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <aws/glue/model/NotificationProperty.h>
#include <aws/glue/model/WorkerType.h>
#include <aws/glue/model/ExecutionClass.h>
#include <utility>

namespace Aws
{
namespace Glue
{
namespace Model
{

  /**
   */
  class StartJobRunRequest : public GlueRequest
  {
  public:
    AWS_GLUE_API StartJobRunRequest();

    // The service request name is the operation name that sends this request out.
    // Each operation has a unique request name, so the operation's name can be
    // recovered from its request. Note that this is not true for responses:
    // multiple operations may share a response name, so an operation's name
    // cannot be recovered from its response.
    inline virtual const char* GetServiceRequestName() const override { return "StartJobRun"; }

    AWS_GLUE_API Aws::String SerializePayload() const override;

    AWS_GLUE_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;


    /**
     * The name of the job definition to use.
     */
    inline const Aws::String& GetJobName() const { return m_jobName; }
    inline bool JobNameHasBeenSet() const { return m_jobNameHasBeenSet; }
    inline void SetJobName(const Aws::String& value) { m_jobNameHasBeenSet = true; m_jobName = value; }
    inline void SetJobName(Aws::String&& value) { m_jobNameHasBeenSet = true; m_jobName = std::move(value); }
    inline void SetJobName(const char* value) { m_jobNameHasBeenSet = true; m_jobName.assign(value); }
    inline StartJobRunRequest& WithJobName(const Aws::String& value) { SetJobName(value); return *this; }
    inline StartJobRunRequest& WithJobName(Aws::String&& value) { SetJobName(std::move(value)); return *this; }
    inline StartJobRunRequest& WithJobName(const char* value) { SetJobName(value); return *this; }

    /**
     * The ID of a previous JobRun to retry.
     */
    inline const Aws::String& GetJobRunId() const { return m_jobRunId; }
    inline bool JobRunIdHasBeenSet() const { return m_jobRunIdHasBeenSet; }
    inline void SetJobRunId(const Aws::String& value) { m_jobRunIdHasBeenSet = true; m_jobRunId = value; }
    inline void SetJobRunId(Aws::String&& value) { m_jobRunIdHasBeenSet = true; m_jobRunId = std::move(value); }
    inline void SetJobRunId(const char* value) { m_jobRunIdHasBeenSet = true; m_jobRunId.assign(value); }
    inline StartJobRunRequest& WithJobRunId(const Aws::String& value) { SetJobRunId(value); return *this; }
    inline StartJobRunRequest& WithJobRunId(Aws::String&& value) { SetJobRunId(std::move(value)); return *this; }
    inline StartJobRunRequest& WithJobRunId(const char* value) { SetJobRunId(value); return *this; }
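    /*
     * Usage sketch (illustrative, not part of the generated API): starting a run
     * and reading back its run ID. The job name is hypothetical, SDK init/shutdown
     * (Aws::InitAPI) is elided, and a default-configured GlueClient is assumed.
     * Setting JobRunId in addition to JobName retries that earlier run rather
     * than starting a fresh one.
     *
     *   #include <aws/glue/GlueClient.h>
     *   #include <aws/glue/model/StartJobRunRequest.h>
     *
     *   Aws::Glue::GlueClient client;                  // default credentials/region
     *   Aws::Glue::Model::StartJobRunRequest request;
     *   request.SetJobName("my-etl-job");              // hypothetical job name
     *   // request.SetJobRunId("jr_...");              // optional: retry a prior run
     *   auto outcome = client.StartJobRun(request);
     *   if (outcome.IsSuccess()) {
     *       Aws::String runId = outcome.GetResult().GetJobRunId();
     *   }
     */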

    /**
     * The job arguments associated with this run. For this job run, they replace
     * the default arguments set in the job definition itself.
     *
     * You can specify arguments here that your own job-execution script consumes,
     * as well as arguments that Glue itself consumes.
     *
     * Job arguments may be logged. Do not pass plaintext secrets as arguments.
     * Retrieve secrets from a Glue Connection, Secrets Manager, or another secret
     * management mechanism if you intend to keep them within the job.
     *
     * For information about how to specify and consume your own job arguments, see
     * the Calling Glue APIs in Python topic in the developer guide.
     *
     * For information about the arguments you can provide to this field when
     * configuring Spark jobs, see the Special Parameters Used by Glue topic in the
     * developer guide.
     *
     * For information about the arguments you can provide to this field when
     * configuring Ray jobs, see Using job parameters in Ray jobs in the developer
     * guide.
     */
    inline const Aws::Map<Aws::String, Aws::String>& GetArguments() const { return m_arguments; }
    inline bool ArgumentsHasBeenSet() const { return m_argumentsHasBeenSet; }
    inline void SetArguments(const Aws::Map<Aws::String, Aws::String>& value) { m_argumentsHasBeenSet = true; m_arguments = value; }
    inline void SetArguments(Aws::Map<Aws::String, Aws::String>&& value) { m_argumentsHasBeenSet = true; m_arguments = std::move(value); }
    inline StartJobRunRequest& WithArguments(const Aws::Map<Aws::String, Aws::String>& value) { SetArguments(value); return *this; }
    inline StartJobRunRequest& WithArguments(Aws::Map<Aws::String, Aws::String>&& value) { SetArguments(std::move(value)); return *this; }
    inline StartJobRunRequest& AddArguments(const Aws::String& key, const Aws::String& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, value); return *this; }
    inline StartJobRunRequest& AddArguments(Aws::String&& key, const Aws::String& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(std::move(key), value); return *this; }
    inline StartJobRunRequest& AddArguments(const Aws::String& key, Aws::String&& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, std::move(value)); return *this; }
    inline StartJobRunRequest& AddArguments(Aws::String&& key, Aws::String&& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(std::move(key), std::move(value)); return *this; }
    inline StartJobRunRequest& AddArguments(const char* key, Aws::String&& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, std::move(value)); return *this; }
    inline StartJobRunRequest& AddArguments(Aws::String&& key, const char* value) { m_argumentsHasBeenSet = true; m_arguments.emplace(std::move(key), value); return *this; }
    inline StartJobRunRequest& AddArguments(const char* key, const char* value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, value); return *this; }
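    /*
     * Illustrative sketch of per-run arguments; key names and values here are
     * hypothetical examples, not defaults. Glue conventionally exposes user
     * parameters to the script under "--"-prefixed keys, and AddArguments
     * returns *this, so calls can be chained.
     *
     *   Aws::Glue::Model::StartJobRunRequest request;
     *   request.SetJobName("my-etl-job");
     *   request.AddArguments("--input_path", "s3://my-bucket/input/")  // consumed by the script
     *          .AddArguments("--enable-metrics", "true");              // consumed by Glue itself
     */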

    /**
     * The JobRun timeout in minutes. This is the maximum time that a job run can
     * consume resources before it is terminated and enters TIMEOUT status. This
     * value overrides the timeout value set in the parent job.
     *
     * Streaming jobs do not have a timeout. The default for non-streaming jobs is
     * 2,880 minutes (48 hours).
     */
    inline int GetTimeout() const { return m_timeout; }
    inline bool TimeoutHasBeenSet() const { return m_timeoutHasBeenSet; }
    inline void SetTimeout(int value) { m_timeoutHasBeenSet = true; m_timeout = value; }
    inline StartJobRunRequest& WithTimeout(int value) { SetTimeout(value); return *this; }

    /**
     * For Glue version 1.0 or earlier jobs, using the standard worker type, the
     * number of Glue data processing units (DPUs) that can be allocated when this
     * job runs. A DPU is a relative measure of processing power that consists of 4
     * vCPUs of compute capacity and 16 GB of memory. For more information, see the
     * Glue pricing page.
     *
     * For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead,
     * you should specify a Worker type and the Number of workers.
     *
     * Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
     *
     * The value that can be allocated for MaxCapacity depends on whether you are
     * running a Python shell job, an Apache Spark ETL job, or an Apache Spark
     * streaming ETL job:
     *
     * - When you specify a Python shell job (JobCommand.Name="pythonshell"), you
     *   can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
     * - When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or an
     *   Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can
     *   allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot
     *   have a fractional DPU allocation.
     */
    inline double GetMaxCapacity() const { return m_maxCapacity; }
    inline bool MaxCapacityHasBeenSet() const { return m_maxCapacityHasBeenSet; }
    inline void SetMaxCapacity(double value) { m_maxCapacityHasBeenSet = true; m_maxCapacity = value; }
    inline StartJobRunRequest& WithMaxCapacity(double value) { SetMaxCapacity(value); return *this; }
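    /*
     * Illustrative sketch: MaxCapacity applies to Python shell jobs and to Glue
     * 1.0-and-earlier Spark jobs, and is mutually exclusive with
     * WorkerType/NumberOfWorkers (see the note above). The job name is
     * hypothetical.
     *
     *   Aws::Glue::Model::StartJobRunRequest request;
     *   request.SetJobName("my-python-shell-job");
     *   request.SetMaxCapacity(0.0625);   // pythonshell: 0.0625 or 1 DPU
     */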

    /**
     * The name of the SecurityConfiguration structure to be used with this job
     * run.
     */
    inline const Aws::String& GetSecurityConfiguration() const { return m_securityConfiguration; }
    inline bool SecurityConfigurationHasBeenSet() const { return m_securityConfigurationHasBeenSet; }
    inline void SetSecurityConfiguration(const Aws::String& value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration = value; }
    inline void SetSecurityConfiguration(Aws::String&& value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration = std::move(value); }
    inline void SetSecurityConfiguration(const char* value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration.assign(value); }
    inline StartJobRunRequest& WithSecurityConfiguration(const Aws::String& value) { SetSecurityConfiguration(value); return *this; }
    inline StartJobRunRequest& WithSecurityConfiguration(Aws::String&& value) { SetSecurityConfiguration(std::move(value)); return *this; }
    inline StartJobRunRequest& WithSecurityConfiguration(const char* value) { SetSecurityConfiguration(value); return *this; }

    /**
     * Specifies configuration properties of a job run notification.
     */
    inline const NotificationProperty& GetNotificationProperty() const { return m_notificationProperty; }
    inline bool NotificationPropertyHasBeenSet() const { return m_notificationPropertyHasBeenSet; }
    inline void SetNotificationProperty(const NotificationProperty& value) { m_notificationPropertyHasBeenSet = true; m_notificationProperty = value; }
    inline void SetNotificationProperty(NotificationProperty&& value) { m_notificationPropertyHasBeenSet = true; m_notificationProperty = std::move(value); }
    inline StartJobRunRequest& WithNotificationProperty(const NotificationProperty& value) { SetNotificationProperty(value); return *this; }
    inline StartJobRunRequest& WithNotificationProperty(NotificationProperty&& value) { SetNotificationProperty(std::move(value)); return *this; }
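    /*
     * Illustrative sketch, assuming the Glue model's NotificationProperty exposes
     * a NotifyDelayAfter setter (the delay, in minutes, before a job-run delay
     * notification is sent); check the NotificationProperty header for the exact
     * accessor names.
     *
     *   Aws::Glue::Model::StartJobRunRequest request;
     *   Aws::Glue::Model::NotificationProperty notify;
     *   notify.SetNotifyDelayAfter(10);          // notify if the run exceeds 10 minutes
     *   request.SetNotificationProperty(notify);
     */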

    /**
     * The type of predefined worker that is allocated when a job runs. Accepts a
     * value of G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value
     * Z.2X for Ray jobs.
     *
     * - For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of
     *   memory) with 84 GB disk (approximately 34 GB free), and provides 1
     *   executor per worker. We recommend this worker type for workloads such as
     *   data transforms, joins, and queries; it offers a scalable and
     *   cost-effective way to run most jobs.
     * - For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of
     *   memory) with 128 GB disk (approximately 77 GB free), and provides 1
     *   executor per worker. We recommend this worker type for workloads such as
     *   data transforms, joins, and queries; it offers a scalable and
     *   cost-effective way to run most jobs.
     * - For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of
     *   memory) with 256 GB disk (approximately 235 GB free), and provides 1
     *   executor per worker. We recommend this worker type for jobs whose
     *   workloads contain your most demanding transforms, aggregations, joins, and
     *   queries. This worker type is available only for Glue version 3.0 or later
     *   Spark ETL jobs in the following Amazon Web Services Regions: US East
     *   (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore),
     *   Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe
     *   (Frankfurt), Europe (Ireland), and Europe (Stockholm).
     * - For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of
     *   memory) with 512 GB disk (approximately 487 GB free), and provides 1
     *   executor per worker. We recommend this worker type for jobs whose
     *   workloads contain your most demanding transforms, aggregations, joins, and
     *   queries. This worker type is available only for Glue version 3.0 or later
     *   Spark ETL jobs, in the same Amazon Web Services Regions as supported for
     *   the G.4X worker type.
     * - For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of
     *   memory) with 84 GB disk (approximately 34 GB free), and provides 1
     *   executor per worker. We recommend this worker type for low-volume
     *   streaming jobs. This worker type is only available for Glue version 3.0
     *   streaming jobs.
     * - For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of
     *   memory) with 128 GB disk (approximately 120 GB free), and provides up to 8
     *   Ray workers based on the autoscaler.
     */
    inline const WorkerType& GetWorkerType() const { return m_workerType; }
    inline bool WorkerTypeHasBeenSet() const { return m_workerTypeHasBeenSet; }
    inline void SetWorkerType(const WorkerType& value) { m_workerTypeHasBeenSet = true; m_workerType = value; }
    inline void SetWorkerType(WorkerType&& value) { m_workerTypeHasBeenSet = true; m_workerType = std::move(value); }
    inline StartJobRunRequest& WithWorkerType(const WorkerType& value) { SetWorkerType(value); return *this; }
    inline StartJobRunRequest& WithWorkerType(WorkerType&& value) { SetWorkerType(std::move(value)); return *this; }

    /**
     * The number of workers of a defined workerType that are allocated when a job
     * runs.
     */
    inline int GetNumberOfWorkers() const { return m_numberOfWorkers; }
    inline bool NumberOfWorkersHasBeenSet() const { return m_numberOfWorkersHasBeenSet; }
    inline void SetNumberOfWorkers(int value) { m_numberOfWorkersHasBeenSet = true; m_numberOfWorkers = value; }
    inline StartJobRunRequest& WithNumberOfWorkers(int value) { SetNumberOfWorkers(value); return *this; }
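    /*
     * Illustrative sketch: for Glue 2.0+ Spark jobs, size the run with WorkerType
     * and NumberOfWorkers instead of MaxCapacity. The job name is hypothetical,
     * and the enum spelling assumes the SDK's WorkerType model (e.g. G_1X for
     * the G.1X worker type).
     *
     *   Aws::Glue::Model::StartJobRunRequest request;
     *   request.SetJobName("my-spark-job");
     *   request.SetWorkerType(Aws::Glue::Model::WorkerType::G_1X);  // 1 DPU per worker
     *   request.SetNumberOfWorkers(10);
     */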

    /**
     * Indicates whether the job is run with a standard or flexible execution
     * class. The standard execution class is ideal for time-sensitive workloads
     * that require fast job startup and dedicated resources.
     *
     * The flexible execution class is appropriate for time-insensitive jobs whose
     * start and completion times may vary.
     *
     * Only jobs with Glue version 3.0 and above and command type glueetl are
     * allowed to set ExecutionClass to FLEX. The flexible execution class is
     * available for Spark jobs.
     */
    inline const ExecutionClass& GetExecutionClass() const { return m_executionClass; }
    inline bool ExecutionClassHasBeenSet() const { return m_executionClassHasBeenSet; }
    inline void SetExecutionClass(const ExecutionClass& value) { m_executionClassHasBeenSet = true; m_executionClass = value; }
    inline void SetExecutionClass(ExecutionClass&& value) { m_executionClassHasBeenSet = true; m_executionClass = std::move(value); }
    inline StartJobRunRequest& WithExecutionClass(const ExecutionClass& value) { SetExecutionClass(value); return *this; }
    inline StartJobRunRequest& WithExecutionClass(ExecutionClass&& value) { SetExecutionClass(std::move(value)); return *this; }

  private:

    Aws::String m_jobName;
    bool m_jobNameHasBeenSet = false;

    Aws::String m_jobRunId;
    bool m_jobRunIdHasBeenSet = false;

    Aws::Map<Aws::String, Aws::String> m_arguments;
    bool m_argumentsHasBeenSet = false;

    int m_timeout;
    bool m_timeoutHasBeenSet = false;

    double m_maxCapacity;
    bool m_maxCapacityHasBeenSet = false;

    Aws::String m_securityConfiguration;
    bool m_securityConfigurationHasBeenSet = false;

    NotificationProperty m_notificationProperty;
    bool m_notificationPropertyHasBeenSet = false;

    WorkerType m_workerType;
    bool m_workerTypeHasBeenSet = false;

    int m_numberOfWorkers;
    bool m_numberOfWorkersHasBeenSet = false;

    ExecutionClass m_executionClass;
    bool m_executionClassHasBeenSet = false;
  };

} // namespace Model
} // namespace Glue
} // namespace Aws