/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/glue/Glue_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/DateTime.h>
#include <aws/glue/model/JobRunState.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/glue/model/Predecessor.h>
#include <aws/glue/model/WorkerType.h>
#include <aws/glue/model/NotificationProperty.h>
#include <aws/glue/model/ExecutionClass.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace Glue
{
namespace Model
{

  /** *

Contains information about a job run.

See Also: AWS API Reference
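A brief usage sketch (editorial illustration, not part of the generated
header; payloadString is a hypothetical JSON document describing a job run):

@code
Aws::Utils::Json::JsonValue doc(payloadString);          // parse JSON text
Aws::Glue::Model::JobRun run(doc.View());                // deserialize
Aws::Utils::Json::JsonValue roundTrip = run.Jsonize();   // serialize back
@endcode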

*/ class JobRun { public: AWS_GLUE_API JobRun(); AWS_GLUE_API JobRun(Aws::Utils::Json::JsonView jsonValue); AWS_GLUE_API JobRun& operator=(Aws::Utils::Json::JsonView jsonValue); AWS_GLUE_API Aws::Utils::Json::JsonValue Jsonize() const; /** *

The ID of this job run.

*/ inline const Aws::String& GetId() const{ return m_id; } /** *

The ID of this job run.

*/ inline bool IdHasBeenSet() const { return m_idHasBeenSet; } /** *

The ID of this job run.

*/ inline void SetId(const Aws::String& value) { m_idHasBeenSet = true; m_id = value; } /** *

The ID of this job run.

*/ inline void SetId(Aws::String&& value) { m_idHasBeenSet = true; m_id = std::move(value); } /** *

The ID of this job run.

*/ inline void SetId(const char* value) { m_idHasBeenSet = true; m_id.assign(value); } /** *

The ID of this job run.

*/ inline JobRun& WithId(const Aws::String& value) { SetId(value); return *this;} /** *

The ID of this job run.

*/ inline JobRun& WithId(Aws::String&& value) { SetId(std::move(value)); return *this;} /** *

The ID of this job run.

*/ inline JobRun& WithId(const char* value) { SetId(value); return *this;} /** *

The number of the attempt to run this job.

*/ inline int GetAttempt() const{ return m_attempt; } /** *

The number of the attempt to run this job.

*/ inline bool AttemptHasBeenSet() const { return m_attemptHasBeenSet; } /** *

The number of the attempt to run this job.

*/ inline void SetAttempt(int value) { m_attemptHasBeenSet = true; m_attempt = value; } /** *

The number of the attempt to run this job.

*/ inline JobRun& WithAttempt(int value) { SetAttempt(value); return *this;} /** *

The ID of the previous run of this job. For example, the * JobRunId specified in the StartJobRun action.

*/ inline const Aws::String& GetPreviousRunId() const{ return m_previousRunId; } /** *

The ID of the previous run of this job. For example, the * JobRunId specified in the StartJobRun action.

*/ inline bool PreviousRunIdHasBeenSet() const { return m_previousRunIdHasBeenSet; } /** *

The ID of the previous run of this job. For example, the * JobRunId specified in the StartJobRun action.

*/ inline void SetPreviousRunId(const Aws::String& value) { m_previousRunIdHasBeenSet = true; m_previousRunId = value; } /** *

The ID of the previous run of this job. For example, the * JobRunId specified in the StartJobRun action.

*/ inline void SetPreviousRunId(Aws::String&& value) { m_previousRunIdHasBeenSet = true; m_previousRunId = std::move(value); } /** *

The ID of the previous run of this job. For example, the * JobRunId specified in the StartJobRun action.

*/ inline void SetPreviousRunId(const char* value) { m_previousRunIdHasBeenSet = true; m_previousRunId.assign(value); } /** *

The ID of the previous run of this job. For example, the * JobRunId specified in the StartJobRun action.

*/ inline JobRun& WithPreviousRunId(const Aws::String& value) { SetPreviousRunId(value); return *this;} /** *

The ID of the previous run of this job. For example, the * JobRunId specified in the StartJobRun action.

*/ inline JobRun& WithPreviousRunId(Aws::String&& value) { SetPreviousRunId(std::move(value)); return *this;} /** *

The ID of the previous run of this job. For example, the * JobRunId specified in the StartJobRun action.

*/ inline JobRun& WithPreviousRunId(const char* value) { SetPreviousRunId(value); return *this;} /** *

The name of the trigger that started this job run.

*/ inline const Aws::String& GetTriggerName() const{ return m_triggerName; } /** *

The name of the trigger that started this job run.

*/ inline bool TriggerNameHasBeenSet() const { return m_triggerNameHasBeenSet; } /** *

The name of the trigger that started this job run.

*/ inline void SetTriggerName(const Aws::String& value) { m_triggerNameHasBeenSet = true; m_triggerName = value; } /** *

The name of the trigger that started this job run.

*/ inline void SetTriggerName(Aws::String&& value) { m_triggerNameHasBeenSet = true; m_triggerName = std::move(value); } /** *

The name of the trigger that started this job run.

*/ inline void SetTriggerName(const char* value) { m_triggerNameHasBeenSet = true; m_triggerName.assign(value); } /** *

The name of the trigger that started this job run.

*/ inline JobRun& WithTriggerName(const Aws::String& value) { SetTriggerName(value); return *this;} /** *

The name of the trigger that started this job run.

*/ inline JobRun& WithTriggerName(Aws::String&& value) { SetTriggerName(std::move(value)); return *this;} /** *

The name of the trigger that started this job run.

*/ inline JobRun& WithTriggerName(const char* value) { SetTriggerName(value); return *this;} /** *

The name of the job definition being used in this run.

*/ inline const Aws::String& GetJobName() const{ return m_jobName; } /** *

The name of the job definition being used in this run.

*/ inline bool JobNameHasBeenSet() const { return m_jobNameHasBeenSet; } /** *

The name of the job definition being used in this run.

*/ inline void SetJobName(const Aws::String& value) { m_jobNameHasBeenSet = true; m_jobName = value; } /** *

The name of the job definition being used in this run.

*/ inline void SetJobName(Aws::String&& value) { m_jobNameHasBeenSet = true; m_jobName = std::move(value); } /** *

The name of the job definition being used in this run.

*/ inline void SetJobName(const char* value) { m_jobNameHasBeenSet = true; m_jobName.assign(value); } /** *

The name of the job definition being used in this run.

*/ inline JobRun& WithJobName(const Aws::String& value) { SetJobName(value); return *this;} /** *

The name of the job definition being used in this run.

*/ inline JobRun& WithJobName(Aws::String&& value) { SetJobName(std::move(value)); return *this;} /** *

The name of the job definition being used in this run.

*/ inline JobRun& WithJobName(const char* value) { SetJobName(value); return *this;} /** *

The date and time at which this job run was started.

*/ inline const Aws::Utils::DateTime& GetStartedOn() const{ return m_startedOn; } /** *

The date and time at which this job run was started.

*/ inline bool StartedOnHasBeenSet() const { return m_startedOnHasBeenSet; } /** *

The date and time at which this job run was started.

*/ inline void SetStartedOn(const Aws::Utils::DateTime& value) { m_startedOnHasBeenSet = true; m_startedOn = value; } /** *

The date and time at which this job run was started.

*/ inline void SetStartedOn(Aws::Utils::DateTime&& value) { m_startedOnHasBeenSet = true; m_startedOn = std::move(value); } /** *

The date and time at which this job run was started.

*/ inline JobRun& WithStartedOn(const Aws::Utils::DateTime& value) { SetStartedOn(value); return *this;} /** *

The date and time at which this job run was started.

*/ inline JobRun& WithStartedOn(Aws::Utils::DateTime&& value) { SetStartedOn(std::move(value)); return *this;} /** *

The last time that this job run was modified.

*/ inline const Aws::Utils::DateTime& GetLastModifiedOn() const{ return m_lastModifiedOn; } /** *

The last time that this job run was modified.

*/ inline bool LastModifiedOnHasBeenSet() const { return m_lastModifiedOnHasBeenSet; } /** *

The last time that this job run was modified.

*/ inline void SetLastModifiedOn(const Aws::Utils::DateTime& value) { m_lastModifiedOnHasBeenSet = true; m_lastModifiedOn = value; } /** *

The last time that this job run was modified.

*/ inline void SetLastModifiedOn(Aws::Utils::DateTime&& value) { m_lastModifiedOnHasBeenSet = true; m_lastModifiedOn = std::move(value); } /** *

The last time that this job run was modified.

*/ inline JobRun& WithLastModifiedOn(const Aws::Utils::DateTime& value) { SetLastModifiedOn(value); return *this;} /** *

The last time that this job run was modified.

*/ inline JobRun& WithLastModifiedOn(Aws::Utils::DateTime&& value) { SetLastModifiedOn(std::move(value)); return *this;} /** *

The date and time that this job run completed.

*/ inline const Aws::Utils::DateTime& GetCompletedOn() const{ return m_completedOn; } /** *

The date and time that this job run completed.

*/ inline bool CompletedOnHasBeenSet() const { return m_completedOnHasBeenSet; } /** *

The date and time that this job run completed.

*/ inline void SetCompletedOn(const Aws::Utils::DateTime& value) { m_completedOnHasBeenSet = true; m_completedOn = value; } /** *

The date and time that this job run completed.

*/ inline void SetCompletedOn(Aws::Utils::DateTime&& value) { m_completedOnHasBeenSet = true; m_completedOn = std::move(value); } /** *

The date and time that this job run completed.

*/ inline JobRun& WithCompletedOn(const Aws::Utils::DateTime& value) { SetCompletedOn(value); return *this;} /** *

The date and time that this job run completed.
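A small sketch (editorial, not generated SDK text) of deriving the wall-clock
duration from StartedOn and CompletedOn; note that ExecutionTime, below, is
the resource-consumption measure and can differ:

@code
// assumes `run` is a populated JobRun with both timestamps set
long long wallClockMs = run.GetCompletedOn().Millis() - run.GetStartedOn().Millis();
@endcode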

*/ inline JobRun& WithCompletedOn(Aws::Utils::DateTime&& value) { SetCompletedOn(std::move(value)); return *this;} /** *

The current state of the job run. For more information about the statuses of * jobs that have terminated abnormally, see Glue Job * Run Statuses.

*/ inline const JobRunState& GetJobRunState() const{ return m_jobRunState; } /** *

The current state of the job run. For more information about the statuses of * jobs that have terminated abnormally, see Glue Job * Run Statuses.

*/ inline bool JobRunStateHasBeenSet() const { return m_jobRunStateHasBeenSet; } /** *

The current state of the job run. For more information about the statuses of * jobs that have terminated abnormally, see Glue Job * Run Statuses.

*/ inline void SetJobRunState(const JobRunState& value) { m_jobRunStateHasBeenSet = true; m_jobRunState = value; } /** *

The current state of the job run. For more information about the statuses of * jobs that have terminated abnormally, see Glue Job * Run Statuses.

*/ inline void SetJobRunState(JobRunState&& value) { m_jobRunStateHasBeenSet = true; m_jobRunState = std::move(value); } /** *

The current state of the job run. For more information about the statuses of * jobs that have terminated abnormally, see Glue Job * Run Statuses.

*/ inline JobRun& WithJobRunState(const JobRunState& value) { SetJobRunState(value); return *this;} /** *

The current state of the job run. For more information about the statuses of * jobs that have terminated abnormally, see Glue Job * Run Statuses.
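A short sketch (editorial illustration) of checking for a terminal state,
using enumerators as named in aws/glue/model/JobRunState.h:

@code
const JobRunState s = run.GetJobRunState();
if (s == JobRunState::SUCCEEDED || s == JobRunState::FAILED ||
    s == JobRunState::TIMEOUT)
{
    // the run has finished; on failure, inspect run.GetErrorMessage()
}
@endcode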

*/ inline JobRun& WithJobRunState(JobRunState&& value) { SetJobRunState(std::move(value)); return *this;} /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline const Aws::Map<Aws::String, Aws::String>& GetArguments() const{ return m_arguments; } /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline bool ArgumentsHasBeenSet() const { return m_argumentsHasBeenSet; } /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline void SetArguments(const Aws::Map<Aws::String, Aws::String>& value) { m_argumentsHasBeenSet = true; m_arguments = value; } /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline void SetArguments(Aws::Map<Aws::String, Aws::String>&& value) { m_argumentsHasBeenSet = true; m_arguments = std::move(value); } /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline JobRun& WithArguments(const Aws::Map<Aws::String, Aws::String>& value) { SetArguments(value); return *this;} /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline JobRun& WithArguments(Aws::Map<Aws::String, Aws::String>&& value) { SetArguments(std::move(value)); return *this;} /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline JobRun& AddArguments(const Aws::String& key, const Aws::String& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, value); return *this; } /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline JobRun& AddArguments(Aws::String&& key, const Aws::String& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(std::move(key), value); return *this; } /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline JobRun& AddArguments(const Aws::String& key, Aws::String&& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, std::move(value)); return *this; } /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline JobRun& AddArguments(Aws::String&& key, Aws::String&& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(std::move(key), std::move(value)); return *this; } /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline JobRun& AddArguments(const char* key, Aws::String&& value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, std::move(value)); return *this; } /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.

*/ inline JobRun& AddArguments(Aws::String&& key, const char* value) { m_argumentsHasBeenSet = true; m_arguments.emplace(std::move(key), value); return *this; } /** *

The job arguments associated with this run. For this job run, they replace * the default arguments set in the job definition itself.

You can specify * arguments here that your own job-execution script consumes, as well as arguments * that Glue itself consumes.

Job arguments may be logged. Do not pass * plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets * Manager or other secret management mechanism if you intend to keep them within * the Job.

For information about how to specify and consume your own Job * arguments, see the Calling * Glue APIs in Python topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Spark jobs, * see the Special * Parameters Used by Glue topic in the developer guide.

For information * about the arguments you can provide to this field when configuring Ray jobs, see * Using * job parameters in Ray jobs in the developer guide.
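A minimal usage sketch (editorial addition; the argument name "--my_arg" is
hypothetical):

@code
const Aws::Map<Aws::String, Aws::String>& args = run.GetArguments();
auto it = args.find("--my_arg");
if (it != args.end())
{
    const Aws::String& value = it->second;  // never log plaintext secrets
}
@endcode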

*/ inline JobRun& AddArguments(const char* key, const char* value) { m_argumentsHasBeenSet = true; m_arguments.emplace(key, value); return *this; } /** *

An error message associated with this job run.

*/ inline const Aws::String& GetErrorMessage() const{ return m_errorMessage; } /** *

An error message associated with this job run.

*/ inline bool ErrorMessageHasBeenSet() const { return m_errorMessageHasBeenSet; } /** *

An error message associated with this job run.

*/ inline void SetErrorMessage(const Aws::String& value) { m_errorMessageHasBeenSet = true; m_errorMessage = value; } /** *

An error message associated with this job run.

*/ inline void SetErrorMessage(Aws::String&& value) { m_errorMessageHasBeenSet = true; m_errorMessage = std::move(value); } /** *

An error message associated with this job run.

*/ inline void SetErrorMessage(const char* value) { m_errorMessageHasBeenSet = true; m_errorMessage.assign(value); } /** *

An error message associated with this job run.

*/ inline JobRun& WithErrorMessage(const Aws::String& value) { SetErrorMessage(value); return *this;} /** *

An error message associated with this job run.

*/ inline JobRun& WithErrorMessage(Aws::String&& value) { SetErrorMessage(std::move(value)); return *this;} /** *

An error message associated with this job run.

*/ inline JobRun& WithErrorMessage(const char* value) { SetErrorMessage(value); return *this;} /** *

A list of predecessors to this job run.

*/ inline const Aws::Vector<Predecessor>& GetPredecessorRuns() const{ return m_predecessorRuns; } /** *

A list of predecessors to this job run.

*/ inline bool PredecessorRunsHasBeenSet() const { return m_predecessorRunsHasBeenSet; } /** *

A list of predecessors to this job run.

*/ inline void SetPredecessorRuns(const Aws::Vector<Predecessor>& value) { m_predecessorRunsHasBeenSet = true; m_predecessorRuns = value; } /** *

A list of predecessors to this job run.

*/ inline void SetPredecessorRuns(Aws::Vector<Predecessor>&& value) { m_predecessorRunsHasBeenSet = true; m_predecessorRuns = std::move(value); } /** *

A list of predecessors to this job run.

*/ inline JobRun& WithPredecessorRuns(const Aws::Vector<Predecessor>& value) { SetPredecessorRuns(value); return *this;} /** *

A list of predecessors to this job run.

*/ inline JobRun& WithPredecessorRuns(Aws::Vector<Predecessor>&& value) { SetPredecessorRuns(std::move(value)); return *this;} /** *

A list of predecessors to this job run.

*/ inline JobRun& AddPredecessorRuns(const Predecessor& value) { m_predecessorRunsHasBeenSet = true; m_predecessorRuns.push_back(value); return *this; } /** *

A list of predecessors to this job run.

*/ inline JobRun& AddPredecessorRuns(Predecessor&& value) { m_predecessorRunsHasBeenSet = true; m_predecessorRuns.push_back(std::move(value)); return *this; } /** *

The amount of time (in seconds) that the job run consumed resources.

*/ inline int GetExecutionTime() const{ return m_executionTime; } /** *

The amount of time (in seconds) that the job run consumed resources.

*/ inline bool ExecutionTimeHasBeenSet() const { return m_executionTimeHasBeenSet; } /** *

The amount of time (in seconds) that the job run consumed resources.

*/ inline void SetExecutionTime(int value) { m_executionTimeHasBeenSet = true; m_executionTime = value; } /** *

The amount of time (in seconds) that the job run consumed resources.

*/ inline JobRun& WithExecutionTime(int value) { SetExecutionTime(value); return *this;} /** *

The JobRun timeout in minutes. This is the maximum time that a * job run can consume resources before it is terminated and enters * TIMEOUT status. This value overrides the timeout value set in the * parent job.

Streaming jobs do not have a timeout. The default for * non-streaming jobs is 2,880 minutes (48 hours).

*/ inline int GetTimeout() const{ return m_timeout; } /** *

The JobRun timeout in minutes. This is the maximum time that a * job run can consume resources before it is terminated and enters * TIMEOUT status. This value overrides the timeout value set in the * parent job.

Streaming jobs do not have a timeout. The default for * non-streaming jobs is 2,880 minutes (48 hours).

*/ inline bool TimeoutHasBeenSet() const { return m_timeoutHasBeenSet; } /** *

The JobRun timeout in minutes. This is the maximum time that a * job run can consume resources before it is terminated and enters * TIMEOUT status. This value overrides the timeout value set in the * parent job.

Streaming jobs do not have a timeout. The default for * non-streaming jobs is 2,880 minutes (48 hours).

*/ inline void SetTimeout(int value) { m_timeoutHasBeenSet = true; m_timeout = value; } /** *

The JobRun timeout in minutes. This is the maximum time that a * job run can consume resources before it is terminated and enters * TIMEOUT status. This value overrides the timeout value set in the * parent job.

Streaming jobs do not have a timeout. The default for * non-streaming jobs is 2,880 minutes (48 hours).
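An illustrative sketch (editorial, and only approximate) relating this value,
in minutes, to ExecutionTime, in seconds:

@code
if (run.GetTimeout() > 0 && run.GetExecutionTime() > run.GetTimeout() * 60)
{
    // the run consumed resources past its timeout window; expect TIMEOUT state
}
@endcode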

*/ inline JobRun& WithTimeout(int value) { SetTimeout(value); return *this;} /** *

For Glue version 1.0 or earlier jobs, using the standard worker type, the * number of Glue data processing units (DPUs) that can be allocated when this job * runs. A DPU is a relative measure of processing power that consists of 4 vCPUs * of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

For * Glue version 2.0+ jobs, you cannot specify a Maximum capacity. * Instead, you should specify a Worker type and the Number of * workers.

Do not set MaxCapacity if using * WorkerType and NumberOfWorkers.

The value that * can be allocated for MaxCapacity depends on whether you are running * a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL * job:

  • When you specify a Python shell job * (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or * 1 DPU. The default is 0.0625 DPU.

  • When you specify an Apache * Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming * ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 * to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU * allocation.

*/ inline double GetMaxCapacity() const{ return m_maxCapacity; } /** *

For Glue version 1.0 or earlier jobs, using the standard worker type, the * number of Glue data processing units (DPUs) that can be allocated when this job * runs. A DPU is a relative measure of processing power that consists of 4 vCPUs * of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

For * Glue version 2.0+ jobs, you cannot specify a Maximum capacity. * Instead, you should specify a Worker type and the Number of * workers.

Do not set MaxCapacity if using * WorkerType and NumberOfWorkers.

The value that * can be allocated for MaxCapacity depends on whether you are running * a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL * job:

  • When you specify a Python shell job * (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or * 1 DPU. The default is 0.0625 DPU.

  • When you specify an Apache * Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming * ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 * to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU * allocation.

*/ inline bool MaxCapacityHasBeenSet() const { return m_maxCapacityHasBeenSet; } /** *

For Glue version 1.0 or earlier jobs, using the standard worker type, the * number of Glue data processing units (DPUs) that can be allocated when this job * runs. A DPU is a relative measure of processing power that consists of 4 vCPUs * of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

For * Glue version 2.0+ jobs, you cannot specify a Maximum capacity. * Instead, you should specify a Worker type and the Number of * workers.

Do not set MaxCapacity if using * WorkerType and NumberOfWorkers.

The value that * can be allocated for MaxCapacity depends on whether you are running * a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL * job:

  • When you specify a Python shell job * (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or * 1 DPU. The default is 0.0625 DPU.

  • When you specify an Apache * Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming * ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 * to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU * allocation.

*/ inline void SetMaxCapacity(double value) { m_maxCapacityHasBeenSet = true; m_maxCapacity = value; } /** *

For Glue version 1.0 or earlier jobs, using the standard worker type, the * number of Glue data processing units (DPUs) that can be allocated when this job * runs. A DPU is a relative measure of processing power that consists of 4 vCPUs * of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

For * Glue version 2.0+ jobs, you cannot specify a Maximum capacity. * Instead, you should specify a Worker type and the Number of * workers.

Do not set MaxCapacity if using * WorkerType and NumberOfWorkers.

The value that * can be allocated for MaxCapacity depends on whether you are running * a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL * job:

  • When you specify a Python shell job * (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or * 1 DPU. The default is 0.0625 DPU.

  • When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation. A sketch of this rule follows.
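A sketch of the allocation rule above (editorial; DefaultMaxCapacity is a
hypothetical helper, and the string literals mirror the JobCommand.Name
values quoted above):

@code
double DefaultMaxCapacity(const Aws::String& jobCommandName)
{
    if (jobCommandName == "pythonshell") return 0.0625;  // or 1.0
    return 10.0;  // "glueetl"/"gluestreaming": 2 to 100 whole DPUs, default 10
}
@endcode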

*/ inline JobRun& WithMaxCapacity(double value) { SetMaxCapacity(value); return *this;} /** *

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low-volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

*/ inline const WorkerType& GetWorkerType() const{ return m_workerType; } /** *

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low-volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

*/ inline bool WorkerTypeHasBeenSet() const { return m_workerTypeHasBeenSet; } /** *

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low-volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

*/ inline void SetWorkerType(const WorkerType& value) { m_workerTypeHasBeenSet = true; m_workerType = value; } /** *

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low-volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

*/ inline void SetWorkerType(WorkerType&& value) { m_workerTypeHasBeenSet = true; m_workerType = std::move(value); } /** *

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low-volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

*/ inline JobRun& WithWorkerType(const WorkerType& value) { SetWorkerType(value); return *this;} /** *

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low-volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

A sketch of this DPU mapping follows.
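The sketch (editorial; DpusPerWorker is a hypothetical helper, with
enumerators as named in aws/glue/model/WorkerType.h):

@code
double DpusPerWorker(WorkerType t)
{
    switch (t)
    {
    case WorkerType::G_025X: return 0.25;
    case WorkerType::G_1X:   return 1.0;
    case WorkerType::G_2X:   return 2.0;
    case WorkerType::G_4X:   return 4.0;
    case WorkerType::G_8X:   return 8.0;
    case WorkerType::Z_2X:   return 2.0;  // M-DPUs, for Ray jobs
    default:                 return 0.0;  // NOT_SET, Standard, etc.
    }
}
@endcode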

*/ inline JobRun& WithWorkerType(WorkerType&& value) { SetWorkerType(std::move(value)); return *this;} /** *

The number of workers of a defined workerType that are allocated * when a job runs.

*/ inline int GetNumberOfWorkers() const{ return m_numberOfWorkers; } /** *

The number of workers of a defined workerType that are allocated * when a job runs.

*/ inline bool NumberOfWorkersHasBeenSet() const { return m_numberOfWorkersHasBeenSet; } /** *

The number of workers of a defined workerType that are allocated * when a job runs.

*/ inline void SetNumberOfWorkers(int value) { m_numberOfWorkersHasBeenSet = true; m_numberOfWorkers = value; } /** *

The number of workers of a defined workerType that are allocated * when a job runs.

*/ inline JobRun& WithNumberOfWorkers(int value) { SetNumberOfWorkers(value); return *this;} /** *

The name of the SecurityConfiguration structure to be used with * this job run.

*/ inline const Aws::String& GetSecurityConfiguration() const{ return m_securityConfiguration; } /** *

The name of the SecurityConfiguration structure to be used with * this job run.

*/ inline bool SecurityConfigurationHasBeenSet() const { return m_securityConfigurationHasBeenSet; } /** *

The name of the SecurityConfiguration structure to be used with * this job run.

*/ inline void SetSecurityConfiguration(const Aws::String& value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration = value; } /** *

The name of the SecurityConfiguration structure to be used with * this job run.

*/ inline void SetSecurityConfiguration(Aws::String&& value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration = std::move(value); } /** *

The name of the SecurityConfiguration structure to be used with * this job run.

*/ inline void SetSecurityConfiguration(const char* value) { m_securityConfigurationHasBeenSet = true; m_securityConfiguration.assign(value); } /** *

The name of the SecurityConfiguration structure to be used with * this job run.

*/ inline JobRun& WithSecurityConfiguration(const Aws::String& value) { SetSecurityConfiguration(value); return *this;} /** *

The name of the SecurityConfiguration structure to be used with * this job run.

*/ inline JobRun& WithSecurityConfiguration(Aws::String&& value) { SetSecurityConfiguration(std::move(value)); return *this;} /** *

The name of the SecurityConfiguration structure to be used with * this job run.

*/ inline JobRun& WithSecurityConfiguration(const char* value) { SetSecurityConfiguration(value); return *this;} /** *

The name of the log group for secure logging that can be server-side * encrypted in Amazon CloudWatch using KMS. This name can be * /aws-glue/jobs/, in which case the default encryption is * NONE. If you add a role name and SecurityConfiguration * name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then * that security configuration is used to encrypt the log group.

*/ inline const Aws::String& GetLogGroupName() const{ return m_logGroupName; } /** *

The name of the log group for secure logging that can be server-side * encrypted in Amazon CloudWatch using KMS. This name can be * /aws-glue/jobs/, in which case the default encryption is * NONE. If you add a role name and SecurityConfiguration * name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then * that security configuration is used to encrypt the log group.

*/ inline bool LogGroupNameHasBeenSet() const { return m_logGroupNameHasBeenSet; } /** *

The name of the log group for secure logging that can be server-side * encrypted in Amazon CloudWatch using KMS. This name can be * /aws-glue/jobs/, in which case the default encryption is * NONE. If you add a role name and SecurityConfiguration * name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then * that security configuration is used to encrypt the log group.

*/ inline void SetLogGroupName(const Aws::String& value) { m_logGroupNameHasBeenSet = true; m_logGroupName = value; } /** *

The name of the log group for secure logging that can be server-side * encrypted in Amazon CloudWatch using KMS. This name can be * /aws-glue/jobs/, in which case the default encryption is * NONE. If you add a role name and SecurityConfiguration * name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then * that security configuration is used to encrypt the log group.

*/ inline void SetLogGroupName(Aws::String&& value) { m_logGroupNameHasBeenSet = true; m_logGroupName = std::move(value); } /** *

The name of the log group for secure logging that can be server-side * encrypted in Amazon CloudWatch using KMS. This name can be * /aws-glue/jobs/, in which case the default encryption is * NONE. If you add a role name and SecurityConfiguration * name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then * that security configuration is used to encrypt the log group.

*/ inline void SetLogGroupName(const char* value) { m_logGroupNameHasBeenSet = true; m_logGroupName.assign(value); } /** *

The name of the log group for secure logging that can be server-side * encrypted in Amazon CloudWatch using KMS. This name can be * /aws-glue/jobs/, in which case the default encryption is * NONE. If you add a role name and SecurityConfiguration * name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then * that security configuration is used to encrypt the log group.

*/ inline JobRun& WithLogGroupName(const Aws::String& value) { SetLogGroupName(value); return *this;} /** *

The name of the log group for secure logging that can be server-side * encrypted in Amazon CloudWatch using KMS. This name can be * /aws-glue/jobs/, in which case the default encryption is * NONE. If you add a role name and SecurityConfiguration * name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then * that security configuration is used to encrypt the log group.

*/ inline JobRun& WithLogGroupName(Aws::String&& value) { SetLogGroupName(std::move(value)); return *this;} /** *

The name of the log group for secure logging that can be server-side * encrypted in Amazon CloudWatch using KMS. This name can be * /aws-glue/jobs/, in which case the default encryption is * NONE. If you add a role name and SecurityConfiguration * name (in other words, * /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then * that security configuration is used to encrypt the log group.
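An illustrative sketch (editorial; roleName and securityConfigurationName are
placeholders) of the secure log group naming described above:

@code
Aws::String logGroup = "/aws-glue/jobs-" + roleName + "-" +
                       securityConfigurationName + "/";
@endcode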

*/ inline JobRun& WithLogGroupName(const char* value) { SetLogGroupName(value); return *this;} /** *

    /**
     * <p>Specifies configuration properties of a job run notification.</p>
     */
    inline const NotificationProperty& GetNotificationProperty() const{ return m_notificationProperty; }

    /**
     * <p>Specifies configuration properties of a job run notification.</p>
     */
    inline bool NotificationPropertyHasBeenSet() const { return m_notificationPropertyHasBeenSet; }

    /**
     * <p>Specifies configuration properties of a job run notification.</p>
     */
    inline void SetNotificationProperty(const NotificationProperty& value) { m_notificationPropertyHasBeenSet = true; m_notificationProperty = value; }

    /**
     * <p>Specifies configuration properties of a job run notification.</p>
     */
    inline void SetNotificationProperty(NotificationProperty&& value) { m_notificationPropertyHasBeenSet = true; m_notificationProperty = std::move(value); }

    /**
     * <p>Specifies configuration properties of a job run notification.</p>
     */
    inline JobRun& WithNotificationProperty(const NotificationProperty& value) { SetNotificationProperty(value); return *this;}

    /**
     * <p>Specifies configuration properties of a job run notification.</p>
     */
    inline JobRun& WithNotificationProperty(NotificationProperty&& value) { SetNotificationProperty(std::move(value)); return *this;}
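
    /*
     * A minimal sketch of reading the notification settings off a retrieved
     * run; it assumes the JobRun was obtained elsewhere (for example, from a
     * GetJobRun result). NotifyDelayAfter is the delay-notification threshold
     * in minutes:
     *
     *   const Aws::Glue::Model::JobRun& run = ...; // obtained elsewhere
     *   if (run.NotificationPropertyHasBeenSet())
     *   {
     *     int delayMinutes = run.GetNotificationProperty().GetNotifyDelayAfter();
     *   }
     */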

    /**
     * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of
     * Apache Spark and Python that Glue makes available in a job. The Python
     * version indicates the version supported for jobs of type Spark.</p>
     * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or
     * greater. However, the versions of Ray, Python and additional libraries
     * available in your Ray job are determined by the <code>Runtime</code>
     * parameter of the Job command.</p> <p>For more information about the
     * available Glue versions and corresponding Spark and Python versions, see
     * <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue
     * version</a> in the developer guide.</p> <p>Jobs that are created without
     * specifying a Glue version default to Glue 0.9.</p>
     */
    inline const Aws::String& GetGlueVersion() const{ return m_glueVersion; }

    /**
     * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of
     * Apache Spark and Python that Glue makes available in a job. The Python
     * version indicates the version supported for jobs of type Spark.</p>
     * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or
     * greater. However, the versions of Ray, Python and additional libraries
     * available in your Ray job are determined by the <code>Runtime</code>
     * parameter of the Job command.</p> <p>For more information about the
     * available Glue versions and corresponding Spark and Python versions, see
     * <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue
     * version</a> in the developer guide.</p> <p>Jobs that are created without
     * specifying a Glue version default to Glue 0.9.</p>
     */
    inline bool GlueVersionHasBeenSet() const { return m_glueVersionHasBeenSet; }

    /**
     * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of
     * Apache Spark and Python that Glue makes available in a job. The Python
     * version indicates the version supported for jobs of type Spark.</p>
     * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or
     * greater. However, the versions of Ray, Python and additional libraries
     * available in your Ray job are determined by the <code>Runtime</code>
     * parameter of the Job command.</p> <p>For more information about the
     * available Glue versions and corresponding Spark and Python versions, see
     * <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue
     * version</a> in the developer guide.</p> <p>Jobs that are created without
     * specifying a Glue version default to Glue 0.9.</p>
     */
    inline void SetGlueVersion(const Aws::String& value) { m_glueVersionHasBeenSet = true; m_glueVersion = value; }

    /**
     * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of
     * Apache Spark and Python that Glue makes available in a job. The Python
     * version indicates the version supported for jobs of type Spark.</p>
     * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or
     * greater. However, the versions of Ray, Python and additional libraries
     * available in your Ray job are determined by the <code>Runtime</code>
     * parameter of the Job command.</p> <p>For more information about the
     * available Glue versions and corresponding Spark and Python versions, see
     * <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue
     * version</a> in the developer guide.</p> <p>Jobs that are created without
     * specifying a Glue version default to Glue 0.9.</p>
     */
    inline void SetGlueVersion(Aws::String&& value) { m_glueVersionHasBeenSet = true; m_glueVersion = std::move(value); }

    /**
     * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of
     * Apache Spark and Python that Glue makes available in a job. The Python
     * version indicates the version supported for jobs of type Spark.</p>
     * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or
     * greater. However, the versions of Ray, Python and additional libraries
     * available in your Ray job are determined by the <code>Runtime</code>
     * parameter of the Job command.</p> <p>For more information about the
     * available Glue versions and corresponding Spark and Python versions, see
     * <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue
     * version</a> in the developer guide.</p> <p>Jobs that are created without
     * specifying a Glue version default to Glue 0.9.</p>
     */
    inline void SetGlueVersion(const char* value) { m_glueVersionHasBeenSet = true; m_glueVersion.assign(value); }

    /**
     * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of
     * Apache Spark and Python that Glue makes available in a job. The Python
     * version indicates the version supported for jobs of type Spark.</p>
     * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or
     * greater. However, the versions of Ray, Python and additional libraries
     * available in your Ray job are determined by the <code>Runtime</code>
     * parameter of the Job command.</p> <p>For more information about the
     * available Glue versions and corresponding Spark and Python versions, see
     * <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue
     * version</a> in the developer guide.</p> <p>Jobs that are created without
     * specifying a Glue version default to Glue 0.9.</p>
     */
    inline JobRun& WithGlueVersion(const Aws::String& value) { SetGlueVersion(value); return *this;}

    /**
     * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of
     * Apache Spark and Python that Glue makes available in a job. The Python
     * version indicates the version supported for jobs of type Spark.</p>
     * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or
     * greater. However, the versions of Ray, Python and additional libraries
     * available in your Ray job are determined by the <code>Runtime</code>
     * parameter of the Job command.</p> <p>For more information about the
     * available Glue versions and corresponding Spark and Python versions, see
     * <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue
     * version</a> in the developer guide.</p> <p>Jobs that are created without
     * specifying a Glue version default to Glue 0.9.</p>
     */
    inline JobRun& WithGlueVersion(Aws::String&& value) { SetGlueVersion(std::move(value)); return *this;}

    /**
     * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of
     * Apache Spark and Python that Glue makes available in a job. The Python
     * version indicates the version supported for jobs of type Spark.</p>
     * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or
     * greater. However, the versions of Ray, Python and additional libraries
     * available in your Ray job are determined by the <code>Runtime</code>
     * parameter of the Job command.</p> <p>For more information about the
     * available Glue versions and corresponding Spark and Python versions, see
     * <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue
     * version</a> in the developer guide.</p> <p>Jobs that are created without
     * specifying a Glue version default to Glue 0.9.</p>
     */
    inline JobRun& WithGlueVersion(const char* value) { SetGlueVersion(value); return *this;}
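
    /*
     * A minimal sketch of branching on a run's Glue version; the comparison
     * value is illustrative. Runs created without an explicit version report
     * Glue 0.9:
     *
     *   const Aws::Glue::Model::JobRun& run = ...; // obtained elsewhere
     *   Aws::String version = run.GlueVersionHasBeenSet() ? run.GetGlueVersion()
     *                                                     : Aws::String("0.9");
     *   if (version == "4.0")
     *   {
     *     // Glue 4.0 semantics (also the minimum version for Ray jobs).
     *   }
     */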

    /**
     * <p>This field populates only for Auto Scaling job runs, and represents the
     * total time each executor ran during the lifecycle of a job run in seconds,
     * multiplied by a DPU factor (1 for <code>G.1X</code>, 2 for
     * <code>G.2X</code>, or 0.25 for <code>G.025X</code> workers). This value
     * may be different from <code>executionEngineRuntime</code> * <code>MaxCapacity</code>,
     * as in Auto Scaling jobs the number of executors running at a given time
     * may be less than the <code>MaxCapacity</code>. Therefore, it is possible
     * that the value of <code>DPUSeconds</code> is less than
     * <code>executionEngineRuntime</code> * <code>MaxCapacity</code>.</p>
     */
    inline double GetDPUSeconds() const{ return m_dPUSeconds; }

    /**
     * <p>This field populates only for Auto Scaling job runs, and represents the
     * total time each executor ran during the lifecycle of a job run in seconds,
     * multiplied by a DPU factor (1 for <code>G.1X</code>, 2 for
     * <code>G.2X</code>, or 0.25 for <code>G.025X</code> workers). This value
     * may be different from <code>executionEngineRuntime</code> * <code>MaxCapacity</code>,
     * as in Auto Scaling jobs the number of executors running at a given time
     * may be less than the <code>MaxCapacity</code>. Therefore, it is possible
     * that the value of <code>DPUSeconds</code> is less than
     * <code>executionEngineRuntime</code> * <code>MaxCapacity</code>.</p>
     */
    inline bool DPUSecondsHasBeenSet() const { return m_dPUSecondsHasBeenSet; }

    /**
     * <p>This field populates only for Auto Scaling job runs, and represents the
     * total time each executor ran during the lifecycle of a job run in seconds,
     * multiplied by a DPU factor (1 for <code>G.1X</code>, 2 for
     * <code>G.2X</code>, or 0.25 for <code>G.025X</code> workers). This value
     * may be different from <code>executionEngineRuntime</code> * <code>MaxCapacity</code>,
     * as in Auto Scaling jobs the number of executors running at a given time
     * may be less than the <code>MaxCapacity</code>. Therefore, it is possible
     * that the value of <code>DPUSeconds</code> is less than
     * <code>executionEngineRuntime</code> * <code>MaxCapacity</code>.</p>
     */
    inline void SetDPUSeconds(double value) { m_dPUSecondsHasBeenSet = true; m_dPUSeconds = value; }

    /**
     * <p>This field populates only for Auto Scaling job runs, and represents the
     * total time each executor ran during the lifecycle of a job run in seconds,
     * multiplied by a DPU factor (1 for <code>G.1X</code>, 2 for
     * <code>G.2X</code>, or 0.25 for <code>G.025X</code> workers). This value
     * may be different from <code>executionEngineRuntime</code> * <code>MaxCapacity</code>,
     * as in Auto Scaling jobs the number of executors running at a given time
     * may be less than the <code>MaxCapacity</code>. Therefore, it is possible
     * that the value of <code>DPUSeconds</code> is less than
     * <code>executionEngineRuntime</code> * <code>MaxCapacity</code>.</p>
     */
    inline JobRun& WithDPUSeconds(double value) { SetDPUSeconds(value); return *this;}
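
    /*
     * A worked example of the arithmetic described above (all numbers are
     * illustrative): an Auto Scaling run on G.2X workers (DPU factor 2) in
     * which one executor ran for 300 seconds and another for 180 seconds
     * reports DPUSeconds = (300 + 180) * 2 = 960. That can be well below
     * executionEngineRuntime * MaxCapacity whenever fewer executors than
     * MaxCapacity were running:
     *
     *   const Aws::Glue::Model::JobRun& run = ...; // obtained elsewhere
     *   if (run.DPUSecondsHasBeenSet())
     *   {
     *     double dpuHours = run.GetDPUSeconds() / 3600.0; // billing-style unit
     *   }
     */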

    /**
     * <p>Indicates whether the job is run with a standard or flexible execution
     * class. The standard execution class is ideal for time-sensitive workloads
     * that require fast job startup and dedicated resources.</p> <p>The flexible
     * execution class is appropriate for time-insensitive jobs whose start and
     * completion times may vary.</p> <p>Only jobs with Glue version 3.0 and
     * above and command type <code>glueetl</code> will be allowed to set
     * <code>ExecutionClass</code> to <code>FLEX</code>. The flexible execution
     * class is available for Spark jobs.</p>
     */
    inline const ExecutionClass& GetExecutionClass() const{ return m_executionClass; }

    /**
     * <p>Indicates whether the job is run with a standard or flexible execution
     * class. The standard execution class is ideal for time-sensitive workloads
     * that require fast job startup and dedicated resources.</p> <p>The flexible
     * execution class is appropriate for time-insensitive jobs whose start and
     * completion times may vary.</p> <p>Only jobs with Glue version 3.0 and
     * above and command type <code>glueetl</code> will be allowed to set
     * <code>ExecutionClass</code> to <code>FLEX</code>. The flexible execution
     * class is available for Spark jobs.</p>
     */
    inline bool ExecutionClassHasBeenSet() const { return m_executionClassHasBeenSet; }

    /**
     * <p>Indicates whether the job is run with a standard or flexible execution
     * class. The standard execution class is ideal for time-sensitive workloads
     * that require fast job startup and dedicated resources.</p> <p>The flexible
     * execution class is appropriate for time-insensitive jobs whose start and
     * completion times may vary.</p> <p>Only jobs with Glue version 3.0 and
     * above and command type <code>glueetl</code> will be allowed to set
     * <code>ExecutionClass</code> to <code>FLEX</code>. The flexible execution
     * class is available for Spark jobs.</p>
     */
    inline void SetExecutionClass(const ExecutionClass& value) { m_executionClassHasBeenSet = true; m_executionClass = value; }

    /**
     * <p>Indicates whether the job is run with a standard or flexible execution
     * class. The standard execution class is ideal for time-sensitive workloads
     * that require fast job startup and dedicated resources.</p> <p>The flexible
     * execution class is appropriate for time-insensitive jobs whose start and
     * completion times may vary.</p> <p>Only jobs with Glue version 3.0 and
     * above and command type <code>glueetl</code> will be allowed to set
     * <code>ExecutionClass</code> to <code>FLEX</code>. The flexible execution
     * class is available for Spark jobs.</p>
     */
    inline void SetExecutionClass(ExecutionClass&& value) { m_executionClassHasBeenSet = true; m_executionClass = std::move(value); }

    /**
     * <p>Indicates whether the job is run with a standard or flexible execution
     * class. The standard execution class is ideal for time-sensitive workloads
     * that require fast job startup and dedicated resources.</p> <p>The flexible
     * execution class is appropriate for time-insensitive jobs whose start and
     * completion times may vary.</p> <p>Only jobs with Glue version 3.0 and
     * above and command type <code>glueetl</code> will be allowed to set
     * <code>ExecutionClass</code> to <code>FLEX</code>. The flexible execution
     * class is available for Spark jobs.</p>
     */
    inline JobRun& WithExecutionClass(const ExecutionClass& value) { SetExecutionClass(value); return *this;}

    /**
     * <p>Indicates whether the job is run with a standard or flexible execution
     * class. The standard execution class is ideal for time-sensitive workloads
     * that require fast job startup and dedicated resources.</p> <p>The flexible
     * execution class is appropriate for time-insensitive jobs whose start and
     * completion times may vary.</p> <p>Only jobs with Glue version 3.0 and
     * above and command type <code>glueetl</code> will be allowed to set
     * <code>ExecutionClass</code> to <code>FLEX</code>. The flexible execution
     * class is available for Spark jobs.</p>
     */
    inline JobRun& WithExecutionClass(ExecutionClass&& value) { SetExecutionClass(std::move(value)); return *this;}

  private:

    Aws::String m_id;
    bool m_idHasBeenSet = false;

    int m_attempt;
    bool m_attemptHasBeenSet = false;

    Aws::String m_previousRunId;
    bool m_previousRunIdHasBeenSet = false;

    Aws::String m_triggerName;
    bool m_triggerNameHasBeenSet = false;

    Aws::String m_jobName;
    bool m_jobNameHasBeenSet = false;

    Aws::Utils::DateTime m_startedOn;
    bool m_startedOnHasBeenSet = false;

    Aws::Utils::DateTime m_lastModifiedOn;
    bool m_lastModifiedOnHasBeenSet = false;

    Aws::Utils::DateTime m_completedOn;
    bool m_completedOnHasBeenSet = false;

    JobRunState m_jobRunState;
    bool m_jobRunStateHasBeenSet = false;

    Aws::Map<Aws::String, Aws::String> m_arguments;
    bool m_argumentsHasBeenSet = false;

    Aws::String m_errorMessage;
    bool m_errorMessageHasBeenSet = false;

    Aws::Vector<Predecessor> m_predecessorRuns;
    bool m_predecessorRunsHasBeenSet = false;

    int m_executionTime;
    bool m_executionTimeHasBeenSet = false;

    int m_timeout;
    bool m_timeoutHasBeenSet = false;

    double m_maxCapacity;
    bool m_maxCapacityHasBeenSet = false;

    WorkerType m_workerType;
    bool m_workerTypeHasBeenSet = false;

    int m_numberOfWorkers;
    bool m_numberOfWorkersHasBeenSet = false;

    Aws::String m_securityConfiguration;
    bool m_securityConfigurationHasBeenSet = false;

    Aws::String m_logGroupName;
    bool m_logGroupNameHasBeenSet = false;

    NotificationProperty m_notificationProperty;
    bool m_notificationPropertyHasBeenSet = false;

    Aws::String m_glueVersion;
    bool m_glueVersionHasBeenSet = false;

    double m_dPUSeconds;
    bool m_dPUSecondsHasBeenSet = false;

    ExecutionClass m_executionClass;
    bool m_executionClassHasBeenSet = false;
  };

} // namespace Model
} // namespace Glue
} // namespace Aws
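
/*
 * An end-to-end sketch of fetching a run and checking its execution class; the
 * job name and run ID are placeholders, and error handling is elided:
 *
 *   #include <aws/core/Aws.h>
 *   #include <aws/glue/GlueClient.h>
 *   #include <aws/glue/model/GetJobRunRequest.h>
 *
 *   Aws::SDKOptions options;
 *   Aws::InitAPI(options);
 *   {
 *     Aws::Glue::GlueClient glue;
 *     Aws::Glue::Model::GetJobRunRequest request;
 *     request.WithJobName("MyEtlJob").WithRunId("jr_0123456789abcdef");
 *     auto outcome = glue.GetJobRun(request);
 *     if (outcome.IsSuccess())
 *     {
 *       const auto& run = outcome.GetResult().GetJobRun();
 *       bool isFlex = run.GetExecutionClass() ==
 *                     Aws::Glue::Model::ExecutionClass::FLEX;
 *     }
 *   }
 *   Aws::ShutdownAPI(options);
 */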