/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

/*
 * Do not modify this file. This file is generated from the glue-2017-03-31.normal.json service model.
 */
using System;
using System.Collections.Generic;
using System.Xml.Serialization;
using System.Text;
using System.IO;
using System.Net;

using Amazon.Runtime;
using Amazon.Runtime.Internal;

namespace Amazon.Glue.Model
{
    /// <summary>
    /// Container for the parameters to the StartJobRun operation.
    /// Starts a job run using a job definition.
    /// </summary>
    public partial class StartJobRunRequest : AmazonGlueRequest
    {
        private int? _allocatedCapacity;
        private Dictionary<string, string> _arguments = new Dictionary<string, string>();
        private ExecutionClass _executionClass;
        private string _jobName;
        private string _jobRunId;
        private double? _maxCapacity;
        private NotificationProperty _notificationProperty;
        private int? _numberOfWorkers;
        private string _securityConfiguration;
        private int? _timeout;
        private WorkerType _workerType;

        /// <summary>
        /// Gets and sets the property AllocatedCapacity.
        /// <para>
        /// This field is deprecated. Use MaxCapacity instead.
        /// </para>
        /// <para>
        /// The number of Glue data processing units (DPUs) to allocate to this JobRun. You can
        /// allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing
        /// power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information,
        /// see the Glue pricing page.
        /// </para>
        /// </summary>
        [Obsolete("This property is deprecated, use MaxCapacity instead.")]
        public int AllocatedCapacity
        {
            get { return this._allocatedCapacity.GetValueOrDefault(); }
            set { this._allocatedCapacity = value; }
        }

        // Check to see if AllocatedCapacity property is set
        internal bool IsSetAllocatedCapacity()
        {
            return this._allocatedCapacity.HasValue;
        }

        /// <summary>
        /// Gets and sets the property Arguments.
        /// <para>
        /// The job arguments associated with this run. For this job run, they replace the default
        /// arguments set in the job definition itself.
        /// </para>
        /// <para>
        /// You can specify arguments here that your own job-execution script consumes, as well
        /// as arguments that Glue itself consumes.
        /// </para>
        /// <para>
        /// Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve
        /// secrets from a Glue Connection, Secrets Manager or other secret management mechanism
        /// if you intend to keep them within the Job.
        /// </para>
        /// <para>
        /// For information about how to specify and consume your own Job arguments, see the Calling
        /// Glue APIs in Python topic in the developer guide.
        /// </para>
        /// <para>
        /// For information about the arguments you can provide to this field when configuring
        /// Spark jobs, see the Special Parameters Used by Glue topic in the developer guide.
        /// </para>
        /// <para>
        /// For information about the arguments you can provide to this field when configuring
        /// Ray jobs, see Using job parameters in Ray jobs in the developer guide.
        /// </para>
        /// </summary>
        public Dictionary<string, string> Arguments
        {
            get { return this._arguments; }
            set { this._arguments = value; }
        }

        // Check to see if Arguments property is set
        internal bool IsSetArguments()
        {
            return this._arguments != null && this._arguments.Count > 0;
        }
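
        // Illustrative usage sketch (not part of the generated model): passing custom job
        // arguments when starting a run from an async method via AmazonGlueClient.StartJobRunAsync.
        // The job name and the argument keys/values below are hypothetical.
        //
        //   var glue = new AmazonGlueClient();
        //   var runResponse = await glue.StartJobRunAsync(new StartJobRunRequest
        //   {
        //       JobName = "my-etl-job",                              // hypothetical job definition name
        //       Arguments = new Dictionary<string, string>
        //       {
        //           ["--input_path"]     = "s3://my-bucket/input/",  // consumed by the job script
        //           ["--enable-metrics"] = "true"                    // consumed by Glue itself
        //       }
        //   });
        //   Console.WriteLine(runResponse.JobRunId);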

        /// <summary>
        /// Gets and sets the property ExecutionClass.
        /// <para>
        /// Indicates whether the job is run with a standard or flexible execution class. The
        /// standard execution class is ideal for time-sensitive workloads that require fast job
        /// startup and dedicated resources.
        /// </para>
        /// <para>
        /// The flexible execution class is appropriate for time-insensitive jobs whose start
        /// and completion times may vary.
        /// </para>
        /// <para>
        /// Only jobs with Glue version 3.0 and above and command type glueetl will
        /// be allowed to set ExecutionClass to FLEX. The flexible execution
        /// class is available for Spark jobs.
        /// </para>
        /// </summary>
        [AWSProperty(Max=16)]
        public ExecutionClass ExecutionClass
        {
            get { return this._executionClass; }
            set { this._executionClass = value; }
        }

        // Check to see if ExecutionClass property is set
        internal bool IsSetExecutionClass()
        {
            return this._executionClass != null;
        }

        /// <summary>
        /// Gets and sets the property JobName.
        /// <para>
        /// The name of the job definition to use.
        /// </para>
        /// </summary>
        [AWSProperty(Required=true, Min=1, Max=255)]
        public string JobName
        {
            get { return this._jobName; }
            set { this._jobName = value; }
        }

        // Check to see if JobName property is set
        internal bool IsSetJobName()
        {
            return this._jobName != null;
        }

        /// <summary>
        /// Gets and sets the property JobRunId.
        /// <para>
        /// The ID of a previous JobRun to retry.
        /// </para>
        /// </summary>
        [AWSProperty(Min=1, Max=255)]
        public string JobRunId
        {
            get { return this._jobRunId; }
            set { this._jobRunId = value; }
        }

        // Check to see if JobRunId property is set
        internal bool IsSetJobRunId()
        {
            return this._jobRunId != null;
        }
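
        // Illustrative sketch (hypothetical names and IDs): requesting the flexible execution
        // class for a Glue 3.0+ Spark (glueetl) job, and retrying a previous run by passing
        // its JobRunId.
        //
        //   var retryRequest = new StartJobRunRequest
        //   {
        //       JobName = "my-etl-job",                // hypothetical
        //       JobRunId = "jr_0123456789abcdef",      // hypothetical ID of the run to retry
        //       ExecutionClass = ExecutionClass.FLEX   // only valid for Glue 3.0+ glueetl jobs
        //   };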

        /// <summary>
        /// Gets and sets the property MaxCapacity.
        /// <para>
        /// For Glue version 1.0 or earlier jobs, using the standard worker type, the number of
        /// Glue data processing units (DPUs) that can be allocated when this job runs. A DPU
        /// is a relative measure of processing power that consists of 4 vCPUs of compute capacity
        /// and 16 GB of memory. For more information, see the Glue pricing page.
        /// </para>
        /// <para>
        /// For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead,
        /// you should specify a Worker type and the Number of workers.
        /// </para>
        /// <para>
        /// Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
        /// </para>
        /// <para>
        /// The value that can be allocated for MaxCapacity depends on whether you
        /// are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming
        /// ETL job:
        /// </para>
        /// <ul>
        /// <li>
        /// <para>
        /// When you specify a Python shell job (JobCommand.Name="pythonshell"),
        /// you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
        /// </para>
        /// </li>
        /// <li>
        /// <para>
        /// When you specify an Apache Spark ETL job (JobCommand.Name="glueetl")
        /// or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"),
        /// you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot
        /// have a fractional DPU allocation.
        /// </para>
        /// </li>
        /// </ul>
        /// </summary>
        public double MaxCapacity
        {
            get { return this._maxCapacity.GetValueOrDefault(); }
            set { this._maxCapacity = value; }
        }

        // Check to see if MaxCapacity property is set
        internal bool IsSetMaxCapacity()
        {
            return this._maxCapacity.HasValue;
        }
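
        // Illustrative sketch (hypothetical job name): MaxCapacity applies to Glue 1.0 and
        // earlier jobs and to Python shell jobs; do not combine it with WorkerType/NumberOfWorkers.
        //
        //   var shellRunRequest = new StartJobRunRequest
        //   {
        //       JobName = "my-python-shell-job",   // hypothetical pythonshell job
        //       MaxCapacity = 0.0625               // 0.0625 or 1 DPU for Python shell jobs
        //   };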

        /// <summary>
        /// Gets and sets the property NotificationProperty.
        /// <para>
        /// Specifies configuration properties of a job run notification.
        /// </para>
        /// </summary>
        public NotificationProperty NotificationProperty
        {
            get { return this._notificationProperty; }
            set { this._notificationProperty = value; }
        }

        // Check to see if NotificationProperty property is set
        internal bool IsSetNotificationProperty()
        {
            return this._notificationProperty != null;
        }

        /// <summary>
        /// Gets and sets the property NumberOfWorkers.
        /// <para>
        /// The number of workers of a defined workerType that are allocated when
        /// a job runs.
        /// </para>
        /// </summary>
        public int NumberOfWorkers
        {
            get { return this._numberOfWorkers.GetValueOrDefault(); }
            set { this._numberOfWorkers = value; }
        }

        // Check to see if NumberOfWorkers property is set
        internal bool IsSetNumberOfWorkers()
        {
            return this._numberOfWorkers.HasValue;
        }

        /// <summary>
        /// Gets and sets the property SecurityConfiguration.
        /// <para>
        /// The name of the SecurityConfiguration structure to be used with this
        /// job run.
        /// </para>
        /// </summary>
        [AWSProperty(Min=1, Max=255)]
        public string SecurityConfiguration
        {
            get { return this._securityConfiguration; }
            set { this._securityConfiguration = value; }
        }

        // Check to see if SecurityConfiguration property is set
        internal bool IsSetSecurityConfiguration()
        {
            return this._securityConfiguration != null;
        }

        /// <summary>
        /// Gets and sets the property Timeout.
        /// <para>
        /// The JobRun timeout in minutes. This is the maximum time that a job run
        /// can consume resources before it is terminated and enters TIMEOUT status.
        /// This value overrides the timeout value set in the parent job.
        /// </para>
        /// <para>
        /// Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880
        /// minutes (48 hours).
        /// </para>
        /// </summary>
        [AWSProperty(Min=1)]
        public int Timeout
        {
            get { return this._timeout.GetValueOrDefault(); }
            set { this._timeout = value; }
        }

        // Check to see if Timeout property is set
        internal bool IsSetTimeout()
        {
            return this._timeout.HasValue;
        }
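
        // Illustrative sketch (hypothetical names): overriding the job timeout, attaching a
        // security configuration, and requesting a delay notification. NotifyDelayAfter is
        // assumed here to be the delay-notification threshold (in minutes) on NotificationProperty.
        //
        //   var configuredRequest = new StartJobRunRequest
        //   {
        //       JobName = "my-etl-job",                         // hypothetical
        //       Timeout = 120,                                  // terminate the run after 2 hours
        //       SecurityConfiguration = "my-security-config",   // hypothetical SecurityConfiguration name
        //       NotificationProperty = new NotificationProperty { NotifyDelayAfter = 10 }
        //   };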

        /// <summary>
        /// Gets and sets the property WorkerType.
        /// <para>
        /// The type of predefined worker that is allocated when a job runs. Accepts a value of
        /// G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
        /// </para>
        /// <ul>
        /// <li>
        /// <para>
        /// For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of
        /// memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker.
        /// We recommend this worker type for workloads such as data transforms, joins, and queries,
        /// to offer a scalable and cost-effective way to run most jobs.
        /// </para>
        /// </li>
        /// <li>
        /// <para>
        /// For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of
        /// memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker.
        /// We recommend this worker type for workloads such as data transforms, joins, and queries,
        /// to offer a scalable and cost-effective way to run most jobs.
        /// </para>
        /// </li>
        /// <li>
        /// <para>
        /// For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB
        /// of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per
        /// worker. We recommend this worker type for jobs whose workloads contain your most demanding
        /// transforms, aggregations, joins, and queries. This worker type is available only for
        /// Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions:
        /// US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore),
        /// Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt),
        /// Europe (Ireland), and Europe (Stockholm).
        /// </para>
        /// </li>
        /// <li>
        /// <para>
        /// For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB
        /// of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per
        /// worker. We recommend this worker type for jobs whose workloads contain your most demanding
        /// transforms, aggregations, joins, and queries. This worker type is available only for
        /// Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions
        /// as supported for the G.4X worker type.
        /// </para>
        /// </li>
        /// <li>
        /// <para>
        /// For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4
        /// GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
        /// worker. We recommend this worker type for low volume streaming jobs. This worker type
        /// is only available for Glue version 3.0 streaming jobs.
        /// </para>
        /// </li>
        /// <li>
        /// <para>
        /// For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB
        /// of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers
        /// based on the autoscaler.
        /// </para>
        /// </li>
        /// </ul>
        /// </summary>
        public WorkerType WorkerType
        {
            get { return this._workerType; }
            set { this._workerType = value; }
        }

        // Check to see if WorkerType property is set
        internal bool IsSetWorkerType()
        {
            return this._workerType != null;
        }
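
        // Illustrative sketch (hypothetical job names): sizing a run with WorkerType and
        // NumberOfWorkers instead of MaxCapacity. The WorkerType.G1X and WorkerType.Z2X
        // constants are assumed to correspond to the G.1X and Z.2X values described above.
        //
        //   // Spark job on 10 G.1X workers (10 DPUs total)
        //   var sparkRunRequest = new StartJobRunRequest
        //   {
        //       JobName = "my-spark-job",          // hypothetical
        //       WorkerType = WorkerType.G1X,
        //       NumberOfWorkers = 10
        //   };
        //
        //   // Ray job on Z.2X workers
        //   var rayRunRequest = new StartJobRunRequest
        //   {
        //       JobName = "my-ray-job",            // hypothetical
        //       WorkerType = WorkerType.Z2X,
        //       NumberOfWorkers = 5
        //   };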

    }
}