// Code generated by smithy-go-codegen DO NOT EDIT.

package types

import (
	smithydocument "github.com/aws/smithy-go/document"
)

// An object that represents a Batch array job.
type ArrayProperties struct {

	// The size of the array job.
	Size *int32

	noSmithyDocumentSerde
}

// An object that represents the array properties of a job.
type ArrayPropertiesDetail struct {

	// The job index within the array that's associated with this job. This parameter
	// is returned for array job children.
	Index *int32

	// The size of the array job. This parameter is returned for parent array jobs.
	Size *int32

	// A summary of the number of array job children in each available job status.
	// This parameter is returned for parent array jobs.
	StatusSummary map[string]int32

	noSmithyDocumentSerde
}

// An object that represents the array properties of a job.
type ArrayPropertiesSummary struct {

	// The job index within the array that's associated with this job. This parameter
	// is returned for children of array jobs.
	Index *int32

	// The size of the array job. This parameter is returned for parent array jobs.
	Size *int32

	noSmithyDocumentSerde
}

// An object that represents the details of a container that's part of a job
// attempt.
type AttemptContainerDetail struct {

	// The Amazon Resource Name (ARN) of the Amazon ECS container instance that hosts
	// the job attempt.
	ContainerInstanceArn *string

	// The exit code for the job attempt. A non-zero exit code is considered failed.
	ExitCode *int32

	// The name of the CloudWatch Logs log stream that's associated with the
	// container. The log group for Batch jobs is /aws/batch/job . Each container
	// attempt receives a log stream name when it reaches the RUNNING status.
	LogStreamName *string

	// The network interfaces that are associated with the job attempt.
	NetworkInterfaces []NetworkInterface

	// A short (255 max characters) human-readable string to provide additional
	// details for a running or stopped container.
	Reason *string

	// The Amazon Resource Name (ARN) of the Amazon ECS task that's associated with
	// the job attempt. Each container attempt receives a task ARN when it reaches
	// the STARTING status.
	TaskArn *string

	noSmithyDocumentSerde
}

// An object that represents a job attempt.
type AttemptDetail struct {

	// The details for the container in this job attempt.
	Container *AttemptContainerDetail

	// The Unix timestamp (in milliseconds) for when the attempt was started (when
	// the attempt transitioned from the STARTING state to the RUNNING state).
	StartedAt *int64

	// A short, human-readable string to provide additional details for the current
	// status of the job attempt.
	StatusReason *string

	// The Unix timestamp (in milliseconds) for when the attempt was stopped (when
	// the attempt transitioned from the RUNNING state to a terminal state, such as
	// SUCCEEDED or FAILED ).
	StoppedAt *int64

	noSmithyDocumentSerde
}
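// Illustrative sketch, not part of the generated code: converting the Unix
// millisecond timestamps on an AttemptDetail into time.Time values to compute
// an attempt's duration. The variable names are hypothetical; time.UnixMilli
// and fmt are from the standard library.
//
//	// attempt is an AttemptDetail taken from a DescribeJobs response.
//	if attempt.StartedAt != nil && attempt.StoppedAt != nil {
//		started := time.UnixMilli(*attempt.StartedAt)
//		stopped := time.UnixMilli(*attempt.StoppedAt)
//		fmt.Println("attempt ran for", stopped.Sub(started))
//	}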
// An object that represents a Batch compute environment.
type ComputeEnvironmentDetail struct {

	// The Amazon Resource Name (ARN) of the compute environment.
	//
	// This member is required.
	ComputeEnvironmentArn *string

	// The name of the compute environment. It can be up to 128 characters long. It
	// can contain uppercase and lowercase letters, numbers, hyphens (-), and
	// underscores (_).
	//
	// This member is required.
	ComputeEnvironmentName *string

	// The compute resources defined for the compute environment. For more
	// information, see Compute environments (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
	// in the Batch User Guide.
	ComputeResources *ComputeResource

	// The orchestration type of the compute environment. The valid values are ECS
	// (default) or EKS .
	ContainerOrchestrationType OrchestrationType

	// The Amazon Resource Name (ARN) of the underlying Amazon ECS cluster that the
	// compute environment uses.
	EcsClusterArn *string

	// The configuration for the Amazon EKS cluster that supports the Batch compute
	// environment. Only specify this parameter if the containerOrchestrationType is
	// EKS .
	EksConfiguration *EksConfiguration

	// The service role that's associated with the compute environment that allows
	// Batch to make calls to Amazon Web Services API operations on your behalf. For
	// more information, see Batch service IAM role (https://docs.aws.amazon.com/batch/latest/userguide/service_IAM_role.html)
	// in the Batch User Guide.
	ServiceRole *string

	// The state of the compute environment. The valid values are ENABLED or
	// DISABLED . If the state is ENABLED , then the Batch scheduler can attempt to
	// place jobs from an associated job queue on the compute resources within the
	// environment. If the compute environment is managed, then it can scale its
	// instances out or in automatically based on the job queue demand. If the state
	// is DISABLED , then the Batch scheduler doesn't attempt to place jobs within
	// the environment. Jobs in a STARTING or RUNNING state continue to progress
	// normally. Managed compute environments in the DISABLED state don't scale out.
	// Compute environments in a DISABLED state may continue to incur billing
	// charges. To prevent additional charges, turn off and then delete the compute
	// environment. For more information, see State (https://docs.aws.amazon.com/batch/latest/userguide/compute_environment_parameters.html#compute_environment_state)
	// in the Batch User Guide. When an instance is idle, the instance scales down to
	// the minvCpus value. However, the instance size doesn't change. For example,
	// consider a c5.8xlarge instance with a minvCpus value of 4 and a desiredvCpus
	// value of 36 . This instance doesn't scale down to a c5.large instance.
	State CEState

	// The current status of the compute environment (for example, CREATING or VALID
	// ).
	Status CEStatus

	// A short, human-readable string to provide additional details for the current
	// status of the compute environment.
	StatusReason *string

	// The tags applied to the compute environment.
	Tags map[string]string

	// The type of the compute environment: MANAGED or UNMANAGED . For more
	// information, see Compute environments (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
	// in the Batch User Guide.
	Type CEType

	// The maximum number of vCPUs expected to be used for an unmanaged compute
	// environment.
	UnmanagedvCpus *int32

	// Specifies the infrastructure update policy for the compute environment. For
	// more information about infrastructure updates, see Updating compute
	// environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide.
	UpdatePolicy *UpdatePolicy

	// Unique identifier for the compute environment.
	Uuid *string

	noSmithyDocumentSerde
}
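// Illustrative sketch, not part of the generated code: inspecting the state
// and status of a ComputeEnvironmentDetail returned by
// DescribeComputeEnvironments. The enum constants (CEStateDisabled,
// CEStatusValid) are defined elsewhere in this package; the variable names are
// hypothetical, and aws refers to github.com/aws/aws-sdk-go-v2/aws.
//
//	for _, env := range out.ComputeEnvironments {
//		if env.State == CEStateDisabled && env.Status == CEStatusValid {
//			fmt.Printf("%s is disabled but not deleted; it may still incur charges\n",
//				aws.ToString(env.ComputeEnvironmentName))
//		}
//	}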
// The order that compute environments are tried in for job placement within a
// queue. Compute environments are tried in ascending order. For example, if
// two compute environments are associated with a job queue, the compute
// environment with a lower order integer value is tried for job placement
// first. Compute environments must be in the VALID state before you can
// associate them with a job queue. All of the compute environments must be
// either EC2 ( EC2 or SPOT ) or Fargate ( FARGATE or FARGATE_SPOT ); EC2 and
// Fargate compute environments can't be mixed. All compute environments that
// are associated with a job queue must share the same architecture. Batch
// doesn't support mixing compute environment architecture types in a single
// job queue.
type ComputeEnvironmentOrder struct {

	// The Amazon Resource Name (ARN) of the compute environment.
	//
	// This member is required.
	ComputeEnvironment *string

	// The order of the compute environment. Compute environments are tried in
	// ascending order. For example, if two compute environments are associated with
	// a job queue, the compute environment with a lower order integer value is
	// tried for job placement first.
	//
	// This member is required.
	Order *int32

	noSmithyDocumentSerde
}
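// Illustrative sketch, not part of the generated code: ordering two compute
// environments for a job queue, lowest order value first. The ARNs are
// placeholders, and aws refers to github.com/aws/aws-sdk-go-v2/aws.
//
//	order := []ComputeEnvironmentOrder{
//		{Order: aws.Int32(1), ComputeEnvironment: aws.String("arn:aws:batch:us-east-1:111122223333:compute-environment/primary")},
//		{Order: aws.Int32(2), ComputeEnvironment: aws.String("arn:aws:batch:us-east-1:111122223333:compute-environment/overflow")},
//	}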
// An object that represents a Batch compute resource. For more information,
// see Compute environments (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
// in the Batch User Guide.
type ComputeResource struct {

	// The maximum number of vCPUs that a compute environment can support. With both
	// BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies using
	// On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances,
	// Batch might need to exceed maxvCpus to meet your capacity requirements. In
	// this event, Batch never exceeds maxvCpus by more than a single instance. That
	// is, no more than a single instance from among those specified in your compute
	// environment is allocated.
	//
	// This member is required.
	MaxvCpus *int32

	// The VPC subnets where the compute resources are launched. These subnets must
	// be within the same VPC. Fargate compute resources can contain up to 16
	// subnets. For more information, see VPCs and subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
	// in the Amazon VPC User Guide. Batch on Amazon EC2 and Batch on Amazon EKS
	// support Local Zones. For more information, see Local Zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones)
	// in the Amazon EC2 User Guide for Linux Instances, Amazon EKS and Amazon Web
	// Services Local Zones (https://docs.aws.amazon.com/eks/latest/userguide/local-zones.html)
	// in the Amazon EKS User Guide and Amazon ECS clusters in Local Zones,
	// Wavelength Zones, and Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-regions-zones.html#clusters-local-zones)
	// in the Amazon ECS Developer Guide. Batch on Fargate doesn't currently support
	// Local Zones.
	//
	// This member is required.
	Subnets []string

	// The type of compute environment: EC2 , SPOT , FARGATE , or FARGATE_SPOT . For
	// more information, see Compute environments (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
	// in the Batch User Guide. If you choose SPOT , you must also specify an Amazon
	// EC2 Spot Fleet role with the spotIamFleetRole parameter. For more
	// information, see Amazon EC2 spot fleet role (https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html)
	// in the Batch User Guide.
	//
	// This member is required.
	Type CRType

	// The allocation strategy to use for the compute resource if not enough
	// instances of the best fitting instance type can be allocated. This might be
	// because of availability of the instance type in the Region or Amazon EC2
	// service limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html)
	// . For more information, see Allocation strategies (https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it. BEST_FIT (default) Batch
	// selects an instance type that best fits the needs of the jobs with a
	// preference for the lowest-cost instance type. If additional instances of the
	// selected instance type aren't available, Batch waits for the additional
	// instances to be available. If there aren't enough instances available or the
	// user is reaching Amazon EC2 service limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html)
	// , additional jobs aren't run until the currently running jobs are completed.
	// This allocation strategy keeps costs lower but can limit scaling. If you're
	// using Spot Fleets with BEST_FIT , the Spot Fleet IAM Role must be specified.
	// Compute resources that use a BEST_FIT allocation strategy don't support
	// infrastructure updates and can't update some parameters. For more
	// information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. BEST_FIT_PROGRESSIVE Batch selects additional
	// instance types that are large enough to meet the requirements of the jobs in
	// the queue. Its preference is for instance types with lower cost vCPUs. If
	// additional instances of the previously selected instance types aren't
	// available, Batch selects new instance types. SPOT_CAPACITY_OPTIMIZED Batch
	// selects one or more instance types that are large enough to meet the
	// requirements of the jobs in the queue. Its preference is for instance types
	// that are less likely to be interrupted. This allocation strategy is only
	// available for Spot Instance compute resources. With both BEST_FIT_PROGRESSIVE
	// and SPOT_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and
	// the BEST_FIT strategy using Spot Instances, Batch might need to exceed
	// maxvCpus to meet your capacity requirements. In this event, Batch never
	// exceeds maxvCpus by more than a single instance.
	AllocationStrategy CRAllocationStrategy

	// The maximum percentage that a Spot Instance price can be when compared with
	// the On-Demand price for that instance type before instances are launched. For
	// example, if your maximum percentage is 20%, then the Spot price must be less
	// than 20% of the current On-Demand price for that Amazon EC2 instance. You
	// always pay the lowest (market) price and never more than your maximum
	// percentage. If you leave this field empty, the default value is 100% of the
	// On-Demand price. For most use cases, we recommend leaving this field empty.
	// This parameter isn't applicable to jobs that are running on Fargate
	// resources. Don't specify it.
	BidPercentage *int32

	// The desired number of vCPUs in the compute environment. Batch modifies this
	// value between the minimum and maximum values based on job queue demand. This
	// parameter isn't applicable to jobs that are running on Fargate resources.
	// Don't specify it.
	DesiredvCpus *int32

	// Provides information that's used to select Amazon Machine Images (AMIs) for
	// EC2 instances in the compute environment. If Ec2Configuration isn't
	// specified, the default is ECS_AL2 . One or two values can be provided. This
	// parameter isn't applicable to jobs that are running on Fargate resources.
	// Don't specify it.
	Ec2Configuration []Ec2Configuration

	// The Amazon EC2 key pair that's used for instances launched in the compute
	// environment. You can use this key pair to log in to your instances with SSH.
	// This parameter isn't applicable to jobs that are running on Fargate
	// resources. Don't specify it.
	Ec2KeyPair *string

	// The Amazon Machine Image (AMI) ID used for instances launched in the compute
	// environment. This parameter is overridden by the imageIdOverride member of
	// the Ec2Configuration structure. This parameter isn't applicable to jobs that
	// are running on Fargate resources. Don't specify it. The AMI that you choose
	// for a compute environment must match the architecture of the instance types
	// that you intend to use for that compute environment. For example, if your
	// compute environment uses A1 instance types, the compute resource AMI that you
	// choose must support ARM instances. Amazon ECS vends both x86 and ARM versions
	// of the Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see
	// Amazon ECS-optimized Amazon Linux 2 AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants.html)
	// in the Amazon Elastic Container Service Developer Guide.
	//
	// Deprecated: This field is deprecated, use ec2Configuration[].imageIdOverride
	// instead.
	ImageId *string

	// The Amazon ECS instance profile applied to Amazon EC2 instances in a compute
	// environment. You can specify the short name or full Amazon Resource Name
	// (ARN) of an instance profile. For example, ecsInstanceRole or
	// arn:aws:iam:::instance-profile/ecsInstanceRole . For more information, see
	// Amazon ECS instance role (https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	InstanceRole *string

	// The instance types that can be launched. You can specify instance families to
	// launch any instance type within those families (for example, c5 or p3 ), or
	// you can specify specific sizes within a family (such as c5.8xlarge ). You can
	// also choose optimal to select instance types (from the C4, M4, and R4
	// instance families) that match the demand of your job queues. This parameter
	// isn't applicable to jobs that are running on Fargate resources. Don't specify
	// it. When you create a compute environment, the instance types that you select
	// for the compute environment must share the same architecture. For example,
	// you can't mix x86 and ARM instances in the same compute environment.
	// Currently, optimal uses instance types from the C4, M4, and R4 instance
	// families. In Regions that don't have instance types from those instance
	// families, instance types from the C5, M5, and R5 instance families are used.
	InstanceTypes []string

	// The launch template to use for your compute resources. Any other compute
	// resource parameters that you specify in a CreateComputeEnvironment API
	// operation override the same parameters in the launch template. You must
	// specify either the launch template ID or launch template name in the request,
	// but not both. For more information, see Launch template support (https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	LaunchTemplate *LaunchTemplateSpecification

	// The minimum number of vCPUs that a compute environment should maintain (even
	// if the compute environment is DISABLED ). This parameter isn't applicable to
	// jobs that are running on Fargate resources. Don't specify it.
	MinvCpus *int32

	// The Amazon EC2 placement group to associate with your compute resources. If
	// you intend to submit multi-node parallel jobs to your compute environment,
	// you should consider creating a cluster placement group and associating it
	// with your compute resources. This keeps your multi-node parallel job on a
	// logical grouping of instances within a single Availability Zone with high
	// network flow potential. For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
	// in the Amazon EC2 User Guide for Linux Instances. This parameter isn't
	// applicable to jobs that are running on Fargate resources. Don't specify it.
	PlacementGroup *string

	// The Amazon EC2 security groups that are associated with instances launched in
	// the compute environment. One or more security groups must be specified,
	// either in securityGroupIds or using a launch template referenced in
	// launchTemplate . This parameter is required for jobs that are running on
	// Fargate resources and must contain at least one security group. Fargate
	// doesn't support launch templates. If security groups are specified using both
	// securityGroupIds and launchTemplate , the values in securityGroupIds are
	// used.
	SecurityGroupIds []string

	// The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied
	// to a SPOT compute environment. This role is required if the allocation
	// strategy is set to BEST_FIT or if the allocation strategy isn't specified.
	// For more information, see Amazon EC2 spot fleet role (https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it. To tag your Spot Instances on
	// creation, the Spot Fleet IAM role specified here must use the newer
	// AmazonEC2SpotFleetTaggingRole managed policy. The previously recommended
	// AmazonEC2SpotFleetRole managed policy doesn't have the required permissions
	// to tag Spot Instances. For more information, see Spot instances not tagged on
	// creation (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#spot-instance-no-tag)
	// in the Batch User Guide.
	SpotIamFleetRole *string

	// Key-value pair tags to be applied to EC2 resources that are launched in the
	// compute environment. For Batch, these take the form of "String1": "String2" ,
	// where String1 is the tag key and String2 is the tag value (for example, {
	// "Name": "Batch Instance - C4OnDemand" } ). This is helpful for recognizing
	// your Batch instances in the Amazon EC2 console. Updating these tags requires
	// an infrastructure update to the compute environment. For more information,
	// see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. These tags aren't seen when using the Batch
	// ListTagsForResource API operation. This parameter isn't applicable to jobs
	// that are running on Fargate resources. Don't specify it.
	Tags map[string]string

	noSmithyDocumentSerde
}
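// Illustrative sketch, not part of the generated code: a minimal Fargate
// compute resource. Per the field docs above, Fargate resources require the
// type, maxvCpus, subnets, and at least one security group; the subnet and
// security group IDs are placeholders, and aws refers to
// github.com/aws/aws-sdk-go-v2/aws.
//
//	cr := ComputeResource{
//		Type:             CRTypeFargate,
//		MaxvCpus:         aws.Int32(16),
//		Subnets:          []string{"subnet-0123456789abcdef0"},
//		SecurityGroupIds: []string{"sg-0123456789abcdef0"},
//	}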
// An object that represents the attributes of a compute environment that can
// be updated. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
// in the Batch User Guide.
type ComputeResourceUpdate struct {

	// The allocation strategy to use for the compute resource if there aren't
	// enough instances of the best fitting instance type that can be allocated.
	// This might be because of availability of the instance type in the Region or
	// Amazon EC2 service limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html)
	// . For more information, see Allocation strategies (https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html)
	// in the Batch User Guide. When updating a compute environment, changing the
	// allocation strategy requires an infrastructure update of the compute
	// environment. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. BEST_FIT isn't supported when updating a compute
	// environment. This parameter isn't applicable to jobs that are running on
	// Fargate resources. Don't specify it. BEST_FIT_PROGRESSIVE Batch selects
	// additional instance types that are large enough to meet the requirements of
	// the jobs in the queue. Its preference is for instance types with lower cost
	// vCPUs. If additional instances of the previously selected instance types
	// aren't available, Batch selects new instance types. SPOT_CAPACITY_OPTIMIZED
	// Batch selects one or more instance types that are large enough to meet the
	// requirements of the jobs in the queue. Its preference is for instance types
	// that are less likely to be interrupted. This allocation strategy is only
	// available for Spot Instance compute resources. With both BEST_FIT_PROGRESSIVE
	// and SPOT_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and
	// the BEST_FIT strategy using Spot Instances, Batch might need to exceed
	// maxvCpus to meet your capacity requirements. In this event, Batch never
	// exceeds maxvCpus by more than a single instance.
	AllocationStrategy CRUpdateAllocationStrategy

	// The maximum percentage that a Spot Instance price can be when compared with
	// the On-Demand price for that instance type before instances are launched. For
	// example, if your maximum percentage is 20%, the Spot price must be less than
	// 20% of the current On-Demand price for that Amazon EC2 instance. You always
	// pay the lowest (market) price and never more than your maximum percentage.
	// For most use cases, we recommend leaving this field empty. When updating a
	// compute environment, changing the bid percentage requires an infrastructure
	// update of the compute environment. For more information, see Updating compute
	// environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	BidPercentage *int32

	// The desired number of vCPUs in the compute environment. Batch modifies this
	// value between the minimum and maximum values based on job queue demand. This
	// parameter isn't applicable to jobs that are running on Fargate resources.
	// Don't specify it. Batch doesn't support changing the desired number of vCPUs
	// of an existing compute environment. Don't specify this parameter for compute
	// environments using Amazon EKS clusters. When you update the desiredvCpus
	// setting, the value must be between the minvCpus and maxvCpus values.
	// Additionally, the updated desiredvCpus value must be greater than or equal to
	// the current desiredvCpus value. For more information, see Troubleshooting
	// Batch (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#error-desired-vcpus-update)
	// in the Batch User Guide.
	DesiredvCpus *int32

	// Provides information used to select Amazon Machine Images (AMIs) for EC2
	// instances in the compute environment. If Ec2Configuration isn't specified,
	// the default is ECS_AL2 . When updating a compute environment, changing this
	// setting requires an infrastructure update of the compute environment. For
	// more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. To remove the EC2 configuration and any custom AMI
	// ID specified in imageIdOverride , set this value to an empty string. One or
	// two values can be provided. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	Ec2Configuration []Ec2Configuration

	// The Amazon EC2 key pair that's used for instances launched in the compute
	// environment. You can use this key pair to log in to your instances with SSH.
	// To remove the Amazon EC2 key pair, set this value to an empty string. When
	// updating a compute environment, changing the EC2 key pair requires an
	// infrastructure update of the compute environment. For more information, see
	// Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	Ec2KeyPair *string

	// The Amazon Machine Image (AMI) ID used for instances launched in the compute
	// environment. This parameter is overridden by the imageIdOverride member of
	// the Ec2Configuration structure. To remove the custom AMI ID and use the
	// default AMI ID, set this value to an empty string. When updating a compute
	// environment, changing the AMI ID requires an infrastructure update of the
	// compute environment. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it. The AMI that you choose for a
	// compute environment must match the architecture of the instance types that
	// you intend to use for that compute environment. For example, if your compute
	// environment uses A1 instance types, the compute resource AMI that you choose
	// must support ARM instances. Amazon ECS vends both x86 and ARM versions of the
	// Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see Amazon
	// ECS-optimized Amazon Linux 2 AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants.html)
	// in the Amazon Elastic Container Service Developer Guide.
	ImageId *string

	// The Amazon ECS instance profile applied to Amazon EC2 instances in a compute
	// environment. You can specify the short name or full Amazon Resource Name
	// (ARN) of an instance profile. For example, ecsInstanceRole or
	// arn:aws:iam:::instance-profile/ecsInstanceRole . For more information, see
	// Amazon ECS instance role (https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html)
	// in the Batch User Guide. When updating a compute environment, changing this
	// setting requires an infrastructure update of the compute environment. For
	// more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	InstanceRole *string

	// The instance types that can be launched. You can specify instance families to
	// launch any instance type within those families (for example, c5 or p3 ), or
	// you can specify specific sizes within a family (such as c5.8xlarge ). You can
	// also choose optimal to select instance types (from the C4, M4, and R4
	// instance families) that match the demand of your job queues. When updating a
	// compute environment, changing this setting requires an infrastructure update
	// of the compute environment. For more information, see Updating compute
	// environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it. When you create a compute
	// environment, the instance types that you select for the compute environment
	// must share the same architecture. For example, you can't mix x86 and ARM
	// instances in the same compute environment. Currently, optimal uses instance
	// types from the C4, M4, and R4 instance families. In Regions that don't have
	// instance types from those instance families, instance types from the C5, M5,
	// and R5 instance families are used.
	InstanceTypes []string

	// The updated launch template to use for your compute resources. You must
	// specify either the launch template ID or launch template name in the request,
	// but not both. For more information, see Launch template support (https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html)
	// in the Batch User Guide. To remove the custom launch template and use the
	// default launch template, set the launchTemplateId or launchTemplateName
	// member of the launch template specification to an empty string. Removing the
	// launch template from a compute environment will not remove the AMI specified
	// in the launch template. To update the AMI specified in a launch template, the
	// updateToLatestImageVersion parameter must be set to true . When updating a
	// compute environment, changing the launch template requires an infrastructure
	// update of the compute environment. For more information, see Updating compute
	// environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	LaunchTemplate *LaunchTemplateSpecification

	// The maximum number of Amazon EC2 vCPUs that an environment can reach. With
	// both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies
	// using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot
	// Instances, Batch might need to exceed maxvCpus to meet your capacity
	// requirements. In this event, Batch never exceeds maxvCpus by more than a
	// single instance. That is, no more than a single instance from among those
	// specified in your compute environment is allocated.
	MaxvCpus *int32

	// The minimum number of vCPUs that an environment should maintain (even if the
	// compute environment is DISABLED ). This parameter isn't applicable to jobs
	// that are running on Fargate resources. Don't specify it.
	MinvCpus *int32

	// The Amazon EC2 placement group to associate with your compute resources. If
	// you intend to submit multi-node parallel jobs to your compute environment,
	// you should consider creating a cluster placement group and associating it
	// with your compute resources. This keeps your multi-node parallel job on a
	// logical grouping of instances within a single Availability Zone with high
	// network flow potential. For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
	// in the Amazon EC2 User Guide for Linux Instances. When updating a compute
	// environment, changing the placement group requires an infrastructure update
	// of the compute environment. For more information, see Updating compute
	// environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	PlacementGroup *string

	// The Amazon EC2 security groups that are associated with instances launched in
	// the compute environment. This parameter is required for Fargate compute
	// resources, where it can contain up to 5 security groups. For Fargate compute
	// resources, providing an empty list is handled as if this parameter wasn't
	// specified and no change is made. For EC2 compute resources, providing an
	// empty list removes the security groups from the compute resource. When
	// updating a compute environment, changing the EC2 security groups requires an
	// infrastructure update of the compute environment. For more information, see
	// Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide.
	SecurityGroupIds []string

	// The VPC subnets where the compute resources are launched. Fargate compute
	// resources can contain up to 16 subnets. For Fargate compute resources,
	// providing an empty list will be handled as if this parameter wasn't specified
	// and no change is made. For EC2 compute resources, providing an empty list
	// removes the VPC subnets from the compute resource. For more information, see
	// VPCs and subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
	// in the Amazon VPC User Guide. When updating a compute environment, changing
	// the VPC subnets requires an infrastructure update of the compute environment.
	// For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. Batch on Amazon EC2 and Batch on Amazon EKS support
	// Local Zones. For more information, see Local Zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones)
	// in the Amazon EC2 User Guide for Linux Instances, Amazon EKS and Amazon Web
	// Services Local Zones (https://docs.aws.amazon.com/eks/latest/userguide/local-zones.html)
	// in the Amazon EKS User Guide and Amazon ECS clusters in Local Zones,
	// Wavelength Zones, and Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-regions-zones.html#clusters-local-zones)
	// in the Amazon ECS Developer Guide. Batch on Fargate doesn't currently support
	// Local Zones.
	Subnets []string

	// Key-value pair tags to be applied to EC2 resources that are launched in the
	// compute environment. For Batch, these take the form of "String1": "String2" ,
	// where String1 is the tag key and String2 is the tag value (for example, {
	// "Name": "Batch Instance - C4OnDemand" } ). This is helpful for recognizing
	// your Batch instances in the Amazon EC2 console. These tags aren't seen when
	// using the Batch ListTagsForResource API operation. When updating a compute
	// environment, changing this setting requires an infrastructure update of the
	// compute environment. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	Tags map[string]string

	// The type of compute environment: EC2 , SPOT , FARGATE , or FARGATE_SPOT . For
	// more information, see Compute environments (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
	// in the Batch User Guide. If you choose SPOT , you must also specify an Amazon
	// EC2 Spot Fleet role with the spotIamFleetRole parameter. For more
	// information, see Amazon EC2 spot fleet role (https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html)
	// in the Batch User Guide. When updating a compute environment, changing the
	// type of a compute environment requires an infrastructure update of the
	// compute environment. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide.
	Type CRType

	// Specifies whether the AMI ID is updated to the latest one that's supported by
	// Batch when the compute environment has an infrastructure update. The default
	// value is false . An AMI ID can either be specified in the imageId or
	// imageIdOverride parameters or be determined by the launch template that's
	// specified in the launchTemplate parameter. If an AMI ID is specified any of
	// these ways, this parameter is ignored. For more information about how to
	// update AMI IDs during an infrastructure update, see Updating the AMI ID (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html#updating-compute-environments-ami)
	// in the Batch User Guide. When updating a compute environment, changing this
	// setting requires an infrastructure update of the compute environment. For
	// more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide.
	UpdateToLatestImageVersion *bool

	noSmithyDocumentSerde
}
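// Illustrative sketch, not part of the generated code: a scaling-only update
// that raises the vCPU ceiling of an existing managed environment. Per the
// field docs above, minvCpus and maxvCpus changes don't require an
// infrastructure update. aws refers to github.com/aws/aws-sdk-go-v2/aws.
//
//	update := ComputeResourceUpdate{
//		MinvCpus: aws.Int32(0),
//		MaxvCpus: aws.Int32(256),
//	}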
// An object that represents the details of a container that's part of a job.
type ContainerDetail struct {

	// The command that's passed to the container.
	Command []string

	// The Amazon Resource Name (ARN) of the container instance that the container
	// is running on.
	ContainerInstanceArn *string

	// The environment variables to pass to a container. Environment variables
	// cannot start with " AWS_BATCH ". This naming convention is reserved for
	// variables that Batch sets.
	Environment []KeyValuePair

	// The amount of ephemeral storage allocated for the task. This parameter is
	// used to expand the total amount of ephemeral storage available, beyond the
	// default amount, for tasks hosted on Fargate.
	EphemeralStorage *EphemeralStorage

	// The Amazon Resource Name (ARN) of the execution role that Batch can assume.
	// For more information, see Batch execution IAM role (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html)
	// in the Batch User Guide.
	ExecutionRoleArn *string

	// The exit code to return upon completion.
	ExitCode *int32

	// The platform configuration for jobs that are running on Fargate resources.
	// Jobs that are running on EC2 resources must not specify this parameter.
	FargatePlatformConfiguration *FargatePlatformConfiguration

	// The image used to start the container.
	Image *string

	// The instance type of the underlying host infrastructure of a multi-node
	// parallel job. This parameter isn't applicable to jobs that are running on
	// Fargate resources.
	InstanceType *string

	// The Amazon Resource Name (ARN) that's associated with the job when run.
	JobRoleArn *string

	// Linux-specific modifications that are applied to the container, such as
	// details for device mappings.
	LinuxParameters *LinuxParameters

	// The log configuration specification for the container. This parameter maps to
	// LogConfig in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/)
	// . By default, containers use the same logging driver that the Docker daemon
	// uses. However, the container might use a different logging driver than the
	// Docker daemon by specifying a log driver with this parameter in the container
	// definition. To use a different logging driver for a container, the log system
	// must be configured properly on the container instance. Or, alternatively, it
	// must be configured on a different log server for remote logging options. For
	// more information on the options for different supported log drivers, see
	// Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/)
	// in the Docker documentation. Batch currently supports a subset of the logging
	// drivers available to the Docker daemon (shown in the LogConfiguration data
	// type). Additional log drivers might be available in future releases of the
	// Amazon ECS container agent. This parameter requires version 1.18 of the
	// Docker Remote API or greater on your container instance. To check the Docker
	// Remote API version on your container instance, log in to your container
	// instance and run the following command: sudo docker version | grep "Server
	// API version" The Amazon ECS container agent running on a container instance
	// must register the logging drivers available on that instance with the
	// ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed
	// on that instance can use these log configuration options. For more
	// information, see Amazon ECS container agent configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html)
	// in the Amazon Elastic Container Service Developer Guide.
	LogConfiguration *LogConfiguration

	// The name of the Amazon CloudWatch Logs log stream that's associated with the
	// container. The log group for Batch jobs is /aws/batch/job . Each container
	// attempt receives a log stream name when it reaches the RUNNING status.
	LogStreamName *string

	// For jobs running on EC2 resources that didn't specify memory requirements
	// using resourceRequirements , the number of MiB of memory reserved for the
	// job. For other jobs, including all run on Fargate resources, see
	// resourceRequirements .
	Memory *int32

	// The mount points for data volumes in your container.
	MountPoints []MountPoint

	// The network configuration for jobs that are running on Fargate resources.
	// Jobs that are running on EC2 resources must not specify this parameter.
	NetworkConfiguration *NetworkConfiguration

	// The network interfaces that are associated with the job.
	NetworkInterfaces []NetworkInterface

	// When this parameter is true, the container is given elevated permissions on
	// the host container instance (similar to the root user). The default value is
	// false . This parameter isn't applicable to jobs that are running on Fargate
	// resources and shouldn't be provided, or specified as false .
	Privileged *bool

	// When this parameter is true, the container is given read-only access to its
	// root file system. This parameter maps to ReadonlyRootfs in the Create a
	// container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --read-only option to docker run (https://docs.docker.com/engine/reference/commandline/run/)
	// .
	ReadonlyRootFilesystem *bool

	// A short (255 max characters) human-readable string to provide additional
	// details for a running or stopped container.
	Reason *string

	// The type and amount of resources to assign to a container. The supported
	// resources include GPU , MEMORY , and VCPU .
	ResourceRequirements []ResourceRequirement

	// An object that represents the compute environment architecture for Batch jobs
	// on Fargate.
	RuntimePlatform *RuntimePlatform

	// The secrets to pass to the container. For more information, see Specifying
	// sensitive data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
	// in the Batch User Guide.
	Secrets []Secret

	// The Amazon Resource Name (ARN) of the Amazon ECS task that's associated with
	// the container job. Each container attempt receives a task ARN when it reaches
	// the STARTING status.
	TaskArn *string

	// A list of ulimit values to set in the container. This parameter maps to
	// Ulimits in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --ulimit option to docker run (https://docs.docker.com/engine/reference/run/)
	// . This parameter isn't applicable to jobs that are running on Fargate
	// resources.
	Ulimits []Ulimit

	// The user name to use inside the container. This parameter maps to User in the
	// Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --user option to docker run (https://docs.docker.com/engine/reference/run/)
	// .
	User *string

	// The number of vCPUs reserved for the container. For jobs that run on EC2
	// resources, you can specify the vCPU requirement for the job using
	// resourceRequirements , but you can't specify the vCPU requirements in both
	// the vcpus and resourceRequirements object. This parameter maps to CpuShares
	// in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/)
	// . Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one
	// vCPU. This is required but can be specified in several places. It must be
	// specified for each node at least once. This parameter isn't applicable to
	// jobs that run on Fargate resources. For jobs that run on Fargate resources,
	// you must specify the vCPU requirement for the job using resourceRequirements .
	Vcpus *int32

	// A list of volumes that are associated with the job.
	Volumes []Volume

	noSmithyDocumentSerde
}
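// Illustrative sketch, not part of the generated code: surfacing a failed
// container's exit code and reason from a ContainerDetail. The variable names
// are hypothetical; aws.ToInt32 and aws.ToString are helpers from
// github.com/aws/aws-sdk-go-v2/aws.
//
//	if detail.ExitCode != nil && *detail.ExitCode != 0 {
//		fmt.Printf("container failed with exit code %d: %s\n",
//			aws.ToInt32(detail.ExitCode), aws.ToString(detail.Reason))
//	}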
// The overrides that should be sent to a container. For information about
// using Batch overrides when you connect event sources to targets, see
// BatchContainerOverrides (https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/API_BatchContainerOverrides.html)
// .
type ContainerOverrides struct {

	// The command to send to the container that overrides the default command from
	// the Docker image or the job definition. This parameter can't contain an empty
	// string.
	Command []string

	// The environment variables to send to the container. You can add new
	// environment variables, which are added to the container at launch, or you can
	// override the existing environment variables from the Docker image or the job
	// definition. Environment variables cannot start with " AWS_BATCH ". This
	// naming convention is reserved for variables that Batch sets.
	Environment []KeyValuePair

	// The instance type to use for a multi-node parallel job. This parameter isn't
	// applicable to single-node container jobs or jobs that run on Fargate
	// resources, and shouldn't be provided.
	InstanceType *string

	// This parameter is deprecated; use resourceRequirements to override the memory
	// requirements specified in the job definition. It's not supported for jobs
	// running on Fargate resources. For jobs that run on EC2 resources, it
	// overrides the memory parameter set in the job definition, but doesn't
	// override any memory requirement that's specified in the resourceRequirements
	// structure in the job definition. To override memory requirements that are
	// specified in the resourceRequirements structure in the job definition,
	// resourceRequirements must be specified in the SubmitJob request, with type
	// set to MEMORY and value set to the new value. For more information, see Can't
	// override job definition resource requirements (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements)
	// in the Batch User Guide.
	//
	// Deprecated: This field is deprecated, use resourceRequirements instead.
	Memory *int32

	// The type and amount of resources to assign to a container. This overrides the
	// settings in the job definition. The supported resources include GPU , MEMORY
	// , and VCPU .
	ResourceRequirements []ResourceRequirement

	// This parameter is deprecated; use resourceRequirements to override the vcpus
	// parameter that's set in the job definition. It's not supported for jobs
	// running on Fargate resources. For jobs that run on EC2 resources, it
	// overrides the vcpus parameter set in the job definition, but doesn't override
	// any vCPU requirement specified in the resourceRequirements structure in the
	// job definition. To override vCPU requirements that are specified in the
	// resourceRequirements structure in the job definition, resourceRequirements
	// must be specified in the SubmitJob request, with type set to VCPU and value
	// set to the new value. For more information, see Can't override job definition
	// resource requirements (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements)
	// in the Batch User Guide.
	//
	// Deprecated: This field is deprecated, use resourceRequirements instead.
	Vcpus *int32

	noSmithyDocumentSerde
}
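// Illustrative sketch, not part of the generated code: overriding vCPU and
// memory through resourceRequirements rather than the deprecated vcpus and
// memory fields, as the comments above recommend. The command is a
// placeholder; the ResourceType constants are defined elsewhere in this
// package, and memory values are in MiB.
//
//	overrides := ContainerOverrides{
//		Command: []string{"python", "train.py"},
//		ResourceRequirements: []ResourceRequirement{
//			{Type: ResourceTypeVcpu, Value: aws.String("4")},
//			{Type: ResourceTypeMemory, Value: aws.String("8192")},
//		},
//	}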
// Container properties are used for Amazon ECS based job definitions. These
// properties describe the container that's launched as part of a job.
type ContainerProperties struct {

	// The command that's passed to the container. This parameter maps to Cmd in the
	// Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the COMMAND parameter to docker run (https://docs.docker.com/engine/reference/run/)
	// . For more information, see
	// https://docs.docker.com/engine/reference/builder/#cmd (https://docs.docker.com/engine/reference/builder/#cmd)
	// .
	Command []string

	// The environment variables to pass to a container. This parameter maps to Env
	// in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --env option to docker run (https://docs.docker.com/engine/reference/run/)
	// . We don't recommend using plaintext environment variables for sensitive
	// information, such as credential data. Environment variables cannot start with
	// " AWS_BATCH ". This naming convention is reserved for variables that Batch
	// sets.
	Environment []KeyValuePair

	// The amount of ephemeral storage to allocate for the task. This parameter is
	// used to expand the total amount of ephemeral storage available, beyond the
	// default amount, for tasks hosted on Fargate.
	EphemeralStorage *EphemeralStorage

	// The Amazon Resource Name (ARN) of the execution role that Batch can assume.
	// For jobs that run on Fargate resources, you must provide an execution role.
	// For more information, see Batch execution IAM role (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html)
	// in the Batch User Guide.
	ExecutionRoleArn *string

	// The platform configuration for jobs that are running on Fargate resources.
	// Jobs that are running on EC2 resources must not specify this parameter.
	FargatePlatformConfiguration *FargatePlatformConfiguration

	// The image used to start a container. This string is passed directly to the
	// Docker daemon. Images in the Docker Hub registry are available by default.
	// Other repositories are specified with repository-url/image:tag . It can be
	// 255 characters long. It can contain uppercase and lowercase letters, numbers,
	// hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/),
	// and number signs (#). This parameter maps to Image in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the IMAGE parameter of docker run (https://docs.docker.com/engine/reference/run/)
	// . Docker image architecture must match the processor architecture of the
	// compute resources that they're scheduled on. For example, ARM-based Docker
	// images can only run on ARM-based compute resources.
	//   - Images in Amazon ECR Public repositories use the full
	//   registry/repository[:tag] or registry/repository[@digest] naming conventions.
	//   For example, public.ecr.aws/registry_alias/my-web-app:latest .
	//   - Images in Amazon ECR repositories use the full registry and repository URI
	//   (for example, 123456789012.dkr.ecr..amazonaws.com/ ).
	//   - Images in official repositories on Docker Hub use a single name (for
	//   example, ubuntu or mongo ).
	//   - Images in other repositories on Docker Hub are qualified with an
	//   organization name (for example, amazon/amazon-ecs-agent ).
	//   - Images in other online repositories are qualified further by a domain name
	//   (for example, quay.io/assemblyline/ubuntu ).
	Image *string

	// The instance type to use for a multi-node parallel job. All node groups in a
	// multi-node parallel job must use the same instance type. This parameter isn't
	// applicable to single-node container jobs or jobs that run on Fargate
	// resources, and shouldn't be provided.
	InstanceType *string

	// The Amazon Resource Name (ARN) of the IAM role that the container can assume
	// for Amazon Web Services permissions. For more information, see IAM roles for
	// tasks (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html)
	// in the Amazon Elastic Container Service Developer Guide.
	JobRoleArn *string

	// Linux-specific modifications that are applied to the container, such as
	// details for device mappings.
	LinuxParameters *LinuxParameters

	// The log configuration specification for the container. This parameter maps to
	// LogConfig in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/)
	// . By default, containers use the same logging driver that the Docker daemon
	// uses. However, the container might use a different logging driver than the
	// Docker daemon by specifying a log driver with this parameter in the container
	// definition. To use a different logging driver for a container, the log system
	// must be configured properly on the container instance (or on a different log
	// server for remote logging options). For more information on the options for
	// different supported log drivers, see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/)
	// in the Docker documentation. Batch currently supports a subset of the logging
	// drivers available to the Docker daemon (shown in the LogConfiguration data
	// type). This parameter requires version 1.18 of the Docker Remote API or
	// greater on your container instance. To check the Docker Remote API version on
	// your container instance, log in to your container instance and run the
	// following command: sudo docker version | grep "Server API version" The Amazon
	// ECS container agent running on a container instance must register the logging
	// drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS
	// environment variable before containers placed on that instance can use these
	// log configuration options. For more information, see Amazon ECS container
	// agent configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html)
	// in the Amazon Elastic Container Service Developer Guide.
	LogConfiguration *LogConfiguration

	// This parameter is deprecated; use resourceRequirements to specify the memory
	// requirements for the job definition. It's not supported for jobs running on
	// Fargate resources. For jobs that run on EC2 resources, it specifies the
	// memory hard limit (in MiB) for a container. If your container attempts to
	// exceed the specified number, it's terminated. You must specify at least 4 MiB
	// of memory for a job using this parameter. The memory hard limit can be
	// specified in several places. It must be specified for each node at least
	// once.
	//
	// Deprecated: This field is deprecated, use resourceRequirements instead.
	Memory *int32

	// The mount points for data volumes in your container. This parameter maps to
	// Volumes in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --volume option to docker run (https://docs.docker.com/engine/reference/run/)
	// .
	MountPoints []MountPoint

	// The network configuration for jobs that are running on Fargate resources.
	// Jobs that are running on EC2 resources must not specify this parameter.
	NetworkConfiguration *NetworkConfiguration

	// When this parameter is true, the container is given elevated permissions on
	// the host container instance (similar to the root user). This parameter maps
	// to Privileged in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --privileged option to docker run (https://docs.docker.com/engine/reference/run/)
	// . The default value is false. This parameter isn't applicable to jobs that
	// are running on Fargate resources and shouldn't be provided, or specified as
	// false.
	Privileged *bool

	// When this parameter is true, the container is given read-only access to its
	// root file system. This parameter maps to ReadonlyRootfs in the Create a
	// container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --read-only option to docker run .
	ReadonlyRootFilesystem *bool

	// The type and amount of resources to assign to a container. The supported
	// resources include GPU , MEMORY , and VCPU .
	ResourceRequirements []ResourceRequirement

	// An object that represents the compute environment architecture for Batch jobs
	// on Fargate.
	RuntimePlatform *RuntimePlatform

	// The secrets for the container. For more information, see Specifying sensitive
	// data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
	// in the Batch User Guide.
	Secrets []Secret

	// A list of ulimits to set in the container. This parameter maps to Ulimits in
	// the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --ulimit option to docker run (https://docs.docker.com/engine/reference/run/)
	// . This parameter isn't applicable to jobs that are running on Fargate
	// resources and shouldn't be provided.
	Ulimits []Ulimit

	// The user name to use inside the container. This parameter maps to User in the
	// Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --user option to docker run (https://docs.docker.com/engine/reference/run/)
	// .
	User *string

	// This parameter is deprecated; use resourceRequirements to specify the vCPU
	// requirements for the job definition. It's not supported for jobs running on
	// Fargate resources. For jobs running on EC2 resources, it specifies the number
	// of vCPUs reserved for the job. Each vCPU is equivalent to 1,024 CPU shares.
	// This parameter maps to CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/)
	// . The number of vCPUs must be specified but can be specified in several
	// places. You must specify it at least once for each node.
	//
	// Deprecated: This field is deprecated, use resourceRequirements instead.
	Vcpus *int32

	// A list of data volumes used in a job.
	Volumes []Volume

	noSmithyDocumentSerde
}
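// Illustrative sketch, not part of the generated code: a minimal container
// definition that sizes the job through resourceRequirements instead of the
// deprecated memory and vcpus fields. The image and command are placeholders.
//
//	props := ContainerProperties{
//		Image:   aws.String("public.ecr.aws/amazonlinux/amazonlinux:latest"),
//		Command: []string{"echo", "hello"},
//		ResourceRequirements: []ResourceRequirement{
//			{Type: ResourceTypeVcpu, Value: aws.String("1")},
//			{Type: ResourceTypeMemory, Value: aws.String("2048")},
//		},
//	}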
This parameter maps to Ulimits in // the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) // and the --ulimit option to docker run (https://docs.docker.com/engine/reference/run/) // . This parameter isn't applicable to jobs that are running on Fargate resources // and shouldn't be provided. Ulimits []Ulimit // The user name to use inside the container. This parameter maps to User in the // Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) // and the --user option to docker run (https://docs.docker.com/engine/reference/run/) // . User *string // This parameter is deprecated; use resourceRequirements to specify the vCPU // requirements for the job definition. It's not supported for jobs running on // Fargate resources. For jobs running on EC2 resources, it specifies the number of // vCPUs reserved for the job. Each vCPU is equivalent to 1,024 CPU shares. This // parameter maps to CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) // and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/) // . The number of vCPUs must be specified, and it can be set in several places; // you must specify it at least once for each node. // // Deprecated: This field is deprecated; use resourceRequirements instead. Vcpus *int32 // A list of data volumes used in a job. Volumes []Volume noSmithyDocumentSerde } // An object that represents summary details of a container within a job. type ContainerSummary struct { // The exit code to return upon completion. ExitCode *int32 // A short (255 max characters) human-readable string to provide additional // details for a running or stopped container. Reason *string noSmithyDocumentSerde } // An object that represents a container instance host device. This object isn't // applicable to jobs that are running on Fargate resources and shouldn't be // provided. type Device struct { // The path for the device on the host container instance. // // This member is required. HostPath *string // The path inside the container that's used to expose the host device. By // default, the hostPath value is used. ContainerPath *string // The explicit permissions to provide to the container for the device. By // default, the container has permissions for read , write , and mknod for the // device. Permissions []DeviceCgroupPermission noSmithyDocumentSerde } // Provides information used to select Amazon Machine Images (AMIs) for instances // in the compute environment. If Ec2Configuration isn't specified, the default is // ECS_AL2 ( Amazon Linux 2 (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami) // ). This object isn't applicable to jobs that are running on Fargate resources. type Ec2Configuration struct { // The image type to match with the instance type to select an AMI. The supported // values are different for ECS and EKS resources. ECS If the imageIdOverride // parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami) // ( ECS_AL2 ) is used.
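//
// For illustration only (a sketch, not generated documentation), selecting a
// GPU AMI family explicitly might look like the following; aws.String is the
// pointer helper from github.com/aws/aws-sdk-go-v2/aws:
//
//	ec2Config := Ec2Configuration{
//		ImageType: aws.String("ECS_AL2_NVIDIA"),
//	}
//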
If a new image type is specified in an update, but neither // an imageId nor an imageIdOverride parameter is specified, then the latest Amazon // ECS optimized AMI for that image type that's supported by Batch is used. ECS_AL2 // Amazon Linux 2 (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami) // : Default for all non-GPU instance families. ECS_AL2_NVIDIA Amazon Linux 2 (GPU) (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#gpuami) // : Default for all GPU instance families (for example, P4 and G4 ) and can be used // for all instance types that aren't Amazon Web Services Graviton-based. ECS_AL1 Amazon // Linux (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#alami) // . Amazon Linux has reached the end of standard support. For more // information, see Amazon Linux AMI (http://aws.amazon.com/amazon-linux-ami/) . // EKS If the imageIdOverride parameter isn't specified, then a recent Amazon // EKS-optimized Amazon Linux AMI (https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) // ( EKS_AL2 ) is used. If a new image type is specified in an update, but neither // an imageId nor an imageIdOverride parameter is specified, then the latest Amazon // EKS optimized AMI for that image type that Batch supports is used. EKS_AL2 // Amazon Linux 2 (https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) // : Default for all non-GPU instance families. EKS_AL2_NVIDIA Amazon Linux 2 // (accelerated) (https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) // : Default for all GPU instance families (for example, P4 and G4 ) and can be // used for all instance types that aren't Amazon Web Services Graviton-based. // // This member is required. ImageType *string // The AMI ID used for instances launched in the compute environment that match // the image type. This setting overrides the imageId set in the computeResource // object. The AMI that you choose for a compute environment must match the // architecture of the instance types that you intend to use for that compute // environment. For example, if your compute environment uses A1 instance types, // the compute resource AMI that you choose must support ARM instances. Amazon ECS // vends both x86 and ARM versions of the Amazon ECS-optimized Amazon Linux 2 AMI. // For more information, see Amazon ECS-optimized Amazon Linux 2 AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants.html) // in the Amazon Elastic Container Service Developer Guide. ImageIdOverride *string // The Kubernetes version for the compute environment. If you don't specify a // value, the latest version that Batch supports is used. ImageKubernetesVersion *string noSmithyDocumentSerde } // The authorization configuration details for the Amazon EFS file system. type EFSAuthorizationConfig struct { // The Amazon EFS access point ID to use. If an access point is specified, the // root directory value specified in the EFSVolumeConfiguration must either be // omitted or set to / , which enforces the path set on the EFS access point. If an // access point is used, transit encryption must be enabled in the // EFSVolumeConfiguration . For more information, see Working with Amazon EFS // access points (https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) // in the Amazon Elastic File System User Guide.
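//
// Illustrative sketch of the constraint described above (the IDs are
// placeholders, aws.String is the pointer helper from
// github.com/aws/aws-sdk-go-v2/aws, and the EFSTransitEncryptionEnabled
// constant name is assumed from this package's generated enum naming):
//
//	efsVolume := EFSVolumeConfiguration{
//		FileSystemId: aws.String("fs-12345678"),
//		AuthorizationConfig: &EFSAuthorizationConfig{
//			AccessPointId: aws.String("fsap-1234567890abcdef0"),
//		},
//		// Transit encryption must be enabled whenever an access point is used.
//		TransitEncryption: EFSTransitEncryptionEnabled,
//	}
//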
AccessPointId *string // Whether or not to use the Batch job IAM role defined in a job definition when // mounting the Amazon EFS file system. If enabled, transit encryption must be // enabled in the EFSVolumeConfiguration . If this parameter is omitted, the // default value of DISABLED is used. For more information, see Using Amazon EFS // access points (https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html#efs-volume-accesspoints) // in the Batch User Guide. EFS IAM authorization requires that TransitEncryption // be ENABLED and that a JobRoleArn is specified. Iam EFSAuthorizationConfigIAM noSmithyDocumentSerde } // This is used when you're using an Amazon Elastic File System file system for // job storage. For more information, see Amazon EFS Volumes (https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html) // in the Batch User Guide. type EFSVolumeConfiguration struct { // The Amazon EFS file system ID to use. // // This member is required. FileSystemId *string // The authorization configuration details for the Amazon EFS file system. AuthorizationConfig *EFSAuthorizationConfig // The directory within the Amazon EFS file system to mount as the root directory // inside the host. If this parameter is omitted, the root of the Amazon EFS volume // is used instead. Specifying / has the same effect as omitting this parameter. // The maximum length is 4,096 characters. If an EFS access point is specified in // the authorizationConfig , the root directory parameter must either be omitted or // set to / , which enforces the path set on the Amazon EFS access point. RootDirectory *string // Determines whether to enable encryption for Amazon EFS data in transit between // the Amazon ECS host and the Amazon EFS server. Transit encryption must be // enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, // the default value of DISABLED is used. For more information, see Encrypting // data in transit (https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html) // in the Amazon Elastic File System User Guide. TransitEncryption EFSTransitEncryption // The port to use when sending encrypted data between the Amazon ECS host and the // Amazon EFS server. If you don't specify a transit encryption port, it uses the // port selection strategy that the Amazon EFS mount helper uses. The value must be // between 0 and 65,535. For more information, see EFS mount helper (https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) // in the Amazon Elastic File System User Guide. TransitEncryptionPort *int32 noSmithyDocumentSerde } // An object that represents the details for an attempt for a job attempt that an // Amazon EKS container runs. type EksAttemptContainerDetail struct { // The exit code for the job attempt. A non-zero exit code is considered failed. ExitCode *int32 // A short (255 max characters) human-readable string to provide additional // details for a running or stopped container. Reason *string noSmithyDocumentSerde } // An object that represents the details of a job attempt for a job attempt by an // Amazon EKS container. type EksAttemptDetail struct { // The details for the final status of the containers for this job attempt. Containers []EksAttemptContainerDetail // The name of the node for this job attempt. NodeName *string // The name of the pod for this job attempt. 
PodName *string // The Unix timestamp (in milliseconds) for when the attempt was started (when the // attempt transitioned from the STARTING state to the RUNNING state). StartedAt *int64 // A short, human-readable string to provide additional details for the current // status of the job attempt. StatusReason *string // The Unix timestamp (in milliseconds) for when the attempt was stopped. This // happens when the attempt transitioned from the RUNNING state to a terminal // state, such as SUCCEEDED or FAILED . StoppedAt *int64 noSmithyDocumentSerde } // Configuration for the Amazon EKS cluster that supports the Batch compute // environment. The cluster must exist before the compute environment can be // created. type EksConfiguration struct { // The Amazon Resource Name (ARN) of the Amazon EKS cluster. An example is // arn:aws:eks:us-east-1:123456789012:cluster/ClusterForBatch . // // This member is required. EksClusterArn *string // The namespace of the Amazon EKS cluster. Batch manages pods in this namespace. // The value can't be left empty or null. It must be fewer than 64 characters long, // can't be set to default , can't start with " kube- ", and must match this // regular expression: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ . For more information, see // Namespaces (https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) // in the Kubernetes documentation. // // This member is required. KubernetesNamespace *string noSmithyDocumentSerde } // EKS container properties are used in Amazon EKS based job definitions to // describe the properties for a container node in the pod that's launched as // part of a job. This can't be specified for Amazon ECS based job // definitions. type EksContainer struct { // The Docker image used to start the container. // // This member is required. Image *string // An array of arguments to the entrypoint. If this isn't specified, the CMD of // the container image is used. This corresponds to the args member in the // Entrypoint (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) // portion of the Pod (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/) // in Kubernetes. Environment variable references are expanded using the // container's environment. If the referenced environment variable doesn't exist, // the reference in the command isn't changed. For example, if the reference is to // " $(NAME1) " and the NAME1 environment variable doesn't exist, the command // string will remain " $(NAME1) ". $$ is replaced with $ , and the resulting // string isn't expanded. For example, $$(VAR_NAME) is passed as $(VAR_NAME) // whether or not the VAR_NAME environment variable exists. For more information, // see CMD (https://docs.docker.com/engine/reference/builder/#cmd) in the // Dockerfile reference and Define a command and arguments for a pod (https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) // in the Kubernetes documentation. Args []string // The entrypoint for the container. This isn't run within a shell. If this isn't // specified, the ENTRYPOINT of the container image is used. Environment variable // references are expanded using the container's environment. If the referenced // environment variable doesn't exist, the reference in the command isn't changed. // For example, if the reference is to " $(NAME1) " and the NAME1 environment // variable doesn't exist, the command string will remain " $(NAME1) ".
$$ is // replaced with $ and the resulting string isn't expanded. For example, // $$(VAR_NAME) will be passed as $(VAR_NAME) whether or not the VAR_NAME // environment variable exists. The entrypoint can't be updated. For more // information, see ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint) // in the Dockerfile reference and Define a command and arguments for a container (https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) // and Entrypoint (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) // in the Kubernetes documentation. Command []string // The environment variables to pass to a container. Environment variables cannot // start with " AWS_BATCH ". This naming convention is reserved for variables that // Batch sets. Env []EksContainerEnvironmentVariable // The image pull policy for the container. Supported values are Always , // IfNotPresent , and Never . This parameter defaults to IfNotPresent . However, if // the :latest tag is specified, it defaults to Always . For more information, see // Updating images (https://kubernetes.io/docs/concepts/containers/images/#updating-images) // in the Kubernetes documentation. ImagePullPolicy *string // The name of the container. If the name isn't specified, the default name " // Default " is used. Each container in a pod must have a unique name. Name *string // The type and amount of resources to assign to a container. The supported // resources include memory , cpu , and nvidia.com/gpu . For more information, see // Resource management for pods and containers (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) // in the Kubernetes documentation. Resources *EksContainerResourceRequirements // The security context for a job. For more information, see Configure a security // context for a pod or container (https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) // in the Kubernetes documentation. SecurityContext *EksContainerSecurityContext // The volume mounts for the container. Batch supports emptyDir , hostPath , and // secret volume types. For more information about volumes and volume mounts in // Kubernetes, see Volumes (https://kubernetes.io/docs/concepts/storage/volumes/) // in the Kubernetes documentation. VolumeMounts []EksContainerVolumeMount noSmithyDocumentSerde } // The details for container properties that are returned by DescribeJobs for jobs // that use Amazon EKS. type EksContainerDetail struct { // An array of arguments to the entrypoint. If this isn't specified, the CMD of // the container image is used. This corresponds to the args member in the // Entrypoint (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) // portion of the Pod (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/) // in Kubernetes. Environment variable references are expanded using the // container's environment. If the referenced environment variable doesn't exist, // the reference in the command isn't changed. For example, if the reference is to // " $(NAME1) " and the NAME1 environment variable doesn't exist, the command // string will remain " $(NAME1) ". $$ is replaced with $ and the resulting string // isn't expanded. For example, $$(VAR_NAME) is passed as $(VAR_NAME) whether or // not the VAR_NAME environment variable exists. 
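//
// Illustrative example of the expansion rules just described (NAME1 is a
// hypothetical variable set via Env; NAME2 is not set):
//
//	Args: []string{
//		"$(NAME1)",  // expanded using the container's environment
//		"$(NAME2)",  // left as the literal string "$(NAME2)"
//		"$$(NAME1)", // passed as "$(NAME1)" without expansion
//	}
//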
For more information, see CMD (https://docs.docker.com/engine/reference/builder/#cmd) // in the Dockerfile reference and Define a command and arguments for a pod (https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) // in the Kubernetes documentation. Args []string // The entrypoint for the container. For more information, see Entrypoint (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) // in the Kubernetes documentation. Command []string // The environment variables to pass to a container. Environment variables cannot // start with " AWS_BATCH ". This naming convention is reserved for variables that // Batch sets. Env []EksContainerEnvironmentVariable // The exit code for the job attempt. A non-zero exit code is considered failed. ExitCode *int32 // The Docker image used to start the container. Image *string // The image pull policy for the container. Supported values are Always , // IfNotPresent , and Never . This parameter defaults to Always if the :latest tag // is specified and to IfNotPresent otherwise. For more information, see Updating images (https://kubernetes.io/docs/concepts/containers/images/#updating-images) // in the Kubernetes documentation. ImagePullPolicy *string // The name of the container. If the name isn't specified, the default name " // Default " is used. Each container in a pod must have a unique name. Name *string // A short human-readable string to provide additional details for a running or // stopped container. It can be up to 255 characters long. Reason *string // The type and amount of resources to assign to a container. The supported // resources include memory , cpu , and nvidia.com/gpu . For more information, see // Resource management for pods and containers (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) // in the Kubernetes documentation. Resources *EksContainerResourceRequirements // The security context for a job. For more information, see Configure a security // context for a pod or container (https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) // in the Kubernetes documentation. SecurityContext *EksContainerSecurityContext // The volume mounts for the container. Batch supports emptyDir , hostPath , and // secret volume types. For more information about volumes and volume mounts in // Kubernetes, see Volumes (https://kubernetes.io/docs/concepts/storage/volumes/) // in the Kubernetes documentation. VolumeMounts []EksContainerVolumeMount noSmithyDocumentSerde } // An environment variable. type EksContainerEnvironmentVariable struct { // The name of the environment variable. // // This member is required. Name *string // The value of the environment variable. Value *string noSmithyDocumentSerde } // Object representing any Kubernetes overrides to a job definition that's used in // a SubmitJob API operation. type EksContainerOverride struct { // The arguments to the entrypoint to send to the container that overrides the // default arguments from the Docker image or the job definition. For more // information, see CMD (https://docs.docker.com/engine/reference/builder/#cmd) in // the Dockerfile reference and Define a command and arguments for a pod (https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) // in the Kubernetes documentation. Args []string // The command to send to the container that overrides the default command from // the Docker image or the job definition.
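//
// Illustrative sketch of a SubmitJob-time override (the command and values are
// placeholders; aws.String is the pointer helper from
// github.com/aws/aws-sdk-go-v2/aws):
//
//	override := EksContainerOverride{
//		Command: []string{"python3", "train.py"},
//		Env: []EksContainerEnvironmentVariable{
//			{Name: aws.String("EPOCHS"), Value: aws.String("10")},
//		},
//	}
//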
Command []string // The environment variables to send to the container. You can add new environment // variables, which are added to the container at launch. Or, you can override the // existing environment variables from the Docker image or the job definition. // Environment variables cannot start with " AWS_BATCH ". This naming convention is // reserved for variables that Batch sets. Env []EksContainerEnvironmentVariable // The override of the Docker image that's used to start the container. Image *string // The type and amount of resources to assign to a container. These override the // settings in the job definition. The supported resources include memory , cpu , // and nvidia.com/gpu . For more information, see Resource management for pods and // containers (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) // in the Kubernetes documentation. Resources *EksContainerResourceRequirements noSmithyDocumentSerde } // The type and amount of resources to assign to a container. The supported // resources include memory , cpu , and nvidia.com/gpu . For more information, see // Resource management for pods and containers (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) // in the Kubernetes documentation. type EksContainerResourceRequirements struct { // The type and quantity of the resources to reserve for the container. The values // vary based on the name that's specified. Resources can be requested using // either the limits or the requests objects. memory The memory hard limit (in // MiB) for the container, using whole integers, with a "Mi" suffix. If your // container attempts to exceed the memory specified, the container is terminated. // You must specify at least 4 MiB of memory for a job. memory can be specified in // limits , requests , or both. If memory is specified in both places, then the // value that's specified in limits must be equal to the value that's specified in // requests . To maximize your resource utilization, provide your jobs with as much // memory as possible for the specific instance type that you are using. To learn // how, see Memory management (https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) // in the Batch User Guide. cpu The number of CPUs that are reserved for the // container. Values must be an even multiple of 0.25 . cpu can be specified in // limits , requests , or both. If cpu is specified in both places, then the value // that's specified in limits must be at least as large as the value that's // specified in requests . nvidia.com/gpu The number of GPUs that are reserved for // the container. Values must be a whole integer. nvidia.com/gpu can be specified // in limits , requests , or both. If nvidia.com/gpu is specified in both places, // then the value that's specified in limits must be equal to the value that's // specified in requests . Limits map[string]string // The type and quantity of the resources to request for the container. The values // vary based on the name that's specified. Resources can be requested by using // either the limits or the requests objects. memory The memory hard limit (in // MiB) for the container, using whole integers, with a "Mi" suffix. If your // container attempts to exceed the memory specified, the container is terminated. // You must specify at least 4 MiB of memory for a job. memory can be specified in // limits , requests , or both.
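//
// For example (illustrative values only), the following pairing keeps memory
// equal in both maps and gives cpu a limit at least as large as its request:
//
//	Requests: map[string]string{"memory": "2048Mi", "cpu": "0.5"},
//	Limits:   map[string]string{"memory": "2048Mi", "cpu": "1"},
//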
If memory is specified in both, then the value // that's specified in limits must be equal to the value that's specified in // requests . If you're trying to maximize your resource utilization by providing // your jobs with as much memory as possible for a particular instance type, see Memory // management (https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) // in the Batch User Guide. cpu The number of CPUs that are reserved for the // container. Values must be an even multiple of 0.25 . cpu can be specified in // limits , requests , or both. If cpu is specified in both, then the value that's // specified in limits must be at least as large as the value that's specified in // requests . nvidia.com/gpu The number of GPUs that are reserved for the // container. Values must be a whole integer. nvidia.com/gpu can be specified in // limits , requests , or both. If nvidia.com/gpu is specified in both, then the // value that's specified in limits must be equal to the value that's specified in // requests . Requests map[string]string noSmithyDocumentSerde } // The security context for a job. For more information, see Configure a security // context for a pod or container (https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) // in the Kubernetes documentation. type EksContainerSecurityContext struct { // When this parameter is true , the container is given elevated permissions on the // host container instance. The level of permissions is similar to the root user // permissions. The default value is false . This parameter maps to privileged // policy in the Privileged pod security policies (https://kubernetes.io/docs/concepts/security/pod-security-policy/#privileged) // in the Kubernetes documentation. Privileged *bool // When this parameter is true , the container is given read-only access to its // root file system. The default value is false . This parameter maps to // ReadOnlyRootFilesystem policy in the Volumes and file systems pod security // policies (https://kubernetes.io/docs/concepts/security/pod-security-policy/#volumes-and-file-systems) // in the Kubernetes documentation. ReadOnlyRootFilesystem *bool // When this parameter is specified, the container is run as the specified group // ID ( gid ). If this parameter isn't specified, the default is the group that's // specified in the image metadata. This parameter maps to RunAsGroup and MustRunAs // policy in the Users and groups pod security policies (https://kubernetes.io/docs/concepts/security/pod-security-policy/#users-and-groups) // in the Kubernetes documentation. RunAsGroup *int64 // When this parameter is specified, the container is run as a user with a uid // other than 0. If this parameter isn't specified, no such rule is enforced. This // parameter maps to RunAsUser and MustRunAsNonRoot policy in the Users and groups // pod security policies (https://kubernetes.io/docs/concepts/security/pod-security-policy/#users-and-groups) // in the Kubernetes documentation. RunAsNonRoot *bool // When this parameter is specified, the container is run as the specified user ID // ( uid ). If this parameter isn't specified, the default is the user that's // specified in the image metadata. This parameter maps to RunAsUser and MustRunAs // policy in the Users and groups pod security policies (https://kubernetes.io/docs/concepts/security/pod-security-policy/#users-and-groups) // in the Kubernetes documentation.
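//
// Illustrative sketch combining the fields above (the IDs are placeholders;
// aws.Int64/aws.Bool are pointer helpers from github.com/aws/aws-sdk-go-v2/aws):
//
//	sc := EksContainerSecurityContext{
//		RunAsUser:              aws.Int64(1000),
//		RunAsGroup:             aws.Int64(3000),
//		RunAsNonRoot:           aws.Bool(true),
//		ReadOnlyRootFilesystem: aws.Bool(true),
//	}
//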
RunAsUser *int64 noSmithyDocumentSerde } // The volume mounts for a container for an Amazon EKS job. For more information // about volumes and volume mounts in Kubernetes, see Volumes (https://kubernetes.io/docs/concepts/storage/volumes/) // in the Kubernetes documentation. type EksContainerVolumeMount struct { // The path on the container where the volume is mounted. MountPath *string // The name of the volume mount. This must match the name of one of the volumes in // the pod. Name *string // If this value is true , the container has read-only access to the volume. // Otherwise, the container can write to the volume. The default value is false . ReadOnly *bool noSmithyDocumentSerde } // Specifies the configuration of a Kubernetes emptyDir volume. An emptyDir volume // is first created when a pod is assigned to a node. It exists as long as that pod // is running on that node. The emptyDir volume is initially empty. All containers // in the pod can read and write the files in the emptyDir volume. However, the // emptyDir volume can be mounted at the same or different paths in each container. // When a pod is removed from a node for any reason, the data in the emptyDir is // deleted permanently. For more information, see emptyDir (https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) // in the Kubernetes documentation. type EksEmptyDir struct { // The medium to store the volume. The default value is an empty string, which // uses the storage of the node. "" (Default) Use the disk storage of the node. // "Memory" Use the tmpfs volume that's backed by the RAM of the node. Contents of // the volume are lost when the node reboots, and any storage on the volume counts // against the container's memory limit. Medium *string // The maximum size of the volume. By default, there's no maximum size defined. SizeLimit *string noSmithyDocumentSerde } // Specifies the configuration of a Kubernetes hostPath volume. A hostPath volume // mounts an existing file or directory from the host node's filesystem into your // pod. For more information, see hostPath (https://kubernetes.io/docs/concepts/storage/volumes/#hostpath) // in the Kubernetes documentation. type EksHostPath struct { // The path of the file or directory on the host to mount into containers on the // pod. Path *string noSmithyDocumentSerde } // Describes and uniquely identifies Kubernetes resources. For example, the // compute environment that a pod runs in or the jobID for a job running in the // pod. For more information, see Understanding Kubernetes Objects (https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) // in the Kubernetes documentation. type EksMetadata struct { // Key-value pairs used to identify, sort, and organize Kubernetes resources. Can // contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and // underscores (_). Labels can be added or modified at any time. Each resource can // have multiple labels, but each key must be unique for a given object. Labels map[string]string noSmithyDocumentSerde } // The properties for the pod. type EksPodProperties struct { // The properties of the container that's used on the Amazon EKS pod. Containers []EksContainer // The DNS policy for the pod. The default value is ClusterFirst . If the // hostNetwork parameter is not specified, the default is ClusterFirstWithHostNet .
// ClusterFirst indicates that any DNS query that does not match the configured // cluster domain suffix is forwarded to the upstream nameserver inherited from the // node. For more information, see Pod's DNS policy (https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) // in the Kubernetes documentation. Valid values: Default | ClusterFirst | // ClusterFirstWithHostNet DnsPolicy *string // Indicates if the pod uses the host's network IP address. The default value is // true . Setting this to false enables the Kubernetes pod networking model. Most // Batch workloads are egress-only and don't require the overhead of IP allocation // for each pod for incoming connections. For more information, see Host namespaces (https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) // and Pod networking (https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) // in the Kubernetes documentation. HostNetwork *bool // Metadata about the Kubernetes pod. For more information, see Understanding // Kubernetes Objects (https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) // in the Kubernetes documentation. Metadata *EksMetadata // The name of the service account that's used to run the pod. For more // information, see Kubernetes service accounts (https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) // and Configure a Kubernetes service account to assume an IAM role (https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) // in the Amazon EKS User Guide and Configure service accounts for pods (https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) // in the Kubernetes documentation. ServiceAccountName *string // Specifies the volumes for a job definition that uses Amazon EKS resources. Volumes []EksVolume noSmithyDocumentSerde } // The details for the pod. type EksPodPropertiesDetail struct { // The properties of the container that's used on the Amazon EKS pod. Containers []EksContainerDetail // The DNS policy for the pod. The default value is ClusterFirst . If the // hostNetwork parameter is not specified, the default is ClusterFirstWithHostNet . // ClusterFirst indicates that any DNS query that does not match the configured // cluster domain suffix is forwarded to the upstream nameserver inherited from the // node. If no value was specified for dnsPolicy in the RegisterJobDefinition (https://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html) // API operation, then no value will be returned for dnsPolicy by either of // DescribeJobDefinitions (https://docs.aws.amazon.com/batch/latest/APIReference/API_DescribeJobDefinitions.html) // or DescribeJobs (https://docs.aws.amazon.com/batch/latest/APIReference/API_DescribeJobs.html) // API operations. The pod spec setting will contain either ClusterFirst or // ClusterFirstWithHostNet , depending on the value of the hostNetwork parameter. // For more information, see Pod's DNS policy (https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) // in the Kubernetes documentation. Valid values: Default | ClusterFirst | // ClusterFirstWithHostNet DnsPolicy *string // Indicates if the pod uses the host's network IP address. The default value is // true . Setting this to false enables the Kubernetes pod networking model.
Most // Batch workloads are egress-only and don't require the overhead of IP allocation // for each pod for incoming connections. For more information, see Host namespaces (https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) // and Pod networking (https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) // in the Kubernetes documentation. HostNetwork *bool // Describes and uniquely identifies Kubernetes resources. For example, the // compute environment that a pod runs in or the jobID for a job running in the // pod. For more information, see Understanding Kubernetes Objects (https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) // in the Kubernetes documentation. Metadata *EksMetadata // The name of the node for this job. NodeName *string // The name of the pod for this job. PodName *string // The name of the service account that's used to run the pod. For more // information, see Kubernetes service accounts (https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) // and Configure a Kubernetes service account to assume an IAM role (https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) // in the Amazon EKS User Guide and Configure service accounts for pods (https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) // in the Kubernetes documentation. ServiceAccountName *string // Specifies the volumes for a job definition using Amazon EKS resources. Volumes []EksVolume noSmithyDocumentSerde } // An object that contains overrides for the Kubernetes pod properties of a job. type EksPodPropertiesOverride struct { // The overrides for the container that's used on the Amazon EKS pod. Containers []EksContainerOverride // Metadata about the overrides for the container that's used on the Amazon EKS // pod. Metadata *EksMetadata noSmithyDocumentSerde } // An object that contains the properties for the Kubernetes resources of a job. type EksProperties struct { // The properties for the Kubernetes pod resources of a job. PodProperties *EksPodProperties noSmithyDocumentSerde } // An object that contains the details for the Kubernetes resources of a job. type EksPropertiesDetail struct { // The properties for the Kubernetes pod resources of a job. PodProperties *EksPodPropertiesDetail noSmithyDocumentSerde } // An object that contains overrides for the Kubernetes resources of a job. type EksPropertiesOverride struct { // The overrides for the Kubernetes pod resources of a job. PodProperties *EksPodPropertiesOverride noSmithyDocumentSerde } // Specifies the configuration of a Kubernetes secret volume. For more // information, see secret (https://kubernetes.io/docs/concepts/storage/volumes/#secret) // in the Kubernetes documentation. type EksSecret struct { // The name of the secret. The name must be allowed as a DNS subdomain name. For // more information, see DNS subdomain names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names) // in the Kubernetes documentation. // // This member is required. SecretName *string // Specifies whether the secret or the secret's keys must be defined. Optional *bool noSmithyDocumentSerde } // Specifies an Amazon EKS volume for a job definition. type EksVolume struct { // The name of the volume. The name must be allowed as a DNS subdomain name. 
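//
// Illustrative sketch (a scratch emptyDir volume; the name and size limit are
// placeholders in Kubernetes quantity syntax, and aws.String is the pointer
// helper from github.com/aws/aws-sdk-go-v2/aws):
//
//	volume := EksVolume{
//		Name: aws.String("scratch"),
//		EmptyDir: &EksEmptyDir{
//			SizeLimit: aws.String("10Gi"),
//		},
//	}
//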
For // more information, see DNS subdomain names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names) // in the Kubernetes documentation. // // This member is required. Name *string // Specifies the configuration of a Kubernetes emptyDir volume. For more // information, see emptyDir (https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) // in the Kubernetes documentation. EmptyDir *EksEmptyDir // Specifies the configuration of a Kubernetes hostPath volume. For more // information, see hostPath (https://kubernetes.io/docs/concepts/storage/volumes/#hostpath) // in the Kubernetes documentation. HostPath *EksHostPath // Specifies the configuration of a Kubernetes secret volume. For more // information, see secret (https://kubernetes.io/docs/concepts/storage/volumes/#secret) // in the Kubernetes documentation. Secret *EksSecret noSmithyDocumentSerde } // The amount of ephemeral storage to allocate for the task. This parameter is // used to expand the total amount of ephemeral storage available, beyond the // default amount, for tasks hosted on Fargate. type EphemeralStorage struct { // The total amount, in GiB, of ephemeral storage to set for the task. The minimum // supported value is 21 GiB and the maximum supported value is 200 GiB. // // This member is required. SizeInGiB *int32 noSmithyDocumentSerde } // Specifies an array of up to 5 conditions to be met, and an action to take ( RETRY // or EXIT ) if all conditions are met. If none of the EvaluateOnExit conditions // in a RetryStrategy match, then the job is retried. type EvaluateOnExit struct { // Specifies the action to take if all of the specified conditions ( onStatusReason // , onReason , and onExitCode ) are met. The values aren't case sensitive. // // This member is required. Action RetryAction // Contains a glob pattern to match against the decimal representation of the // ExitCode returned for a job. The pattern can be up to 512 characters long. It // can contain only numbers, and can end with an asterisk (*) so that only the // start of the string needs to be an exact match. OnExitCode *string // Contains a glob pattern to match against the Reason returned for a job. The // pattern can contain up to 512 characters. It can contain letters, numbers, // periods (.), colons (:), and white space (including spaces and tabs). It can // optionally end with an asterisk (*) so that only the start of the string needs // to be an exact match. OnReason *string // Contains a glob pattern to match against the StatusReason returned for a job. // The pattern can contain up to 512 characters. It can contain letters, numbers, // periods (.), colons (:), and white spaces (including spaces or tabs). It can // optionally end with an asterisk (*) so that only the start of the string needs // to be an exact match. OnStatusReason *string noSmithyDocumentSerde } // The fair share policy for a scheduling policy. type FairsharePolicy struct { // A value used to reserve some of the available maximum vCPU for fair share // identifiers that aren't already used. The reserved ratio is // (computeReservation/100)^ActiveFairShares where ActiveFairShares is the // number of active fair share identifiers. For example, a computeReservation // value of 50 indicates that Batch reserves 50% of the maximum available vCPU if // there's only one fair share identifier. It reserves 25% if there are two fair // share identifiers.
It reserves 12.5% if there are three fair share identifiers. // A computeReservation value of 25 indicates that Batch should reserve 25% of the // maximum available vCPU if there's only one fair share identifier, 6.25% if there // are two fair share identifiers, and 1.56% if there are three fair share // identifiers. The minimum value is 0 and the maximum value is 99. ComputeReservation *int32 // The amount of time (in seconds) to use to calculate a fair share percentage for // each fair share identifier in use. A value of zero (0) indicates that only // current usage is measured. The decay allows for more recently run jobs to have // more weight than jobs that ran earlier. The maximum supported value is 604800 (1 // week). ShareDecaySeconds *int32 // An array of SharedIdentifier objects that contain the weights for the fair // share identifiers for the fair share policy. Fair share identifiers that aren't // included have a default weight of 1.0 . ShareDistribution []ShareAttributes noSmithyDocumentSerde } // The platform configuration for jobs that are running on Fargate resources. Jobs // that run on EC2 resources must not specify this parameter. type FargatePlatformConfiguration struct { // The Fargate platform version where the jobs are running. A platform version is // specified only for jobs that are running on Fargate resources. If one isn't // specified, the LATEST platform version is used by default. This uses a recent, // approved version of the Fargate platform for compute resources. For more // information, see Fargate platform versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) // in the Amazon Elastic Container Service Developer Guide. PlatformVersion *string noSmithyDocumentSerde } // Determines whether your data volume persists on the host container instance and // where it's stored. If this parameter is empty, then the Docker daemon assigns a // host path for your data volume. However, the data isn't guaranteed to persist // after the containers that are associated with it stop running. type Host struct { // The path on the host container instance that's presented to the container. If // this parameter is empty, then the Docker daemon has assigned a host path for // you. If this parameter contains a file location, then the data volume persists // at the specified location on the host container instance until you delete it // manually. If the source path location doesn't exist on the host container // instance, the Docker daemon creates it. If the location does exist, the contents // of the source path folder are exported. This parameter isn't applicable to jobs // that run on Fargate resources. Don't provide this for these jobs. SourcePath *string noSmithyDocumentSerde } // An object that represents a Batch job definition. type JobDefinition struct { // The Amazon Resource Name (ARN) for the job definition. // // This member is required. JobDefinitionArn *string // The name of the job definition. // // This member is required. JobDefinitionName *string // The revision of the job definition. // // This member is required. Revision *int32 // The type of job definition. It's either container or multinode . If the job is // run on Fargate resources, then multinode isn't supported. For more information // about multi-node parallel jobs, see Creating a multi-node parallel job // definition (https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html) // in the Batch User Guide. // // This member is required.
Type *string // The orchestration type of the compute environment. The valid values are ECS // (default) or EKS . ContainerOrchestrationType OrchestrationType // An object with various properties specific to Amazon ECS based jobs. Valid // values are containerProperties , eksProperties , and nodeProperties . Only one // can be specified. ContainerProperties *ContainerProperties // An object with various properties that are specific to Amazon EKS based jobs. // Valid values are containerProperties , eksProperties , and nodeProperties . Only // one can be specified. EksProperties *EksProperties // An object with various properties that are specific to multi-node parallel // jobs. Valid values are containerProperties , eksProperties , and nodeProperties // . Only one can be specified. If the job runs on Fargate resources, don't specify // nodeProperties . Use containerProperties instead. NodeProperties *NodeProperties // Default parameters or parameter substitution placeholders that are set in the // job definition. Parameters are specified as a key-value pair mapping. Parameters // in a SubmitJob request override any corresponding parameter defaults from the // job definition. For more information about specifying parameters, see Job // definition parameters (https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html) // in the Batch User Guide. Parameters map[string]string // The platform capabilities required by the job definition. If no value is // specified, it defaults to EC2 . Jobs run on Fargate resources specify FARGATE . PlatformCapabilities []PlatformCapability // Specifies whether to propagate the tags from the job or job definition to the // corresponding Amazon ECS task. If no value is specified, the tags aren't // propagated. Tags can only be propagated to the tasks when the tasks are created. // For tags with the same name, job tags are given priority over job definition // tags. If the total number of combined tags from the job and job definition is // over 50, the job is moved to the FAILED state. PropagateTags *bool // The retry strategy to use for failed jobs that are submitted with this job // definition. RetryStrategy *RetryStrategy // The scheduling priority of the job definition. This only affects jobs in job // queues with a fair share policy. Jobs with a higher scheduling priority are // scheduled before jobs with a lower scheduling priority. SchedulingPriority *int32 // The status of the job definition. Status *string // The tags that are applied to the job definition. Tags map[string]string // The timeout time for jobs that are submitted with this job definition. After // the amount of time you specify passes, Batch terminates your jobs if they aren't // finished. Timeout *JobTimeout noSmithyDocumentSerde } // An object that represents a Batch job dependency. type JobDependency struct { // The job ID of the Batch job that's associated with this dependency. JobId *string // The type of the job dependency. Type ArrayJobDependency noSmithyDocumentSerde } // An object that represents a Batch job. type JobDetail struct { // The Amazon Resource Name (ARN) of the job definition that this job uses. // // This member is required. JobDefinition *string // The job ID. // // This member is required. JobId *string // The job name. // // This member is required. JobName *string // The Amazon Resource Name (ARN) of the job queue that the job is associated with. // // This member is required.
JobQueue *string // The Unix timestamp (in milliseconds) for when the job was started. More // specifically, it's when the job transitioned from the STARTING state to the // RUNNING state. This parameter isn't provided for child jobs of array jobs or // multi-node parallel jobs. // // This member is required. StartedAt *int64 // The current status for the job. If your jobs don't progress to STARTING , see // Jobs stuck in RUNNABLE status (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#job_stuck_in_runnable) // in the troubleshooting section of the Batch User Guide. // // This member is required. Status JobStatus // The array properties of the job, if it's an array job. ArrayProperties *ArrayPropertiesDetail // A list of job attempts that are associated with this job. Attempts []AttemptDetail // An object that represents the details for the container that's associated with // the job. Container *ContainerDetail // The Unix timestamp (in milliseconds) for when the job was created. For // non-array jobs and parent array jobs, this is when the job entered the SUBMITTED // state. This is specifically at the time SubmitJob was called. For array child // jobs, this is when the child job was spawned by its parent and entered the // PENDING state. CreatedAt *int64 // A list of job IDs that this job depends on. DependsOn []JobDependency // A list of job attempts that are associated with this job. EksAttempts []EksAttemptDetail // An object with various properties that are specific to Amazon EKS based jobs. // Only one of container , eksProperties , or nodeDetails is specified. EksProperties *EksPropertiesDetail // Indicates whether the job is canceled. IsCancelled *bool // Indicates whether the job is terminated. IsTerminated *bool // The Amazon Resource Name (ARN) of the job. JobArn *string // An object that represents the details of a node that's associated with a // multi-node parallel job. NodeDetails *NodeDetails // An object that represents the node properties of a multi-node parallel job. // This isn't applicable to jobs that are running on Fargate resources. NodeProperties *NodeProperties // Additional parameters that are passed to the job that replace parameter // substitution placeholders or override any corresponding parameter defaults from // the job definition. Parameters map[string]string // The platform capabilities required by the job definition. If no value is // specified, it defaults to EC2 . Jobs run on Fargate resources specify FARGATE . PlatformCapabilities []PlatformCapability // Specifies whether to propagate the tags from the job or job definition to the // corresponding Amazon ECS task. If no value is specified, the tags aren't // propagated. Tags can only be propagated to the tasks when the tasks are created. // For tags with the same name, job tags are given priority over job definition // tags. If the total number of combined tags from the job and job definition is // over 50, the job is moved to the FAILED state. PropagateTags *bool // The retry strategy to use for this job if an attempt fails. RetryStrategy *RetryStrategy // The scheduling priority of the job definition. This only affects jobs in job // queues with a fair share policy. Jobs with a higher scheduling priority are // scheduled before jobs with a lower scheduling priority. SchedulingPriority *int32 // The share identifier for the job. ShareIdentifier *string // A short, human-readable string to provide more details for the current status // of the job.
StatusReason *string // The Unix timestamp (in milliseconds) for when the job was stopped. More // specifically, it's when the job transitioned from the RUNNING state to a // terminal state, such as SUCCEEDED or FAILED . StoppedAt *int64 // The tags that are applied to the job. Tags map[string]string // The timeout configuration for the job. Timeout *JobTimeout noSmithyDocumentSerde } // An object that represents the details for a Batch job queue. type JobQueueDetail struct { // The compute environments that are attached to the job queue and the order that // job placement is preferred. Compute environments are selected for job placement // in ascending order. // // This member is required. ComputeEnvironmentOrder []ComputeEnvironmentOrder // The Amazon Resource Name (ARN) of the job queue. // // This member is required. JobQueueArn *string // The job queue name. // // This member is required. JobQueueName *string // The priority of the job queue. Job queues with a higher priority (or a higher // integer value for the priority parameter) are evaluated first when associated // with the same compute environment. Priority is determined in descending order. // For example, a job queue with a priority value of 10 is given scheduling // preference over a job queue with a priority value of 1 . All of the compute // environments must be either EC2 ( EC2 or SPOT ) or Fargate ( FARGATE or // FARGATE_SPOT ). EC2 and Fargate compute environments can't be mixed. // // This member is required. Priority *int32 // Describes the ability of the queue to accept new jobs. If the job queue state // is ENABLED , it can accept jobs. If the job queue state is DISABLED , new jobs // can't be added to the queue, but jobs already in the queue can finish. // // This member is required. State JQState // The Amazon Resource Name (ARN) of the scheduling policy. The format is // arn:Partition:batch:Region:Account:scheduling-policy/Name . For example, // arn:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy . SchedulingPolicyArn *string // The status of the job queue (for example, CREATING or VALID ). Status JQStatus // A short, human-readable string to provide additional details for the current // status of the job queue. StatusReason *string // The tags that are applied to the job queue. For more information, see Tagging // your Batch resources (https://docs.aws.amazon.com/batch/latest/userguide/using-tags.html) // in the Batch User Guide. Tags map[string]string noSmithyDocumentSerde } // An object that represents summary details of a job. type JobSummary struct { // The job ID. // // This member is required. JobId *string // The job name. // // This member is required. JobName *string // The array properties of the job, if it's an array job. ArrayProperties *ArrayPropertiesSummary // An object that represents the details of the container that's associated with // the job. Container *ContainerSummary // The Unix timestamp (in milliseconds) for when the job was created. For // non-array jobs and parent array jobs, this is when the job entered the SUBMITTED // state (at the time SubmitJob was called). For array child jobs, this is when // the child job was spawned by its parent and entered the PENDING state. CreatedAt *int64 // The Amazon Resource Name (ARN) of the job. JobArn *string // The Amazon Resource Name (ARN) of the job definition. JobDefinition *string // The node properties for a single node in a job summary list. This isn't // applicable to jobs that are running on Fargate resources.
NodeProperties *NodePropertiesSummary // The Unix timestamp for when the job was started. More specifically, it's when // the job transitioned from the STARTING state to the RUNNING state. StartedAt *int64 // The current status for the job. Status JobStatus // A short, human-readable string to provide more details for the current status // of the job. StatusReason *string // The Unix timestamp for when the job was stopped. More specifically, it's when // the job transitioned from the RUNNING state to a terminal state, such as // SUCCEEDED or FAILED . StoppedAt *int64 noSmithyDocumentSerde } // An object that represents a job timeout configuration. type JobTimeout struct { // The job timeout time (in seconds) that's measured from the job attempt's // startedAt timestamp. After this time passes, Batch terminates your jobs if they // aren't finished. The minimum value for the timeout is 60 seconds. For array // jobs, the timeout applies to the child jobs, not to the parent array job. For // multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the // individual nodes. AttemptDurationSeconds *int32 noSmithyDocumentSerde } // A key-value pair object. type KeyValuePair struct { // The name of the key-value pair. For environment variables, this is the name of // the environment variable. Name *string // The value of the key-value pair. For environment variables, this is the value // of the environment variable. Value *string noSmithyDocumentSerde } // A filter name and value pair that's used to return a more specific list of // results from a ListJobs API operation. type KeyValuesPair struct { // The name of the filter. Filter names are case sensitive. Name *string // The filter values. Values []string noSmithyDocumentSerde } // An object that represents a launch template that's associated with a compute // resource. You must specify either the launch template ID or launch template name // in the request, but not both. If security groups are specified using both the // securityGroupIds parameter of CreateComputeEnvironment and the launch template, // the values in the securityGroupIds parameter of CreateComputeEnvironment will // be used. This object isn't applicable to jobs that are running on Fargate // resources. type LaunchTemplateSpecification struct { // The ID of the launch template. LaunchTemplateId *string // The name of the launch template. LaunchTemplateName *string // The version number of the launch template, $Latest , or $Default . If the value // is $Latest , the latest version of the launch template is used. If the value is // $Default , the default version of the launch template is used. If the AMI ID // that's used in a compute environment is from the launch template, the AMI isn't // changed when the compute environment is updated. It's only changed if the // updateToLatestImageVersion parameter for the compute environment is set to true // . During an infrastructure update, if either $Latest or $Default is specified, // Batch re-evaluates the launch template version, and it might use a different // version of the launch template. This is the case even if the launch template // isn't specified in the update. When updating a compute environment, changing the // launch template requires an infrastructure update of the compute environment. // For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) // in the Batch User Guide. Default: $Default . 
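//
// Illustrative sketch (the template ID is a placeholder; aws.String is the
// pointer helper from github.com/aws/aws-sdk-go-v2/aws):
//
//	lt := LaunchTemplateSpecification{
//		LaunchTemplateId: aws.String("lt-0123456789abcdef0"),
//		Version:          aws.String("$Latest"),
//	}
//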
// Linux-specific modifications that are applied to the container, such as
// details for device mappings.
type LinuxParameters struct {

	// Any of the host devices to expose to the container. This parameter maps
	// to Devices in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --device option to docker run (https://docs.docker.com/engine/reference/run/) .
	// This parameter isn't applicable to jobs that are running on Fargate
	// resources. Don't provide it for these jobs.
	Devices []Device

	// If true, run an init process inside the container that forwards signals
	// and reaps processes. This parameter maps to the --init option to docker run (https://docs.docker.com/engine/reference/run/) .
	// This parameter requires version 1.25 of the Docker Remote API or greater
	// on your container instance. To check the Docker Remote API version on
	// your container instance, log in to your container instance and run the
	// following command: sudo docker version | grep "Server API version"
	InitProcessEnabled *bool

	// The total amount of swap memory (in MiB) a container can use. This
	// parameter is translated to the --memory-swap option to docker run (https://docs.docker.com/engine/reference/run/)
	// where the value is the sum of the container memory plus the maxSwap
	// value. For more information, see --memory-swap details (https://docs.docker.com/config/containers/resource_constraints/#--memory-swap-details)
	// in the Docker documentation. If a maxSwap value of 0 is specified, the
	// container doesn't use swap. Accepted values are 0 or any positive
	// integer. If the maxSwap parameter is omitted, the container doesn't use
	// the swap configuration for the container instance that it's running on. A
	// maxSwap value must be set for the swappiness parameter to be used. This
	// parameter isn't applicable to jobs that are running on Fargate resources.
	// Don't provide it for these jobs.
	MaxSwap *int32

	// The value for the size (in MiB) of the /dev/shm volume. This parameter
	// maps to the --shm-size option to docker run (https://docs.docker.com/engine/reference/run/) .
	// This parameter isn't applicable to jobs that are running on Fargate
	// resources. Don't provide it for these jobs.
	SharedMemorySize *int32

	// You can use this parameter to tune a container's memory swappiness
	// behavior. A swappiness value of 0 causes swapping not to occur unless
	// absolutely necessary. A swappiness value of 100 causes pages to be
	// swapped aggressively. Valid values are whole numbers between 0 and 100 .
	// If the swappiness parameter isn't specified, a default value of 60 is
	// used. If a value isn't specified for maxSwap , then this parameter is
	// ignored. If maxSwap is set to 0, the container doesn't use swap. This
	// parameter maps to the --memory-swappiness option to docker run (https://docs.docker.com/engine/reference/run/) .
	// Consider the following when you use a per-container swap configuration.
	//   - Swap space must be enabled and allocated on the container instance
	//   for the containers to use. By default, the Amazon ECS optimized AMIs
	//   don't have swap enabled. You must enable swap on the instance to use
	//   this feature. For more information, see Instance store swap volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-store-swap-volumes.html)
	//   in the Amazon EC2 User Guide for Linux Instances or How do I allocate
	//   memory to work as swap space in an Amazon EC2 instance by using a swap file? (http://aws.amazon.com/premiumsupport/knowledge-center/ec2-memory-swap-file/)
	//   - The swap space parameters are only supported for job definitions
	//   using EC2 resources.
	//   - If the maxSwap and swappiness parameters are omitted from a job
	//   definition, each container has a default swappiness value of 60.
	//   Moreover, the total swap usage is limited to two times the memory
	//   reservation of the container.
	//
	// This parameter isn't applicable to jobs that are running on Fargate
	// resources. Don't provide it for these jobs.
	Swappiness *int32

	// The container path, mount options, and size (in MiB) of the tmpfs mount.
	// This parameter maps to the --tmpfs option to docker run (https://docs.docker.com/engine/reference/run/) .
	// This parameter isn't applicable to jobs that are running on Fargate
	// resources. Don't provide this parameter for this resource type.
	Tmpfs []Tmpfs

	noSmithyDocumentSerde
}
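// Example (an illustrative sketch): a per-container swap configuration for an
// EC2-backed job. The aws helper package is assumed; the values are
// arbitrary.
//
//	linux := types.LinuxParameters{
//	    InitProcessEnabled: aws.Bool(true),  // reap zombies, forward signals
//	    MaxSwap:            aws.Int32(1024), // allow up to 1 GiB of swap
//	    Swappiness:         aws.Int32(10),   // swap only under memory pressure
//	    SharedMemorySize:   aws.Int32(64),   // 64 MiB /dev/shm
//	}
//
// Because maxSwap is set, the swappiness value is honored; with maxSwap
// omitted, the swappiness field would be ignored, as described above.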
// Log configuration options to send to a custom log driver for the container.
type LogConfiguration struct {

	// The log driver to use for the container. The valid values that are listed
	// for this parameter are log drivers that the Amazon ECS container agent
	// can communicate with by default. The supported log drivers are awslogs ,
	// fluentd , gelf , json-file , journald , logentries , syslog , and splunk .
	// Jobs that are running on Fargate resources are restricted to the awslogs
	// and splunk log drivers.
	//
	// awslogs Specifies the Amazon CloudWatch Logs logging driver. For more
	// information, see Using the awslogs log driver (https://docs.aws.amazon.com/batch/latest/userguide/using_awslogs.html)
	// in the Batch User Guide and Amazon CloudWatch Logs logging driver (https://docs.docker.com/config/containers/logging/awslogs/)
	// in the Docker documentation.
	//
	// fluentd Specifies the Fluentd logging driver. For more information
	// including usage and options, see Fluentd logging driver (https://docs.docker.com/config/containers/logging/fluentd/)
	// in the Docker documentation.
	//
	// gelf Specifies the Graylog Extended Format (GELF) logging driver. For
	// more information including usage and options, see Graylog Extended Format logging driver (https://docs.docker.com/config/containers/logging/gelf/)
	// in the Docker documentation.
	//
	// journald Specifies the journald logging driver. For more information
	// including usage and options, see Journald logging driver (https://docs.docker.com/config/containers/logging/journald/)
	// in the Docker documentation.
	//
	// json-file Specifies the JSON file logging driver. For more information
	// including usage and options, see JSON File logging driver (https://docs.docker.com/config/containers/logging/json-file/)
	// in the Docker documentation.
	//
	// splunk Specifies the Splunk logging driver. For more information
	// including usage and options, see Splunk logging driver (https://docs.docker.com/config/containers/logging/splunk/)
	// in the Docker documentation.
	//
	// syslog Specifies the syslog logging driver. For more information
	// including usage and options, see Syslog logging driver (https://docs.docker.com/config/containers/logging/syslog/)
	// in the Docker documentation.
	//
	// If you have a custom driver that's not listed earlier that you want to
	// work with the Amazon ECS container agent, you can fork the Amazon ECS
	// container agent project that's available on GitHub (https://github.com/aws/amazon-ecs-agent)
	// and customize it to work with that driver. We encourage you to submit
	// pull requests for changes that you want to have included. However, Amazon
	// Web Services doesn't currently support running modified copies of this
	// software. This parameter requires version 1.18 of the Docker Remote API
	// or greater on your container instance. To check the Docker Remote API
	// version on your container instance, log in to your container instance and
	// run the following command: sudo docker version | grep "Server API version"
	//
	// This member is required.
	LogDriver LogDriver

	// The configuration options to send to the log driver. This parameter
	// requires version 1.19 of the Docker Remote API or greater on your
	// container instance. To check the Docker Remote API version on your
	// container instance, log in to your container instance and run the
	// following command: sudo docker version | grep "Server API version"
	Options map[string]string

	// The secrets to pass to the log configuration. For more information, see
	// Specifying sensitive data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
	// in the Batch User Guide.
	SecretOptions []Secret

	noSmithyDocumentSerde
}
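// Example (an illustrative sketch): sending container logs to CloudWatch Logs
// with the awslogs driver. The option keys are standard awslogs driver
// options; the log group and stream prefix values are hypothetical, and the
// LogDriverAwslogs enum constant follows this package's generated naming.
//
//	logCfg := types.LogConfiguration{
//	    LogDriver: types.LogDriverAwslogs,
//	    Options: map[string]string{
//	        "awslogs-group":         "/aws/batch/custom", // hypothetical group
//	        "awslogs-stream-prefix": "myjob",
//	    },
//	}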
// Details for a Docker volume mount point that's used in a job's container
// properties. This parameter maps to Volumes in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.19/#create-a-container)
// section of the Docker Remote API and the --volume option to docker run.
type MountPoint struct {

	// The path on the container where the host volume is mounted.
	ContainerPath *string

	// If this value is true , the container has read-only access to the volume.
	// Otherwise, the container can write to the volume. The default value is
	// false .
	ReadOnly *bool

	// The name of the volume to mount.
	SourceVolume *string

	noSmithyDocumentSerde
}

// The network configuration for jobs that are running on Fargate resources.
// Jobs that are running on EC2 resources must not specify this parameter.
type NetworkConfiguration struct {

	// Indicates whether the job has a public IP address. For a job that's
	// running on Fargate resources in a private subnet to send outbound traffic
	// to the internet (for example, to pull container images), the private
	// subnet requires a NAT gateway to be attached to route requests to the
	// internet. For more information, see Amazon ECS task networking (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html)
	// in the Amazon Elastic Container Service Developer Guide. The default
	// value is "DISABLED".
	AssignPublicIp AssignPublicIp

	noSmithyDocumentSerde
}

// An object that represents the elastic network interface for a multi-node
// parallel job node.
type NetworkInterface struct {

	// The attachment ID for the network interface.
	AttachmentId *string

	// The private IPv6 address for the network interface.
	Ipv6Address *string

	// The private IPv4 address for the network interface.
	PrivateIpv4Address *string

	noSmithyDocumentSerde
}

// An object that represents the details of a multi-node parallel job node.
type NodeDetails struct {

	// Specifies whether the current node is the main node for a multi-node
	// parallel job.
	IsMainNode *bool

	// The node index for the node. Node index numbering starts at zero. This
	// index is also available on the node with the AWS_BATCH_JOB_NODE_INDEX
	// environment variable.
	NodeIndex *int32

	noSmithyDocumentSerde
}
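// Example (an illustrative sketch): a read-only mount point paired with the
// Fargate network configuration. The volume name "data" must match a Volume
// defined elsewhere in the container properties; it's hypothetical here, as
// is the AssignPublicIpEnabled enum constant naming.
//
//	mount := types.MountPoint{
//	    ContainerPath: aws.String("/mnt/data"),
//	    ReadOnly:      aws.Bool(true),
//	    SourceVolume:  aws.String("data"),
//	}
//	netCfg := types.NetworkConfiguration{
//	    // Fargate jobs in public subnets need a public IP to pull images.
//	    AssignPublicIp: types.AssignPublicIpEnabled,
//	}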
// An object that represents any node overrides to a job definition that's
// used in a SubmitJob API operation. This parameter isn't applicable to jobs
// that are running on Fargate resources. Don't provide it for these jobs. Use
// containerOverrides instead.
type NodeOverrides struct {

	// The node property overrides for the job.
	NodePropertyOverrides []NodePropertyOverride

	// The number of nodes to use with a multi-node parallel job. This value
	// overrides the number of nodes that are specified in the job definition.
	// To use this override, you must meet the following conditions:
	//   - There must be at least one node range in your job definition that has
	//   an open upper boundary, such as : or n: .
	//   - The lower boundary of the node range that's specified in the job
	//   definition must be less than the number of nodes specified in the
	//   override.
	//   - The main node index that's specified in the job definition must be
	//   less than the number of nodes specified in the override.
	NumNodes *int32

	noSmithyDocumentSerde
}

// An object that represents the node properties of a multi-node parallel job.
// Node properties can't be specified for Amazon EKS based job definitions.
type NodeProperties struct {

	// Specifies the node index for the main node of a multi-node parallel job.
	// This node index value must be less than the number of nodes.
	//
	// This member is required.
	MainNode *int32

	// A list of node ranges and their properties that are associated with a
	// multi-node parallel job.
	//
	// This member is required.
	NodeRangeProperties []NodeRangeProperty

	// The number of nodes that are associated with a multi-node parallel job.
	//
	// This member is required.
	NumNodes *int32

	noSmithyDocumentSerde
}

// An object that represents the properties of a node that's associated with a
// multi-node parallel job.
type NodePropertiesSummary struct {

	// Specifies whether the current node is the main node for a multi-node
	// parallel job.
	IsMainNode *bool

	// The node index for the node. Node index numbering begins at zero. This
	// index is also available on the node with the AWS_BATCH_JOB_NODE_INDEX
	// environment variable.
	NodeIndex *int32

	// The number of nodes that are associated with a multi-node parallel job.
	NumNodes *int32

	noSmithyDocumentSerde
}

// The object that represents any node overrides to a job definition that's
// used in a SubmitJob API operation.
type NodePropertyOverride struct {

	// The range of nodes, using node index values, that's used to override. A
	// range of 0:3 indicates nodes with index values of 0 through 3 . If the
	// starting range value is omitted ( :n ), then 0 is used to start the
	// range. If the ending range value is omitted ( n: ), then the highest
	// possible node index is used to end the range.
	//
	// This member is required.
	TargetNodes *string

	// The overrides that are sent to a node range.
	ContainerOverrides *ContainerOverrides

	noSmithyDocumentSerde
}

// An object that represents the properties of the node range for a multi-node
// parallel job.
type NodeRangeProperty struct {

	// The range of nodes, using node index values. A range of 0:3 indicates
	// nodes with index values of 0 through 3 . If the starting range value is
	// omitted ( :n ), then 0 is used to start the range. If the ending range
	// value is omitted ( n: ), then the highest possible node index is used to
	// end the range. Your cumulative node ranges must account for all nodes
	// ( 0:n ). You can nest node ranges (for example, 0:10 and 4:5 ). In this
	// case, the 4:5 range properties override the 0:10 properties.
	//
	// This member is required.
	TargetNodes *string

	// The container details for the node range.
	Container *ContainerProperties

	noSmithyDocumentSerde
}
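// Example (an illustrative sketch): node properties for a four-node
// multi-node parallel job. The single 0: range has an open upper boundary, so
// the cumulative ranges account for every node as required above.
//
//	nodes := types.NodeProperties{
//	    MainNode: aws.Int32(0), // node 0 coordinates the job
//	    NumNodes: aws.Int32(4),
//	    NodeRangeProperties: []types.NodeRangeProperty{
//	        {
//	            TargetNodes: aws.String("0:"), // all four nodes
//	            // Container: per-range container properties would go here.
//	        },
//	    },
//	}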
// The type and amount of a resource to assign to a container. The supported
// resources include GPU , MEMORY , and VCPU .
type ResourceRequirement struct {

	// The type of resource to assign to a container. The supported resources
	// include GPU , MEMORY , and VCPU .
	//
	// This member is required.
	Type ResourceType

	// The quantity of the specified resource to reserve for the container. The
	// values vary based on the type specified.
	//
	// type="GPU" The number of physical GPUs to reserve for the container. Make
	// sure that the number of GPUs reserved for all containers in a job doesn't
	// exceed the number of available GPUs on the compute resource that the job
	// is launched on. GPUs aren't available for jobs that are running on
	// Fargate resources.
	//
	// type="MEMORY" The memory hard limit (in MiB) presented to the container.
	// This parameter is supported for jobs that are running on EC2 resources.
	// If your container attempts to exceed the memory specified, the container
	// is terminated. This parameter maps to Memory in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --memory option to docker run (https://docs.docker.com/engine/reference/run/) .
	// You must specify at least 4 MiB of memory for a job. This is required but
	// can be specified in several places for multi-node parallel (MNP) jobs. It
	// must be specified for each node at least once. If you're trying to
	// maximize your resource utilization by providing your jobs as much memory
	// as possible for a particular instance type, see Memory management (https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html)
	// in the Batch User Guide. For jobs that are running on Fargate resources,
	// the value is the hard limit (in MiB) and must match one of the supported
	// values, and the VCPU value must be one of the values supported for that
	// memory value.
	//   - value = 512: VCPU = 0.25
	//   - value = 1024: VCPU = 0.25 or 0.5
	//   - value = 2048: VCPU = 0.25, 0.5, or 1
	//   - value = 3072: VCPU = 0.5 or 1
	//   - value = 4096: VCPU = 0.5, 1, or 2
	//   - value = 5120, 6144, or 7168: VCPU = 1 or 2
	//   - value = 8192: VCPU = 1, 2, or 4
	//   - value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360: VCPU = 2
	//   or 4
	//   - value = 16384: VCPU = 2, 4, or 8
	//   - value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624,
	//   27648, 29696, or 30720: VCPU = 4
	//   - value = 20480, 24576, or 28672: VCPU = 4 or 8
	//   - value = 36864, 45056, 53248, or 61440: VCPU = 8
	//   - value = 32768, 40960, 49152, or 57344: VCPU = 8 or 16
	//   - value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880:
	//   VCPU = 16
	//
	// type="VCPU" The number of vCPUs reserved for the container. This
	// parameter maps to CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/) .
	// Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must
	// specify at least one vCPU. This is required but can be specified in
	// several places; it must be specified for each node at least once. The
	// default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs.
	// For more information about Fargate quotas, see Fargate quotas (https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate)
	// in the Amazon Web Services General Reference. For jobs that are running
	// on Fargate resources, the value must match one of the supported values,
	// and the MEMORY value must be one of the values supported for that VCPU
	// value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16.
	//   - value = 0.25: MEMORY = 512, 1024, or 2048
	//   - value = 0.5: MEMORY = 1024, 2048, 3072, or 4096
	//   - value = 1: MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192
	//   - value = 2: MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264,
	//   12288, 13312, 14336, 15360, or 16384
	//   - value = 4: MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336,
	//   15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576,
	//   25600, 26624, 27648, 28672, 29696, or 30720
	//   - value = 8: MEMORY = 16384, 20480, 24576, 28672, 32768, 36864, 40960,
	//   45056, 49152, 53248, 57344, or 61440
	//   - value = 16: MEMORY = 32768, 40960, 49152, 57344, 65536, 73728, 81920,
	//   90112, 98304, 106496, 114688, or 122880
	//
	// This member is required.
	Value *string

	noSmithyDocumentSerde
}
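// Example (an illustrative sketch): a valid Fargate vCPU/memory pairing taken
// from the tables above; per the VCPU table, 1 vCPU supports 2048 MiB. The
// ResourceTypeVcpu and ResourceTypeMemory enum constants follow this
// package's generated naming.
//
//	reqs := []types.ResourceRequirement{
//	    {Type: types.ResourceTypeVcpu, Value: aws.String("1")},
//	    {Type: types.ResourceTypeMemory, Value: aws.String("2048")},
//	}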
// The retry strategy that's associated with a job. For more information, see
// Automated job retries (https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html)
// in the Batch User Guide.
type RetryStrategy struct {

	// The number of times to move a job to the RUNNABLE status. You can specify
	// between 1 and 10 attempts. If the value of attempts is greater than one,
	// the job is retried on failure up to that many times.
	Attempts *int32

	// Array of up to 5 objects that specify the conditions where jobs are
	// retried or failed. If this parameter is specified, then the attempts
	// parameter must also be specified. If none of the listed conditions match,
	// then the job is retried.
	EvaluateOnExit []EvaluateOnExit

	noSmithyDocumentSerde
}

// An object that represents the compute environment architecture for Batch
// jobs on Fargate.
type RuntimePlatform struct {

	// The vCPU architecture. The default value is X86_64 . Valid values are
	// X86_64 and ARM64 . This parameter must be set to X86_64 for Windows
	// containers.
	CpuArchitecture *string

	// The operating system for the compute environment. Valid values are: LINUX
	// (default), WINDOWS_SERVER_2019_CORE , WINDOWS_SERVER_2019_FULL ,
	// WINDOWS_SERVER_2022_CORE , and WINDOWS_SERVER_2022_FULL . The following
	// parameters can't be set for Windows containers: linuxParameters ,
	// privileged , user , ulimits , readonlyRootFilesystem , and
	// efsVolumeConfiguration . The Batch scheduler checks the compute
	// environments that are attached to the job queue before registering a task
	// definition with Fargate. If the job requires a Windows container and the
	// first compute environment is LINUX , the compute environment is skipped
	// and the next one is checked until a Windows-based compute environment is
	// found. Fargate Spot isn't supported for Windows-based containers on
	// Fargate. A job queue will be blocked if a Fargate Windows job is
	// submitted to a job queue with only Fargate Spot compute environments.
	// However, you can attach both FARGATE and FARGATE_SPOT compute
	// environments to the same job queue.
	OperatingSystemFamily *string

	noSmithyDocumentSerde
}
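// Example (an illustrative sketch): retry on host-initiated failures (such as
// Spot reclamation) and exit on everything else. The "Host EC2*" pattern
// follows the Batch job retries documentation; the EvaluateOnExit fields and
// RetryAction constants are assumed from this package's generated naming.
//
//	retry := types.RetryStrategy{
//	    Attempts: aws.Int32(3),
//	    EvaluateOnExit: []types.EvaluateOnExit{
//	        {
//	            Action:         types.RetryActionRetry,
//	            OnStatusReason: aws.String("Host EC2*"),
//	        },
//	        {
//	            Action:     types.RetryActionExit,
//	            OnExitCode: aws.String("*"), // fail fast on all other exits
//	        },
//	    },
//	}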
// An object that represents a scheduling policy.
type SchedulingPolicyDetail struct {

	// The Amazon Resource Name (ARN) of the scheduling policy. An example is
	// arn:aws:batch:us-east-1:123456789012:scheduling-policy/HighPriority .
	//
	// This member is required.
	Arn *string

	// The name of the scheduling policy.
	//
	// This member is required.
	Name *string

	// The fair share policy for the scheduling policy.
	FairsharePolicy *FairsharePolicy

	// The tags that you apply to the scheduling policy to categorize and
	// organize your resources. Each tag consists of a key and an optional
	// value. For more information, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)
	// in the Amazon Web Services General Reference.
	Tags map[string]string

	noSmithyDocumentSerde
}

// An object that contains the details of a scheduling policy that's returned
// in a ListSchedulingPolicies action.
type SchedulingPolicyListingDetail struct {

	// The Amazon Resource Name (ARN) of the scheduling policy.
	//
	// This member is required.
	Arn *string

	noSmithyDocumentSerde
}

// An object that represents the secret to expose to your container. Secrets
// can be exposed to a container in the following ways:
//   - To inject sensitive data into your containers as environment variables,
//   use the secrets container definition parameter.
//   - To reference sensitive information in the log configuration of a
//   container, use the secretOptions container definition parameter.
//
// For more information, see Specifying sensitive data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
// in the Batch User Guide.
type Secret struct {

	// The name of the secret.
	//
	// This member is required.
	Name *string

	// The secret to expose to the container. The supported values are either
	// the full Amazon Resource Name (ARN) of the Secrets Manager secret or the
	// full ARN of the parameter in the Amazon Web Services Systems Manager
	// Parameter Store. If the Amazon Web Services Systems Manager Parameter
	// Store parameter exists in the same Region as the job you're launching,
	// then you can use either the full Amazon Resource Name (ARN) or the name
	// of the parameter. If the parameter exists in a different Region, then the
	// full ARN must be specified.
	//
	// This member is required.
	ValueFrom *string

	noSmithyDocumentSerde
}

// Specifies the weights for the fair share identifiers for the fair share
// policy. Fair share identifiers that aren't included have a default weight
// of 1.0 .
type ShareAttributes struct {

	// A fair share identifier or fair share identifier prefix. If the string
	// ends with an asterisk (*), this entry specifies the weight factor to use
	// for fair share identifiers that start with that prefix. The list of fair
	// share identifiers in a fair share policy can't overlap. For example, you
	// can't have one that specifies a shareIdentifier of UserA* and another
	// that specifies a shareIdentifier of UserA-1 . There can be no more than
	// 500 fair share identifiers active in a job queue. The string is limited
	// to 255 alphanumeric characters, and can be followed by an asterisk (*).
	//
	// This member is required.
	ShareIdentifier *string

	// The weight factor for the fair share identifier. The default value is
	// 1.0. A lower value has a higher priority for compute resources. For
	// example, jobs that use a share identifier with a weight factor of 0.125
	// (1/8) get 8 times the compute resources of jobs that use a share
	// identifier with a weight factor of 1. The smallest supported value is
	// 0.0001, and the largest supported value is 999.9999.
	WeightFactor *float32

	noSmithyDocumentSerde
}
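// Example (an illustrative sketch): weighting two fair share identifier
// prefixes. Because a lower weight factor gets more compute, jobs under the
// hypothetical prod* prefix here receive twice the resources of dev* jobs.
//
//	shares := []types.ShareAttributes{
//	    {ShareIdentifier: aws.String("prod*"), WeightFactor: aws.Float32(0.5)},
//	    {ShareIdentifier: aws.String("dev*"), WeightFactor: aws.Float32(1.0)},
//	}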
// The container path, mount options, and size of the tmpfs mount. This object
// isn't applicable to jobs that are running on Fargate resources.
type Tmpfs struct {

	// The absolute file path in the container where the tmpfs volume is
	// mounted.
	//
	// This member is required.
	ContainerPath *string

	// The size (in MiB) of the tmpfs volume.
	//
	// This member is required.
	Size *int32

	// The list of tmpfs volume mount options. Valid values: "defaults" | "ro" |
	// "rw" | "suid" | "nosuid" | "dev" | "nodev" | "exec" | "noexec" | "sync" |
	// "async" | "dirsync" | "remount" | "mand" | "nomand" | "atime" | "noatime"
	// | "diratime" | "nodiratime" | "bind" | "rbind" | "unbindable" |
	// "runbindable" | "private" | "rprivate" | "shared" | "rshared" | "slave" |
	// "rslave" | "relatime" | "norelatime" | "strictatime" | "nostrictatime" |
	// "mode" | "uid" | "gid" | "nr_inodes" | "nr_blocks" | "mpol"
	MountOptions []string

	noSmithyDocumentSerde
}

// The ulimit settings to pass to the container. This object isn't applicable
// to jobs that are running on Fargate resources.
type Ulimit struct {

	// The hard limit for the ulimit type.
	//
	// This member is required.
	HardLimit *int32

	// The type of the ulimit .
	//
	// This member is required.
	Name *string

	// The soft limit for the ulimit type.
	//
	// This member is required.
	SoftLimit *int32

	noSmithyDocumentSerde
}

// Specifies the infrastructure update policy for the compute environment. For
// more information about infrastructure updates, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
// in the Batch User Guide.
type UpdatePolicy struct {

	// Specifies the job timeout (in minutes) when the compute environment
	// infrastructure is updated. The default value is 30.
	JobExecutionTimeoutMinutes int64

	// Specifies whether jobs are automatically terminated when the compute
	// environment infrastructure is updated. The default value is false .
	TerminateJobsOnUpdate *bool

	noSmithyDocumentSerde
}

// A data volume that's used in a job's container properties.
type Volume struct {

	// This parameter is specified when you're using an Amazon Elastic File
	// System file system for job storage. Jobs that are running on Fargate
	// resources must specify a platformVersion of at least 1.4.0 .
	EfsVolumeConfiguration *EFSVolumeConfiguration

	// The contents of the host parameter determine whether your data volume
	// persists on the host container instance and where it's stored. If the
	// host parameter is empty, then the Docker daemon assigns a host path for
	// your data volume. However, the data isn't guaranteed to persist after the
	// containers that are associated with it stop running. This parameter isn't
	// applicable to jobs that are running on Fargate resources and shouldn't be
	// provided.
	Host *Host

	// The name of the volume. It can be up to 255 characters long. It can
	// contain uppercase and lowercase letters, numbers, hyphens (-), and
	// underscores (_). This name is referenced in the sourceVolume parameter of
	// container definition mountPoints .
	Name *string

	noSmithyDocumentSerde
}
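// Example (an illustrative sketch): a named host volume for an EC2-backed
// job. The source path is hypothetical; the name "data" is what a MountPoint
// would reference through its sourceVolume parameter, and the Host type's
// SourcePath field is assumed from this package.
//
//	vol := types.Volume{
//	    Name: aws.String("data"),
//	    Host: &types.Host{
//	        SourcePath: aws.String("/mnt/ebs/data"), // persists on the instance
//	    },
//	}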
type noSmithyDocumentSerde = smithydocument.NoSerde