// Code generated by smithy-go-codegen DO NOT EDIT.

package types

import (
	smithydocument "github.com/aws/smithy-go/document"
	"time"
)

// This structure specifies the VPC subnets and security groups for the task,
// and whether a public IP address is to be used. This structure is relevant
// only for ECS tasks that use the awsvpc network mode.
type AwsVpcConfiguration struct {

	// Specifies the subnets associated with the task. These subnets must all be in
	// the same VPC. You can specify as many as 16 subnets.
	//
	// This member is required.
	Subnets []string

	// Specifies whether the task's elastic network interface receives a public IP
	// address. You can specify ENABLED only when LaunchType in EcsParameters is set
	// to FARGATE.
	AssignPublicIp AssignPublicIp

	// Specifies the security groups associated with the task. These security groups
	// must all be in the same VPC. You can specify as many as five security groups.
	// If you do not specify a security group, the default security group for the
	// VPC is used.
	SecurityGroups []string

	noSmithyDocumentSerde
}

// The array properties for the submitted job, such as the size of the array.
// The array size can be between 2 and 10,000. If you specify array properties
// for a job, it becomes an array job. This parameter is used only if the target
// is a Batch job.
type BatchArrayProperties struct {

	// The size of the array, if this is an array batch job.
	Size int32

	noSmithyDocumentSerde
}

// The overrides that are sent to a container.
type BatchContainerOverrides struct {

	// The command to send to the container that overrides the default command from
	// the Docker image or the task definition.
	Command []string

	// The environment variables to send to the container. You can add new
	// environment variables, which are added to the container at launch, or you can
	// override the existing environment variables from the Docker image or the task
	// definition. Environment variables cannot start with "Batch". This naming
	// convention is reserved for variables that Batch sets.
	Environment []BatchEnvironmentVariable

	// The instance type to use for a multi-node parallel job. This parameter isn't
	// applicable to single-node container jobs or jobs that run on Fargate
	// resources, and shouldn't be provided.
	InstanceType *string

	// The type and amount of resources to assign to a container. This overrides the
	// settings in the job definition. The supported resources include GPU, MEMORY,
	// and VCPU.
	ResourceRequirements []BatchResourceRequirement

	noSmithyDocumentSerde
}

// The environment variables to send to the container. You can add new
// environment variables, which are added to the container at launch, or you can
// override the existing environment variables from the Docker image or the task
// definition. Environment variables cannot start with "Batch". This naming
// convention is reserved for variables that Batch sets.
type BatchEnvironmentVariable struct {

	// The name of the key-value pair. For environment variables, this is the name
	// of the environment variable.
	Name *string

	// The value of the key-value pair. For environment variables, this is the
	// value of the environment variable.
	Value *string

	noSmithyDocumentSerde
}
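
// Example (editor's sketch, not generated code): a caller constructing an
// AwsVpcConfiguration for a Fargate task that needs outbound internet access.
// The subnet and security group IDs are hypothetical placeholders, and
// AssignPublicIpEnabled is assumed to be the generated enum constant for the
// ENABLED value (see enums.go).
//
//	vpcConfig := types.AwsVpcConfiguration{
//		Subnets:        []string{"subnet-0123456789abcdef0"},
//		SecurityGroups: []string{"sg-0123456789abcdef0"},
//		AssignPublicIp: types.AssignPublicIpEnabled,
//	}
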
// An object that represents a Batch job dependency.
type BatchJobDependency struct {

	// The job ID of the Batch job that's associated with this dependency.
	JobId *string

	// The type of the job dependency.
	Type BatchJobDependencyType

	noSmithyDocumentSerde
}

// The type and amount of a resource to assign to a container. The supported
// resources include GPU, MEMORY, and VCPU.
type BatchResourceRequirement struct {

	// The type of resource to assign to a container. The supported resources
	// include GPU, MEMORY, and VCPU.
	//
	// This member is required.
	Type BatchResourceRequirementType

	// The quantity of the specified resource to reserve for the container. The
	// values vary based on the type specified.
	//
	// type="GPU" The number of physical GPUs to reserve for the container. Make
	// sure that the number of GPUs reserved for all containers in a job doesn't
	// exceed the number of available GPUs on the compute resource that the job is
	// launched on. GPUs aren't available for jobs that are running on Fargate
	// resources.
	//
	// type="MEMORY" The memory hard limit (in MiB) presented to the container.
	// This parameter is supported for jobs that are running on EC2 resources. If
	// your container attempts to exceed the memory specified, the container is
	// terminated. This parameter maps to Memory in the Create a container
	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
	// --memory option to docker run (https://docs.docker.com/engine/reference/run/).
	// You must specify at least 4 MiB of memory for a job. This is required but can
	// be specified in several places for multi-node parallel (MNP) jobs. It must be
	// specified for each node at least once. If you're trying to maximize your
	// resource utilization by providing your jobs as much memory as possible for a
	// particular instance type, see Memory management
	// (https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html)
	// in the Batch User Guide.
	//
	// For jobs that are running on Fargate resources, the value is the hard limit
	// (in MiB) and must match one of the supported values, and the VCPU value must
	// be one of the values supported for that memory value.
	//   - value = 512: VCPU = 0.25
	//   - value = 1024: VCPU = 0.25 or 0.5
	//   - value = 2048: VCPU = 0.25, 0.5, or 1
	//   - value = 3072: VCPU = 0.5 or 1
	//   - value = 4096: VCPU = 0.5, 1, or 2
	//   - value = 5120, 6144, or 7168: VCPU = 1 or 2
	//   - value = 8192: VCPU = 1, 2, 4, or 8
	//   - value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360: VCPU = 2 or 4
	//   - value = 16384: VCPU = 2, 4, or 8
	//   - value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648,
	//     29696, or 30720: VCPU = 4
	//   - value = 20480, 24576, or 28672: VCPU = 4 or 8
	//   - value = 36864, 45056, 53248, or 61440: VCPU = 8
	//   - value = 32768, 40960, 49152, or 57344: VCPU = 8 or 16
	//   - value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880:
	//     VCPU = 16
	//
	// type="VCPU" The number of vCPUs reserved for the container. This parameter
	// maps to CpuShares in the Create a container
	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
	// --cpu-shares option to docker run
	// (https://docs.docker.com/engine/reference/run/). Each vCPU is equivalent to
	// 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This
	// is required but can be specified in several places; it must be specified for
	// each node at least once.
	//
	// The default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs.
	// For more information about Fargate quotas, see Fargate quotas
	// (https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate)
	// in the Amazon Web Services General Reference.
	//
	// For jobs that are running on Fargate resources, the value must match one of
	// the supported values, and the MEMORY value must be one of the values
	// supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, 4,
	// 8, and 16.
	//   - value = 0.25: MEMORY = 512, 1024, or 2048
	//   - value = 0.5: MEMORY = 1024, 2048, 3072, or 4096
	//   - value = 1: MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192
	//   - value = 2: MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264,
	//     12288, 13312, 14336, 15360, or 16384
	//   - value = 4: MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336,
	//     15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576,
	//     25600, 26624, 27648, 28672, 29696, or 30720
	//   - value = 8: MEMORY = 16384, 20480, 24576, 28672, 32768, 36864, 40960,
	//     45056, 49152, 53248, 57344, or 61440
	//   - value = 16: MEMORY = 32768, 40960, 49152, 57344, 65536, 73728, 81920,
	//     90112, 98304, 106496, 114688, or 122880
	//
	// This member is required.
	Value *string

	noSmithyDocumentSerde
}
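
// Example (editor's sketch, not generated code): a caller pairing VCPU and
// MEMORY requirements for a Fargate Batch job using one of the supported
// combinations listed above (1 vCPU with 2048 MiB). aws.String is the pointer
// helper from github.com/aws/aws-sdk-go-v2/aws, and the enum constant names
// are assumed to follow the generated pattern in enums.go.
//
//	requirements := []types.BatchResourceRequirement{
//		{Type: types.BatchResourceRequirementTypeVcpu, Value: aws.String("1")},
//		{Type: types.BatchResourceRequirementTypeMemory, Value: aws.String("2048")},
//	}
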
// The retry strategy that's associated with a job. For more information, see
// Automated job retries
// (https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html) in the
// Batch User Guide.
type BatchRetryStrategy struct {

	// The number of times to move a job to the RUNNABLE status. If the value of
	// attempts is greater than one, the job is retried on failure the same number
	// of attempts as the value.
	Attempts int32

	noSmithyDocumentSerde
}

// The details of a capacity provider strategy. To learn more, see
// CapacityProviderStrategyItem
// (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CapacityProviderStrategyItem.html)
// in the Amazon ECS API Reference.
type CapacityProviderStrategyItem struct {

	// The short name of the capacity provider.
	//
	// This member is required.
	CapacityProvider *string

	// The base value designates how many tasks, at a minimum, to run on the
	// specified capacity provider. Only one capacity provider in a capacity
	// provider strategy can have a base defined. If no value is specified, the
	// default value of 0 is used.
	Base int32

	// The weight value designates the relative percentage of the total number of
	// tasks launched that should use the specified capacity provider. The weight
	// value is taken into consideration after the base value, if defined, is
	// satisfied.
	Weight int32

	noSmithyDocumentSerde
}

// A DeadLetterConfig object that contains information about a dead-letter
// queue configuration.
type DeadLetterConfig struct {

	// The ARN of the Amazon SQS queue specified as the target for the dead-letter
	// queue.
	Arn *string

	noSmithyDocumentSerde
}

// The overrides that are sent to a container. An empty container override can
// be passed in. An example of an empty container override is
// {"containerOverrides": [ ] }. If a non-empty container override is specified,
// the name parameter must be included.
type EcsContainerOverride struct {

	// The command to send to the container that overrides the default command from
	// the Docker image or the task definition. You must also specify a container
	// name.
	Command []string

	// The number of cpu units reserved for the container, instead of the default
	// value from the task definition. You must also specify a container name.
	Cpu *int32

	// The environment variables to send to the container. You can add new
	// environment variables, which are added to the container at launch, or you can
	// override the existing environment variables from the Docker image or the task
	// definition. You must also specify a container name.
	Environment []EcsEnvironmentVariable

	// A list of files containing the environment variables to pass to a container,
	// instead of the value from the container definition.
	EnvironmentFiles []EcsEnvironmentFile

	// The hard limit (in MiB) of memory to present to the container, instead of
	// the default value from the task definition. If your container attempts to
	// exceed the memory specified here, the container is killed. You must also
	// specify a container name.
	Memory *int32

	// The soft limit (in MiB) of memory to reserve for the container, instead of
	// the default value from the task definition. You must also specify a container
	// name.
	MemoryReservation *int32

	// The name of the container that receives the override. This parameter is
	// required if any override is specified.
	Name *string

	// The type and amount of a resource to assign to a container, instead of the
	// default value from the task definition. The only supported resource is a GPU.
	ResourceRequirements []EcsResourceRequirement

	noSmithyDocumentSerde
}
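
// Example (editor's sketch, not generated code): a caller overriding one
// container's command and environment. Name is set because, as noted above, a
// non-empty container override must include the name parameter. The container
// name and variable are hypothetical.
//
//	override := types.EcsContainerOverride{
//		Name:    aws.String("app"),
//		Command: []string{"/bin/app", "--verbose"},
//		Environment: []types.EcsEnvironmentVariable{
//			{Name: aws.String("LOG_LEVEL"), Value: aws.String("debug")},
//		},
//	}
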
// A list of files containing the environment variables to pass to a container.
// You can specify up to ten environment files. The file must have a .env file
// extension. Each line in an environment file should contain an environment
// variable in VARIABLE=VALUE format. Lines beginning with # are treated as
// comments and are ignored. For more information about the environment
// variable file syntax, see Declare default environment variables in file
// (https://docs.docker.com/compose/env-file/).
//
// If there are environment variables specified using the environment parameter
// in a container definition, they take precedence over the variables contained
// within an environment file. If multiple environment files are specified that
// contain the same variable, they're processed from the top down. We recommend
// that you use unique variable names. For more information, see Specifying
// environment variables
// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html)
// in the Amazon Elastic Container Service Developer Guide.
//
// This parameter is only supported for tasks hosted on Fargate using the
// following platform versions:
//   - Linux platform version 1.4.0 or later.
//   - Windows platform version 1.0.0 or later.
type EcsEnvironmentFile struct {

	// The file type to use. The only supported value is s3.
	//
	// This member is required.
	Type EcsEnvironmentFileType

	// The Amazon Resource Name (ARN) of the Amazon S3 object containing the
	// environment variable file.
	//
	// This member is required.
	Value *string

	noSmithyDocumentSerde
}

// The environment variables to send to the container. You can add new
// environment variables, which are added to the container at launch, or you can
// override the existing environment variables from the Docker image or the task
// definition. You must also specify a container name.
type EcsEnvironmentVariable struct {

	// The name of the key-value pair. For environment variables, this is the name
	// of the environment variable.
	Name *string

	// The value of the key-value pair. For environment variables, this is the
	// value of the environment variable.
	Value *string

	noSmithyDocumentSerde
}
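
// Example (editor's sketch, not generated code): the VARIABLE=VALUE format
// described above, in a hypothetical S3-hosted config.env:
//
//	# comment lines are ignored
//	LOG_LEVEL=info
//	FEATURE_FLAG=on
//
// and a caller referencing it (EcsEnvironmentFileTypeS3 is assumed to be the
// generated constant for the s3 file type; the bucket is a placeholder):
//
//	envFile := types.EcsEnvironmentFile{
//		Type:  types.EcsEnvironmentFileTypeS3,
//		Value: aws.String("arn:aws:s3:::my-bucket/config.env"),
//	}
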
// The amount of ephemeral storage to allocate for the task. This parameter is
// used to expand the total amount of ephemeral storage available, beyond the
// default amount, for tasks hosted on Fargate. For more information, see
// Fargate task storage
// (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html)
// in the Amazon ECS User Guide for Fargate.
//
// This parameter is only supported for tasks hosted on Fargate using Linux
// platform version 1.4.0 or later. This parameter is not supported for Windows
// containers on Fargate.
type EcsEphemeralStorage struct {

	// The total amount, in GiB, of ephemeral storage to set for the task. The
	// minimum supported value is 21 GiB and the maximum supported value is 200 GiB.
	//
	// This member is required.
	SizeInGiB int32

	noSmithyDocumentSerde
}

// Details on an Elastic Inference accelerator task override. This parameter is
// used to override the Elastic Inference accelerator specified in the task
// definition. For more information, see Working with Amazon Elastic Inference
// on Amazon ECS
// (https://docs.aws.amazon.com/AmazonECS/latest/userguide/ecs-inference.html)
// in the Amazon Elastic Container Service Developer Guide.
type EcsInferenceAcceleratorOverride struct {

	// The Elastic Inference accelerator device name to override for the task. This
	// parameter must match a deviceName specified in the task definition.
	DeviceName *string

	// The Elastic Inference accelerator type to use.
	DeviceType *string

	noSmithyDocumentSerde
}

// The type and amount of a resource to assign to a container. The supported
// resource types are GPUs and Elastic Inference accelerators. For more
// information, see Working with GPUs on Amazon ECS
// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-gpu.html) or
// Working with Amazon Elastic Inference on Amazon ECS
// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-inference.html)
// in the Amazon Elastic Container Service Developer Guide.
type EcsResourceRequirement struct {

	// The type of resource to assign to a container. The supported values are GPU
	// or InferenceAccelerator.
	//
	// This member is required.
	Type EcsResourceRequirementType

	// The value for the specified resource type. If the GPU type is used, the
	// value is the number of physical GPUs the Amazon ECS container agent reserves
	// for the container. The number of GPUs that's reserved for all containers in a
	// task can't exceed the number of available GPUs on the container instance that
	// the task is launched on. If the InferenceAccelerator type is used, the value
	// matches the deviceName for an InferenceAccelerator specified in a task
	// definition.
	//
	// This member is required.
	Value *string

	noSmithyDocumentSerde
}

// The overrides that are associated with a task.
type EcsTaskOverride struct {

	// One or more container overrides that are sent to a task.
	ContainerOverrides []EcsContainerOverride

	// The cpu override for the task.
	Cpu *string

	// The ephemeral storage setting override for the task. This parameter is only
	// supported for tasks hosted on Fargate that use the following platform
	// versions:
	//   - Linux platform version 1.4.0 or later.
	//   - Windows platform version 1.0.0 or later.
	EphemeralStorage *EcsEphemeralStorage

	// The Amazon Resource Name (ARN) of the task execution IAM role override for
	// the task. For more information, see Amazon ECS task execution IAM role
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html)
	// in the Amazon Elastic Container Service Developer Guide.
	ExecutionRoleArn *string

	// The Elastic Inference accelerator override for the task.
	InferenceAcceleratorOverrides []EcsInferenceAcceleratorOverride

	// The memory override for the task.
	Memory *string

	// The Amazon Resource Name (ARN) of the IAM role that containers in this task
	// can assume. All containers in this task are granted the permissions that are
	// specified in this role. For more information, see IAM Role for Tasks
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html)
	// in the Amazon Elastic Container Service Developer Guide.
	TaskRoleArn *string

	noSmithyDocumentSerde
}
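
// Example (editor's sketch, not generated code): a task-level override that
// bumps ephemeral storage and reuses the container override sketched earlier.
// The 100 GiB value sits inside the documented 21-200 GiB range.
//
//	taskOverride := types.EcsTaskOverride{
//		ContainerOverrides: []types.EcsContainerOverride{override},
//		EphemeralStorage:   &types.EcsEphemeralStorage{SizeInGiB: 100},
//	}
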
// Filter events using an event pattern. For more information, see Events and
// Event Patterns
// (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html)
// in the Amazon EventBridge User Guide.
type Filter struct {

	// The event pattern.
	Pattern *string

	noSmithyDocumentSerde
}

// The collection of event patterns used to filter events. For more
// information, see Events and Event Patterns
// (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html)
// in the Amazon EventBridge User Guide.
type FilterCriteria struct {

	// The event patterns.
	Filters []Filter

	noSmithyDocumentSerde
}

// The Secrets Manager secret that stores your broker credentials.
//
// The following types satisfy this interface:
//
//	MQBrokerAccessCredentialsMemberBasicAuth
type MQBrokerAccessCredentials interface {
	isMQBrokerAccessCredentials()
}

// The ARN of the Secrets Manager secret.
type MQBrokerAccessCredentialsMemberBasicAuth struct {
	Value string

	noSmithyDocumentSerde
}

func (*MQBrokerAccessCredentialsMemberBasicAuth) isMQBrokerAccessCredentials() {}

// The Secrets Manager secret that stores your stream credentials.
//
// The following types satisfy this interface:
//
//	MSKAccessCredentialsMemberClientCertificateTlsAuth
//	MSKAccessCredentialsMemberSaslScram512Auth
type MSKAccessCredentials interface {
	isMSKAccessCredentials()
}

// The ARN of the Secrets Manager secret.
type MSKAccessCredentialsMemberClientCertificateTlsAuth struct {
	Value string

	noSmithyDocumentSerde
}

func (*MSKAccessCredentialsMemberClientCertificateTlsAuth) isMSKAccessCredentials() {}

// The ARN of the Secrets Manager secret.
type MSKAccessCredentialsMemberSaslScram512Auth struct {
	Value string

	noSmithyDocumentSerde
}

func (*MSKAccessCredentialsMemberSaslScram512Auth) isMSKAccessCredentials() {}

// This structure specifies the network configuration for an Amazon ECS task.
type NetworkConfiguration struct {

	// Use this structure to specify the VPC subnets and security groups for the
	// task, and whether a public IP address is to be used. This structure is
	// relevant only for ECS tasks that use the awsvpc network mode.
	AwsvpcConfiguration *AwsVpcConfiguration

	noSmithyDocumentSerde
}

// An object that represents a pipe. Amazon EventBridge Pipes connects event
// sources to targets and reduces the need for specialized knowledge and
// integration code.
type Pipe struct {

	// The ARN of the pipe.
	Arn *string

	// The time the pipe was created.
	CreationTime *time.Time

	// The state the pipe is in.
	CurrentState PipeState

	// The state the pipe should be in.
	DesiredState RequestedPipeState

	// The ARN of the enrichment resource.
	Enrichment *string

	// When the pipe was last updated, in ISO-8601 format
	// (https://www.w3.org/TR/NOTE-datetime) (YYYY-MM-DDThh:mm:ss.sTZD).
	LastModifiedTime *time.Time

	// The name of the pipe.
	Name *string

	// The ARN of the source resource.
	Source *string

	// The reason the pipe is in its current state.
	StateReason *string

	// The ARN of the target resource.
	Target *string

	noSmithyDocumentSerde
}
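
// Example (editor's sketch, not generated code): a FilterCriteria whose single
// pattern keeps only ORDER_CREATED events. The JSON shape follows the Events
// and Event Patterns guide linked above; the field names inside the pattern
// are hypothetical.
//
//	criteria := types.FilterCriteria{
//		Filters: []types.Filter{
//			{Pattern: aws.String(`{"detail":{"orderStatus":["ORDER_CREATED"]}}`)},
//		},
//	}
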
// These are custom parameters to be used when the target is an API Gateway
// REST API or EventBridge ApiDestination. In the latter case, these are merged
// with any InvocationParameters specified on the Connection, with any values
// from the Connection taking precedence.
type PipeEnrichmentHttpParameters struct {

	// The headers that need to be sent as part of request invoking the API Gateway
	// REST API or EventBridge ApiDestination.
	HeaderParameters map[string]string

	// The path parameter values to be used to populate API Gateway REST API or
	// EventBridge ApiDestination path wildcards ("*").
	PathParameterValues []string

	// The query string keys/values that need to be sent as part of request
	// invoking the API Gateway REST API or EventBridge ApiDestination.
	QueryStringParameters map[string]string

	noSmithyDocumentSerde
}

// The parameters required to set up enrichment on your pipe.
type PipeEnrichmentParameters struct {

	// Contains the HTTP parameters to use when the target is an API Gateway REST
	// endpoint or EventBridge ApiDestination. If you specify an API Gateway REST
	// API or EventBridge ApiDestination as a target, you can use this parameter to
	// specify headers, path parameters, and query string keys/values as part of
	// your target invoking request. If you're using ApiDestinations, the
	// corresponding Connection can also have these values configured. In case of
	// any conflicting keys, values from the Connection take precedence.
	HttpParameters *PipeEnrichmentHttpParameters

	// Valid JSON text passed to the enrichment. In this case, nothing from the
	// event itself is passed to the enrichment. For more information, see The
	// JavaScript Object Notation (JSON) Data Interchange Format
	// (http://www.rfc-editor.org/rfc/rfc7159.txt).
	InputTemplate *string

	noSmithyDocumentSerde
}

// The parameters for using an Active MQ broker as a source.
type PipeSourceActiveMQBrokerParameters struct {

	// The credentials needed to access the resource.
	//
	// This member is required.
	Credentials MQBrokerAccessCredentials

	// The name of the destination queue to consume.
	//
	// This member is required.
	QueueName *string

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	noSmithyDocumentSerde
}

// The parameters for using a DynamoDB stream as a source.
type PipeSourceDynamoDBStreamParameters struct {

	// (Streams only) The position in a stream from which to start reading.
	//
	// This member is required.
	StartingPosition DynamoDBStreamStartPosition

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// Define the target queue to send dead-letter queue events to.
	DeadLetterConfig *DeadLetterConfig

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	// (Streams only) Discard records older than the specified age. The default
	// value is -1, which sets the maximum age to infinite. When the value is set to
	// infinite, EventBridge never discards old records.
	MaximumRecordAgeInSeconds *int32

	// (Streams only) Discard records after the specified number of retries. The
	// default value is -1, which sets the maximum number of retries to infinite.
	// When MaximumRetryAttempts is infinite, EventBridge retries failed records
	// until the record expires in the event source.
	MaximumRetryAttempts *int32

	// (Streams only) Define how to handle item process failures. AUTOMATIC_BISECT
	// halves each batch and retries each half until all the records are processed
	// or there is one failed message left in the batch.
	OnPartialBatchItemFailure OnPartialBatchItemFailureStreams

	// (Streams only) The number of batches to process concurrently from each
	// shard. The default value is 1.
	ParallelizationFactor *int32

	noSmithyDocumentSerde
}
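
// Example (editor's sketch, not generated code): an Active MQ source whose
// required Credentials member is the BasicAuth union variant holding a
// hypothetical Secrets Manager ARN.
//
//	source := types.PipeSourceActiveMQBrokerParameters{
//		QueueName: aws.String("orders"),
//		Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{
//			Value: "arn:aws:secretsmanager:us-east-1:111122223333:secret:mq-creds",
//		},
//		BatchSize: aws.Int32(10),
//	}
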
// The parameters for using a Kinesis stream as a source.
type PipeSourceKinesisStreamParameters struct {

	// (Streams only) The position in a stream from which to start reading.
	//
	// This member is required.
	StartingPosition KinesisStreamStartPosition

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// Define the target queue to send dead-letter queue events to.
	DeadLetterConfig *DeadLetterConfig

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	// (Streams only) Discard records older than the specified age. The default
	// value is -1, which sets the maximum age to infinite. When the value is set to
	// infinite, EventBridge never discards old records.
	MaximumRecordAgeInSeconds *int32

	// (Streams only) Discard records after the specified number of retries. The
	// default value is -1, which sets the maximum number of retries to infinite.
	// When MaximumRetryAttempts is infinite, EventBridge retries failed records
	// until the record expires in the event source.
	MaximumRetryAttempts *int32

	// (Streams only) Define how to handle item process failures. AUTOMATIC_BISECT
	// halves each batch and retries each half until all the records are processed
	// or there is one failed message left in the batch.
	OnPartialBatchItemFailure OnPartialBatchItemFailureStreams

	// (Streams only) The number of batches to process concurrently from each
	// shard. The default value is 1.
	ParallelizationFactor *int32

	// With StartingPosition set to AT_TIMESTAMP, the time from which to start
	// reading, in Unix time seconds.
	StartingPositionTimestamp *time.Time

	noSmithyDocumentSerde
}

// The parameters for using an MSK stream as a source.
type PipeSourceManagedStreamingKafkaParameters struct {

	// The name of the topic that the pipe will read from.
	//
	// This member is required.
	TopicName *string

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// The name of the consumer group to use when reading from the topic.
	ConsumerGroupID *string

	// The credentials needed to access the resource.
	Credentials MSKAccessCredentials

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	// (Streams only) The position in a stream from which to start reading.
	StartingPosition MSKStartPosition

	noSmithyDocumentSerde
}

// The parameters required to set up a source for your pipe.
type PipeSourceParameters struct {

	// The parameters for using an Active MQ broker as a source.
	ActiveMQBrokerParameters *PipeSourceActiveMQBrokerParameters

	// The parameters for using a DynamoDB stream as a source.
	DynamoDBStreamParameters *PipeSourceDynamoDBStreamParameters

	// The collection of event patterns used to filter events. For more
	// information, see Events and Event Patterns
	// (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html)
	// in the Amazon EventBridge User Guide.
	FilterCriteria *FilterCriteria

	// The parameters for using a Kinesis stream as a source.
	KinesisStreamParameters *PipeSourceKinesisStreamParameters

	// The parameters for using an MSK stream as a source.
	ManagedStreamingKafkaParameters *PipeSourceManagedStreamingKafkaParameters

	// The parameters for using a Rabbit MQ broker as a source.
	RabbitMQBrokerParameters *PipeSourceRabbitMQBrokerParameters

	// The parameters for using a self-managed Apache Kafka stream as a source.
	SelfManagedKafkaParameters *PipeSourceSelfManagedKafkaParameters

	// The parameters for using an Amazon SQS queue as a source.
	SqsQueueParameters *PipeSourceSqsQueueParameters

	noSmithyDocumentSerde
}
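
// Example (editor's sketch, not generated code): source parameters for a
// Kinesis stream that reads new records, retries failed batches up to three
// times, and routes exhausted records to a dead-letter queue. The enum
// constant name is assumed from enums.go; the queue ARN is a placeholder.
//
//	kinesisSource := types.PipeSourceParameters{
//		KinesisStreamParameters: &types.PipeSourceKinesisStreamParameters{
//			StartingPosition:     types.KinesisStreamStartPositionLatest,
//			BatchSize:            aws.Int32(100),
//			MaximumRetryAttempts: aws.Int32(3),
//			DeadLetterConfig: &types.DeadLetterConfig{
//				Arn: aws.String("arn:aws:sqs:us-east-1:111122223333:pipe-dlq"),
//			},
//		},
//	}
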
// The parameters for using a Rabbit MQ broker as a source.
type PipeSourceRabbitMQBrokerParameters struct {

	// The credentials needed to access the resource.
	//
	// This member is required.
	Credentials MQBrokerAccessCredentials

	// The name of the destination queue to consume.
	//
	// This member is required.
	QueueName *string

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	// The name of the virtual host associated with the source broker.
	VirtualHost *string

	noSmithyDocumentSerde
}

// The parameters for using a self-managed Apache Kafka stream as a source.
type PipeSourceSelfManagedKafkaParameters struct {

	// The name of the topic that the pipe will read from.
	//
	// This member is required.
	TopicName *string

	// An array of server URLs.
	AdditionalBootstrapServers []string

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// The name of the consumer group to use when reading from the topic.
	ConsumerGroupID *string

	// The credentials needed to access the resource.
	Credentials SelfManagedKafkaAccessConfigurationCredentials

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	// The ARN of the Secrets Manager secret used for certification.
	ServerRootCaCertificate *string

	// (Streams only) The position in a stream from which to start reading.
	StartingPosition SelfManagedKafkaStartPosition

	// This structure specifies the VPC subnets and security groups for the stream,
	// and whether a public IP address is to be used.
	Vpc *SelfManagedKafkaAccessConfigurationVpc

	noSmithyDocumentSerde
}

// The parameters for using an Amazon SQS queue as a source.
type PipeSourceSqsQueueParameters struct {

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	noSmithyDocumentSerde
}

// The parameters for using a Batch job as a target.
type PipeTargetBatchJobParameters struct {

	// The job definition used by this job. This value can be one of name,
	// name:revision, or the Amazon Resource Name (ARN) for the job definition. If
	// name is specified without a revision, then the latest active revision is
	// used.
	//
	// This member is required.
	JobDefinition *string

	// The name of the job. It can be up to 128 letters long. The first character
	// must be alphanumeric; the name can contain uppercase and lowercase letters,
	// numbers, hyphens (-), and underscores (_).
	//
	// This member is required.
	JobName *string

	// The array properties for the submitted job, such as the size of the array.
	// The array size can be between 2 and 10,000. If you specify array properties
	// for a job, it becomes an array job. This parameter is used only if the target
	// is a Batch job.
	ArrayProperties *BatchArrayProperties

	// The overrides that are sent to a container.
	ContainerOverrides *BatchContainerOverrides

	// A list of dependencies for the job. A job can depend upon a maximum of 20
	// jobs. You can specify a SEQUENTIAL type dependency without specifying a job
	// ID for array jobs so that each child array job completes sequentially,
	// starting at index 0. You can also specify an N_TO_N type dependency with a
	// job ID for array jobs. In that case, each index child of this job must wait
	// for the corresponding index child of each dependency to complete before it
	// can begin.
	DependsOn []BatchJobDependency

	// Additional parameters passed to the job that replace parameter substitution
	// placeholders that are set in the job definition. Parameters are specified as
	// a key and value pair mapping. Parameters included here override any
	// corresponding parameter defaults from the job definition.
	Parameters map[string]string

	// The retry strategy to use for failed jobs. When a retry strategy is
	// specified here, it overrides the retry strategy defined in the job
	// definition.
	RetryStrategy *BatchRetryStrategy

	noSmithyDocumentSerde
}
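
// Example (editor's sketch, not generated code): a Batch job target that
// submits a 50-index array job against the latest active revision of a
// hypothetical job definition.
//
//	batchTarget := types.PipeTargetBatchJobParameters{
//		JobDefinition:   aws.String("image-resize"),
//		JobName:         aws.String("resize-batch"),
//		ArrayProperties: &types.BatchArrayProperties{Size: 50},
//	}
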
// The parameters for using a CloudWatch Logs log stream as a target.
type PipeTargetCloudWatchLogsParameters struct {

	// The name of the log stream.
	LogStreamName *string

	// The time the event occurred, expressed as the number of milliseconds after
	// Jan 1, 1970 00:00:00 UTC.
	Timestamp *string

	noSmithyDocumentSerde
}

// The parameters for using an Amazon ECS task as a target.
type PipeTargetEcsTaskParameters struct {

	// The ARN of the task definition to use if the event target is an Amazon ECS
	// task.
	//
	// This member is required.
	TaskDefinitionArn *string

	// The capacity provider strategy to use for the task. If a
	// capacityProviderStrategy is specified, the launchType parameter must be
	// omitted. If no capacityProviderStrategy or launchType is specified, the
	// defaultCapacityProviderStrategy for the cluster is used.
	CapacityProviderStrategy []CapacityProviderStrategyItem

	// Specifies whether to enable Amazon ECS managed tags for the task. For more
	// information, see Tagging Your Amazon ECS Resources
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html)
	// in the Amazon Elastic Container Service Developer Guide.
	EnableECSManagedTags bool

	// Whether or not to enable the execute command functionality for the
	// containers in this task. If true, this enables execute command functionality
	// on all containers in the task.
	EnableExecuteCommand bool

	// Specifies an Amazon ECS task group for the task. The maximum length is 255
	// characters.
	Group *string

	// Specifies the launch type on which your task is running. The launch type
	// that you specify here must match one of the launch type (compatibilities) of
	// the target task. The FARGATE value is supported only in the Regions where
	// Fargate with Amazon ECS is supported. For more information, see Fargate on
	// Amazon ECS
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS-Fargate.html)
	// in the Amazon Elastic Container Service Developer Guide.
	LaunchType LaunchType

	// Use this structure if the Amazon ECS task uses the awsvpc network mode. This
	// structure specifies the VPC subnets and security groups associated with the
	// task, and whether a public IP address is to be used. This structure is
	// required if LaunchType is FARGATE because the awsvpc mode is required for
	// Fargate tasks. If you specify NetworkConfiguration when the target ECS task
	// does not use the awsvpc network mode, the task fails.
	NetworkConfiguration *NetworkConfiguration

	// The overrides that are associated with a task.
	Overrides *EcsTaskOverride

	// An array of placement constraint objects to use for the task. You can
	// specify up to 10 constraints per task (including constraints in the task
	// definition and those specified at runtime).
	PlacementConstraints []PlacementConstraint

	// The placement strategy objects to use for the task. You can specify a
	// maximum of five strategy rules per task.
	PlacementStrategy []PlacementStrategy

	// Specifies the platform version for the task. Specify only the numeric
	// portion of the platform version, such as 1.1.0. This structure is used only
	// if LaunchType is FARGATE. For more information about valid platform versions,
	// see Fargate Platform Versions
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html)
	// in the Amazon Elastic Container Service Developer Guide.
	PlatformVersion *string

	// Specifies whether to propagate the tags from the task definition to the
	// task. If no value is specified, the tags are not propagated. Tags can only be
	// propagated to the task during task creation. To add tags to a task after task
	// creation, use the TagResource API action.
	PropagateTags PropagateTags

	// The reference ID to use for the task.
	ReferenceId *string

	// The metadata that you apply to the task to help you categorize and organize
	// them. Each tag consists of a key and an optional value, both of which you
	// define. To learn more, see RunTask
	// (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html#ECS-RunTask-request-tags)
	// in the Amazon ECS API Reference.
	Tags []Tag

	// The number of tasks to create based on TaskDefinition. The default is 1.
	TaskCount *int32

	noSmithyDocumentSerde
}
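
// Example (editor's sketch, not generated code): an ECS task target on
// Fargate. NetworkConfiguration is populated because, as documented above,
// awsvpc networking is required when LaunchType is FARGATE. vpcConfig is the
// AwsVpcConfiguration sketched earlier, LaunchTypeFargate is the assumed
// generated constant, and the task definition ARN is a placeholder.
//
//	ecsTarget := types.PipeTargetEcsTaskParameters{
//		TaskDefinitionArn:    aws.String("arn:aws:ecs:us-east-1:111122223333:task-definition/worker:3"),
//		LaunchType:           types.LaunchTypeFargate,
//		NetworkConfiguration: &types.NetworkConfiguration{AwsvpcConfiguration: &vpcConfig},
//		TaskCount:            aws.Int32(1),
//	}
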
// The parameters for using an EventBridge event bus as a target.
type PipeTargetEventBridgeEventBusParameters struct {

	// A free-form string, with a maximum of 128 characters, used to decide what
	// fields to expect in the event detail.
	DetailType *string

	// The URL subdomain of the endpoint. For example, if the URL for Endpoint is
	// https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is
	// abcde.veo. When using Java, you must include auth-crt on the class path.
	EndpointId *string

	// Amazon Web Services resources, identified by Amazon Resource Name (ARN),
	// which the event primarily concerns. Any number, including zero, may be
	// present.
	Resources []string

	// The source of the event.
	Source *string

	// The time stamp of the event, per RFC3339
	// (https://www.rfc-editor.org/rfc/rfc3339.txt). If no time stamp is provided,
	// the time stamp of the PutEvents
	// (https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_PutEvents.html)
	// call is used.
	Time *string

	noSmithyDocumentSerde
}

// These are custom parameters to be used when the target is an API Gateway
// REST API or EventBridge ApiDestination.
type PipeTargetHttpParameters struct {

	// The headers that need to be sent as part of request invoking the API Gateway
	// REST API or EventBridge ApiDestination.
	HeaderParameters map[string]string

	// The path parameter values to be used to populate API Gateway REST API or
	// EventBridge ApiDestination path wildcards ("*").
	PathParameterValues []string

	// The query string keys/values that need to be sent as part of request
	// invoking the API Gateway REST API or EventBridge ApiDestination.
	QueryStringParameters map[string]string

	noSmithyDocumentSerde
}
// The parameters for using a Kinesis stream as a target.
type PipeTargetKinesisStreamParameters struct {

	// Determines which shard in the stream the data record is assigned to.
	// Partition keys are Unicode strings with a maximum length limit of 256
	// characters for each key. Amazon Kinesis Data Streams uses the partition key
	// as input to a hash function that maps the partition key and associated data
	// to a specific shard. Specifically, an MD5 hash function is used to map
	// partition keys to 128-bit integer values and to map associated data records
	// to shards. As a result of this hashing mechanism, all data records with the
	// same partition key map to the same shard within the stream.
	//
	// This member is required.
	PartitionKey *string

	noSmithyDocumentSerde
}

// The parameters for using a Lambda function as a target.
type PipeTargetLambdaFunctionParameters struct {

	// Choose from the following options.
	//   - RequestResponse (default) - Invoke the function synchronously. Keep the
	//     connection open until the function returns a response or times out. The
	//     API response includes the function response and additional data.
	//   - Event - Invoke the function asynchronously. Send events that fail
	//     multiple times to the function's dead-letter queue (if it's configured).
	//     The API response only includes a status code.
	//   - DryRun - Validate parameter values and verify that the user or role has
	//     permission to invoke the function.
	InvocationType PipeTargetInvocationType

	noSmithyDocumentSerde
}

// The parameters required to set up a target for your pipe.
type PipeTargetParameters struct {

	// The parameters for using a Batch job as a target.
	BatchJobParameters *PipeTargetBatchJobParameters

	// The parameters for using a CloudWatch Logs log stream as a target.
	CloudWatchLogsParameters *PipeTargetCloudWatchLogsParameters

	// The parameters for using an Amazon ECS task as a target.
	EcsTaskParameters *PipeTargetEcsTaskParameters

	// The parameters for using an EventBridge event bus as a target.
	EventBridgeEventBusParameters *PipeTargetEventBridgeEventBusParameters

	// These are custom parameters to be used when the target is an API Gateway
	// REST API or EventBridge ApiDestination.
	HttpParameters *PipeTargetHttpParameters

	// Valid JSON text passed to the target. In this case, nothing from the event
	// itself is passed to the target. For more information, see The JavaScript
	// Object Notation (JSON) Data Interchange Format
	// (http://www.rfc-editor.org/rfc/rfc7159.txt).
	InputTemplate *string

	// The parameters for using a Kinesis stream as a target.
	KinesisStreamParameters *PipeTargetKinesisStreamParameters

	// The parameters for using a Lambda function as a target.
	LambdaFunctionParameters *PipeTargetLambdaFunctionParameters

	// These are custom parameters to be used when the target is an Amazon
	// Redshift cluster to invoke the Amazon Redshift Data API ExecuteStatement.
	RedshiftDataParameters *PipeTargetRedshiftDataParameters

	// The parameters for using a SageMaker pipeline as a target.
	SageMakerPipelineParameters *PipeTargetSageMakerPipelineParameters

	// The parameters for using an Amazon SQS queue as a target.
	SqsQueueParameters *PipeTargetSqsQueueParameters

	// The parameters for using a Step Functions state machine as a target.
	StepFunctionStateMachineParameters *PipeTargetStateMachineParameters

	noSmithyDocumentSerde
}
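
// Example (editor's sketch, not generated code): wiring a target through
// PipeTargetParameters. The InputTemplate reshapes the event before delivery
// to a FIFO queue target; the <$.detail.orderId> placeholder follows the
// EventBridge input-transformer convention (an assumption worth verifying
// against the Pipes documentation), and the group ID is hypothetical.
//
//	target := types.PipeTargetParameters{
//		InputTemplate: aws.String(`{"orderId": <$.detail.orderId>}`),
//		SqsQueueParameters: &types.PipeTargetSqsQueueParameters{
//			MessageGroupId: aws.String("orders"),
//		},
//	}
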
// These are custom parameters to be used when the target is an Amazon Redshift
// cluster to invoke the Amazon Redshift Data API ExecuteStatement.
type PipeTargetRedshiftDataParameters struct {

	// The name of the database. Required when authenticating using temporary
	// credentials.
	//
	// This member is required.
	Database *string

	// The SQL statement text to run.
	//
	// This member is required.
	Sqls []string

	// The database user name. Required when authenticating using temporary
	// credentials.
	DbUser *string

	// The name or ARN of the secret that enables access to the database. Required
	// when authenticating using Secrets Manager.
	SecretManagerArn *string

	// The name of the SQL statement. You can name the SQL statement when you
	// create it to identify the query.
	StatementName *string

	// Indicates whether to send an event back to EventBridge after the SQL
	// statement runs.
	WithEvent bool

	noSmithyDocumentSerde
}

// The parameters for using a SageMaker pipeline as a target.
type PipeTargetSageMakerPipelineParameters struct {

	// List of Parameter names and values for SageMaker Model Building Pipeline
	// execution.
	PipelineParameterList []SageMakerPipelineParameter

	noSmithyDocumentSerde
}

// The parameters for using an Amazon SQS queue as a target.
type PipeTargetSqsQueueParameters struct {

	// This parameter applies only to FIFO (first-in-first-out) queues. The token
	// used for deduplication of sent messages.
	MessageDeduplicationId *string

	// The FIFO message group ID to use as the target.
	MessageGroupId *string

	noSmithyDocumentSerde
}

// The parameters for using a Step Functions state machine as a target.
type PipeTargetStateMachineParameters struct {

	// Specify whether to wait for the state machine to finish or not.
	InvocationType PipeTargetInvocationType

	noSmithyDocumentSerde
}

// An object representing a constraint on task placement. To learn more, see
// Task Placement Constraints
// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html)
// in the Amazon Elastic Container Service Developer Guide.
type PlacementConstraint struct {

	// A cluster query language expression to apply to the constraint. You cannot
	// specify an expression if the constraint type is distinctInstance. To learn
	// more, see Cluster Query Language
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html)
	// in the Amazon Elastic Container Service Developer Guide.
	Expression *string

	// The type of constraint. Use distinctInstance to ensure that each task in a
	// particular group is running on a different container instance. Use memberOf
	// to restrict the selection to a group of valid candidates.
	Type PlacementConstraintType

	noSmithyDocumentSerde
}

// The task placement strategy for a task or service. To learn more, see Task
// Placement Strategies
// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-strategies.html)
// in the Amazon Elastic Container Service Developer Guide.
type PlacementStrategy struct {

	// The field to apply the placement strategy against. For the spread placement
	// strategy, valid values are instanceId (or host, which has the same effect),
	// or any platform or custom attribute that is applied to a container instance,
	// such as attribute:ecs.availability-zone. For the binpack placement strategy,
	// valid values are cpu and memory. For the random placement strategy, this
	// field is not used.
	Field *string

	// The type of placement strategy. The random placement strategy randomly
	// places tasks on available candidates. The spread placement strategy spreads
	// placement across available candidates evenly based on the field parameter.
	// The binpack strategy places tasks on available candidates that have the
	// least available amount of the resource that is specified with the field
	// parameter. For example, if you binpack on memory, a task is placed on the
	// instance with the least amount of remaining memory (but still enough to run
	// the task).
	Type PlacementStrategyType

	noSmithyDocumentSerde
}
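
// Example (editor's sketch, not generated code): two placement strategy rules
// that spread tasks across Availability Zones and then binpack on memory,
// using the field values documented above. Enum constant names are assumed
// from enums.go.
//
//	strategy := []types.PlacementStrategy{
//		{Type: types.PlacementStrategyTypeSpread, Field: aws.String("attribute:ecs.availability-zone")},
//		{Type: types.PlacementStrategyTypeBinpack, Field: aws.String("memory")},
//	}
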
// Name/Value pair of a parameter to start execution of a SageMaker Model
// Building Pipeline.
type SageMakerPipelineParameter struct {

	// Name of parameter to start execution of a SageMaker Model Building Pipeline.
	//
	// This member is required.
	Name *string

	// Value of parameter to start execution of a SageMaker Model Building
	// Pipeline.
	//
	// This member is required.
	Value *string

	noSmithyDocumentSerde
}

// The Secrets Manager secret that stores your stream credentials.
//
// The following types satisfy this interface:
//
//	SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth
//	SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth
//	SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth
//	SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth
type SelfManagedKafkaAccessConfigurationCredentials interface {
	isSelfManagedKafkaAccessConfigurationCredentials()
}

// The ARN of the Secrets Manager secret.
type SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth struct {
	Value string

	noSmithyDocumentSerde
}

func (*SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth) isSelfManagedKafkaAccessConfigurationCredentials() {
}

// The ARN of the Secrets Manager secret.
type SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth struct {
	Value string

	noSmithyDocumentSerde
}

func (*SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth) isSelfManagedKafkaAccessConfigurationCredentials() {
}

// The ARN of the Secrets Manager secret.
type SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth struct {
	Value string

	noSmithyDocumentSerde
}

func (*SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth) isSelfManagedKafkaAccessConfigurationCredentials() {
}

// The ARN of the Secrets Manager secret.
type SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth struct {
	Value string

	noSmithyDocumentSerde
}

func (*SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth) isSelfManagedKafkaAccessConfigurationCredentials() {
}

// This structure specifies the VPC subnets and security groups for the stream,
// and whether a public IP address is to be used.
type SelfManagedKafkaAccessConfigurationVpc struct {

	// Specifies the security groups associated with the stream. These security
	// groups must all be in the same VPC. You can specify as many as five security
	// groups. If you do not specify a security group, the default security group
	// for the VPC is used.
	SecurityGroup []string

	// Specifies the subnets associated with the stream. These subnets must all be
	// in the same VPC. You can specify as many as 16 subnets.
	Subnets []string

	noSmithyDocumentSerde
}

// A key-value pair associated with an Amazon Web Services resource. In
// EventBridge, rules and event buses support tagging.
type Tag struct {

	// A string you can use to assign a value. The combination of tag keys and
	// values can help you organize and categorize your resources.
	//
	// This member is required.
	Key *string

	// The value for the specified tag key.
	//
	// This member is required.
	Value *string

	noSmithyDocumentSerde
}
// The parameters for using an Active MQ broker as a source.
type UpdatePipeSourceActiveMQBrokerParameters struct {

	// The credentials needed to access the resource.
	//
	// This member is required.
	Credentials MQBrokerAccessCredentials

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	noSmithyDocumentSerde
}

// The parameters for using a DynamoDB stream as a source.
type UpdatePipeSourceDynamoDBStreamParameters struct {

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// Define the target queue to send dead-letter queue events to.
	DeadLetterConfig *DeadLetterConfig

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	// (Streams only) Discard records older than the specified age. The default
	// value is -1, which sets the maximum age to infinite. When the value is set to
	// infinite, EventBridge never discards old records.
	MaximumRecordAgeInSeconds *int32

	// (Streams only) Discard records after the specified number of retries. The
	// default value is -1, which sets the maximum number of retries to infinite.
	// When MaximumRetryAttempts is infinite, EventBridge retries failed records
	// until the record expires in the event source.
	MaximumRetryAttempts *int32

	// (Streams only) Define how to handle item process failures. AUTOMATIC_BISECT
	// halves each batch and retries each half until all the records are processed
	// or there is one failed message left in the batch.
	OnPartialBatchItemFailure OnPartialBatchItemFailureStreams

	// (Streams only) The number of batches to process concurrently from each
	// shard. The default value is 1.
	ParallelizationFactor *int32

	noSmithyDocumentSerde
}

// The parameters for using a Kinesis stream as a source.
type UpdatePipeSourceKinesisStreamParameters struct {

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// Define the target queue to send dead-letter queue events to.
	DeadLetterConfig *DeadLetterConfig

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	// (Streams only) Discard records older than the specified age. The default
	// value is -1, which sets the maximum age to infinite. When the value is set to
	// infinite, EventBridge never discards old records.
	MaximumRecordAgeInSeconds *int32

	// (Streams only) Discard records after the specified number of retries. The
	// default value is -1, which sets the maximum number of retries to infinite.
	// When MaximumRetryAttempts is infinite, EventBridge retries failed records
	// until the record expires in the event source.
	MaximumRetryAttempts *int32

	// (Streams only) Define how to handle item process failures. AUTOMATIC_BISECT
	// halves each batch and retries each half until all the records are processed
	// or there is one failed message left in the batch.
	OnPartialBatchItemFailure OnPartialBatchItemFailureStreams

	// (Streams only) The number of batches to process concurrently from each
	// shard. The default value is 1.
	ParallelizationFactor *int32

	noSmithyDocumentSerde
}
// The parameters for using an MSK stream as a source.
type UpdatePipeSourceManagedStreamingKafkaParameters struct {

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// The credentials needed to access the resource.
	Credentials MSKAccessCredentials

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	noSmithyDocumentSerde
}

// The parameters required to set up a source for your pipe.
type UpdatePipeSourceParameters struct {

	// The parameters for using an Active MQ broker as a source.
	ActiveMQBrokerParameters *UpdatePipeSourceActiveMQBrokerParameters

	// The parameters for using a DynamoDB stream as a source.
	DynamoDBStreamParameters *UpdatePipeSourceDynamoDBStreamParameters

	// The collection of event patterns used to filter events. For more
	// information, see Events and Event Patterns
	// (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html)
	// in the Amazon EventBridge User Guide.
	FilterCriteria *FilterCriteria

	// The parameters for using a Kinesis stream as a source.
	KinesisStreamParameters *UpdatePipeSourceKinesisStreamParameters

	// The parameters for using an MSK stream as a source.
	ManagedStreamingKafkaParameters *UpdatePipeSourceManagedStreamingKafkaParameters

	// The parameters for using a Rabbit MQ broker as a source.
	RabbitMQBrokerParameters *UpdatePipeSourceRabbitMQBrokerParameters

	// The parameters for using a self-managed Apache Kafka stream as a source.
	SelfManagedKafkaParameters *UpdatePipeSourceSelfManagedKafkaParameters

	// The parameters for using an Amazon SQS queue as a source.
	SqsQueueParameters *UpdatePipeSourceSqsQueueParameters

	noSmithyDocumentSerde
}

// The parameters for using a Rabbit MQ broker as a source.
type UpdatePipeSourceRabbitMQBrokerParameters struct {

	// The credentials needed to access the resource.
	//
	// This member is required.
	Credentials MQBrokerAccessCredentials

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	noSmithyDocumentSerde
}

// The parameters for using a self-managed Apache Kafka stream as a source.
type UpdatePipeSourceSelfManagedKafkaParameters struct {

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// The credentials needed to access the resource.
	Credentials SelfManagedKafkaAccessConfigurationCredentials

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	// The ARN of the Secrets Manager secret used for certification.
	ServerRootCaCertificate *string

	// This structure specifies the VPC subnets and security groups for the stream,
	// and whether a public IP address is to be used.
	Vpc *SelfManagedKafkaAccessConfigurationVpc

	noSmithyDocumentSerde
}

// The parameters for using an Amazon SQS queue as a source.
type UpdatePipeSourceSqsQueueParameters struct {

	// The maximum number of records to include in each batch.
	BatchSize *int32

	// The maximum length of time to wait for events.
	MaximumBatchingWindowInSeconds *int32

	noSmithyDocumentSerde
}

// Indicates that an error has occurred while performing a validate operation.
type ValidationExceptionField struct {

	// The message of the exception.
	//
	// This member is required.
	Message *string

	// The name of the exception.
	//
	// This member is required.
	Name *string

	noSmithyDocumentSerde
}

type noSmithyDocumentSerde = smithydocument.NoSerde

// UnknownUnionMember is returned when a union member is returned over the
// wire, but has an unknown tag.
type UnknownUnionMember struct {
	Tag   string
	Value []byte

	noSmithyDocumentSerde
}

func (*UnknownUnionMember) isMQBrokerAccessCredentials() {}

func (*UnknownUnionMember) isMSKAccessCredentials() {}

func (*UnknownUnionMember) isSelfManagedKafkaAccessConfigurationCredentials() {}
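
// Example (editor's sketch, not generated code): a caller inspecting a
// MQBrokerAccessCredentials union value, including the UnknownUnionMember
// case that a newer service version can produce.
//
//	switch v := creds.(type) {
//	case *types.MQBrokerAccessCredentialsMemberBasicAuth:
//		fmt.Println("secret ARN:", v.Value)
//	case *types.UnknownUnionMember:
//		fmt.Println("unrecognized union tag:", v.Tag)
//	default:
//		fmt.Println("no credentials set")
//	}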