/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#pragma once
#include <aws/glue/Glue_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/DateTime.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace Glue
{
namespace Model
{

  /**
   * Additional options for streaming.
   *
   * See Also: AWS API Reference
   */
  class KafkaStreamingSourceOptions
  {
  public:
    AWS_GLUE_API KafkaStreamingSourceOptions();
    AWS_GLUE_API KafkaStreamingSourceOptions(Aws::Utils::Json::JsonView jsonValue);
    AWS_GLUE_API KafkaStreamingSourceOptions& operator=(Aws::Utils::Json::JsonView jsonValue);
    AWS_GLUE_API Aws::Utils::Json::JsonValue Jsonize() const;
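
    /*
     * Illustrative usage sketch (editor's addition, not generated SDK
     * documentation): a Kafka streaming source is typically configured by
     * chaining the With* setters below and then serialized with Jsonize().
     * The connection and topic names are placeholders.
     *
     *   KafkaStreamingSourceOptions options;
     *   options.WithConnectionName("my-kafka-connection")
     *          .WithTopicName("orders")
     *          .WithStartingOffsets("earliest");
     *   Aws::Utils::Json::JsonValue asJson = options.Jsonize();
     */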

    ///@{
    /**
     * A list of bootstrap server URLs, for example
     * b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094. This option
     * must be specified in the API call or defined in the table metadata in the
     * Data Catalog.
     */
    inline const Aws::String& GetBootstrapServers() const{ return m_bootstrapServers; }
    inline bool BootstrapServersHasBeenSet() const { return m_bootstrapServersHasBeenSet; }
    inline void SetBootstrapServers(const Aws::String& value) { m_bootstrapServersHasBeenSet = true; m_bootstrapServers = value; }
    inline void SetBootstrapServers(Aws::String&& value) { m_bootstrapServersHasBeenSet = true; m_bootstrapServers = std::move(value); }
    inline void SetBootstrapServers(const char* value) { m_bootstrapServersHasBeenSet = true; m_bootstrapServers.assign(value); }
    inline KafkaStreamingSourceOptions& WithBootstrapServers(const Aws::String& value) { SetBootstrapServers(value); return *this;}
    inline KafkaStreamingSourceOptions& WithBootstrapServers(Aws::String&& value) { SetBootstrapServers(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithBootstrapServers(const char* value) { SetBootstrapServers(value); return *this;}
    ///@}
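
    /*
     * Illustrative sketch (editor's addition): Kafka clients conventionally
     * accept several bootstrap servers as one comma-separated string; assuming
     * that convention applies to this option, the broker endpoints below are
     * placeholders.
     *
     *   KafkaStreamingSourceOptions options;
     *   options.SetBootstrapServers(
     *       "b-1.example.kafka.us-east-1.amazonaws.com:9094,"
     *       "b-2.example.kafka.us-east-1.amazonaws.com:9094");
     */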

    ///@{
    /**
     * The protocol used to communicate with brokers. The possible values are
     * "SSL" or "PLAINTEXT".
     */
    inline const Aws::String& GetSecurityProtocol() const{ return m_securityProtocol; }
    inline bool SecurityProtocolHasBeenSet() const { return m_securityProtocolHasBeenSet; }
    inline void SetSecurityProtocol(const Aws::String& value) { m_securityProtocolHasBeenSet = true; m_securityProtocol = value; }
    inline void SetSecurityProtocol(Aws::String&& value) { m_securityProtocolHasBeenSet = true; m_securityProtocol = std::move(value); }
    inline void SetSecurityProtocol(const char* value) { m_securityProtocolHasBeenSet = true; m_securityProtocol.assign(value); }
    inline KafkaStreamingSourceOptions& WithSecurityProtocol(const Aws::String& value) { SetSecurityProtocol(value); return *this;}
    inline KafkaStreamingSourceOptions& WithSecurityProtocol(Aws::String&& value) { SetSecurityProtocol(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithSecurityProtocol(const char* value) { SetSecurityProtocol(value); return *this;}
    ///@}

    ///@{
    /**
     * The name of the connection.
     */
    inline const Aws::String& GetConnectionName() const{ return m_connectionName; }
    inline bool ConnectionNameHasBeenSet() const { return m_connectionNameHasBeenSet; }
    inline void SetConnectionName(const Aws::String& value) { m_connectionNameHasBeenSet = true; m_connectionName = value; }
    inline void SetConnectionName(Aws::String&& value) { m_connectionNameHasBeenSet = true; m_connectionName = std::move(value); }
    inline void SetConnectionName(const char* value) { m_connectionNameHasBeenSet = true; m_connectionName.assign(value); }
    inline KafkaStreamingSourceOptions& WithConnectionName(const Aws::String& value) { SetConnectionName(value); return *this;}
    inline KafkaStreamingSourceOptions& WithConnectionName(Aws::String&& value) { SetConnectionName(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithConnectionName(const char* value) { SetConnectionName(value); return *this;}
    ///@}

    ///@{
    /**
     * The topic name as specified in Apache Kafka. You must specify at least one
     * of "topicName", "assign" or "subscribePattern".
     */
    inline const Aws::String& GetTopicName() const{ return m_topicName; }
    inline bool TopicNameHasBeenSet() const { return m_topicNameHasBeenSet; }
    inline void SetTopicName(const Aws::String& value) { m_topicNameHasBeenSet = true; m_topicName = value; }
    inline void SetTopicName(Aws::String&& value) { m_topicNameHasBeenSet = true; m_topicName = std::move(value); }
    inline void SetTopicName(const char* value) { m_topicNameHasBeenSet = true; m_topicName.assign(value); }
    inline KafkaStreamingSourceOptions& WithTopicName(const Aws::String& value) { SetTopicName(value); return *this;}
    inline KafkaStreamingSourceOptions& WithTopicName(Aws::String&& value) { SetTopicName(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithTopicName(const char* value) { SetTopicName(value); return *this;}
    ///@}

    ///@{
    /**
     * The specific TopicPartitions to consume. You must specify at least one of
     * "topicName", "assign" or "subscribePattern".
     */
    inline const Aws::String& GetAssign() const{ return m_assign; }
    inline bool AssignHasBeenSet() const { return m_assignHasBeenSet; }
    inline void SetAssign(const Aws::String& value) { m_assignHasBeenSet = true; m_assign = value; }
    inline void SetAssign(Aws::String&& value) { m_assignHasBeenSet = true; m_assign = std::move(value); }
    inline void SetAssign(const char* value) { m_assignHasBeenSet = true; m_assign.assign(value); }
    inline KafkaStreamingSourceOptions& WithAssign(const Aws::String& value) { SetAssign(value); return *this;}
    inline KafkaStreamingSourceOptions& WithAssign(Aws::String&& value) { SetAssign(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithAssign(const char* value) { SetAssign(value); return *this;}
    ///@}
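
    /*
     * Illustrative sketch (editor's addition): in the underlying Spark/Kafka
     * integration, "assign" is a JSON string that maps topic names to partition
     * numbers; assuming that format applies here, the topic and partitions below
     * are placeholders.
     *
     *   KafkaStreamingSourceOptions options;
     *   options.SetAssign(R"({"orders": [0, 1, 2]})");  // consume only partitions 0-2 of "orders"
     */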

    ///@{
    /**
     * A Java regex string that identifies the topic list to subscribe to. You
     * must specify at least one of "topicName", "assign" or "subscribePattern".
     */
    inline const Aws::String& GetSubscribePattern() const{ return m_subscribePattern; }
    inline bool SubscribePatternHasBeenSet() const { return m_subscribePatternHasBeenSet; }
    inline void SetSubscribePattern(const Aws::String& value) { m_subscribePatternHasBeenSet = true; m_subscribePattern = value; }
    inline void SetSubscribePattern(Aws::String&& value) { m_subscribePatternHasBeenSet = true; m_subscribePattern = std::move(value); }
    inline void SetSubscribePattern(const char* value) { m_subscribePatternHasBeenSet = true; m_subscribePattern.assign(value); }
    inline KafkaStreamingSourceOptions& WithSubscribePattern(const Aws::String& value) { SetSubscribePattern(value); return *this;}
    inline KafkaStreamingSourceOptions& WithSubscribePattern(Aws::String&& value) { SetSubscribePattern(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithSubscribePattern(const char* value) { SetSubscribePattern(value); return *this;}
    ///@}
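
    /*
     * Illustrative sketch (editor's addition): at least one of "topicName",
     * "assign" or "subscribePattern" must be provided -- a literal topic name, an
     * explicit partition assignment, or a Java regex matched against topic names.
     * The pattern below is a placeholder.
     *
     *   KafkaStreamingSourceOptions options;
     *   options.SetSubscribePattern("orders-.*");  // every topic whose name starts with "orders-"
     */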

    ///@{
    /**
     * An optional classification.
     */
    inline const Aws::String& GetClassification() const{ return m_classification; }
    inline bool ClassificationHasBeenSet() const { return m_classificationHasBeenSet; }
    inline void SetClassification(const Aws::String& value) { m_classificationHasBeenSet = true; m_classification = value; }
    inline void SetClassification(Aws::String&& value) { m_classificationHasBeenSet = true; m_classification = std::move(value); }
    inline void SetClassification(const char* value) { m_classificationHasBeenSet = true; m_classification.assign(value); }
    inline KafkaStreamingSourceOptions& WithClassification(const Aws::String& value) { SetClassification(value); return *this;}
    inline KafkaStreamingSourceOptions& WithClassification(Aws::String&& value) { SetClassification(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithClassification(const char* value) { SetClassification(value); return *this;}
    ///@}

    ///@{
    /**
     * Specifies the delimiter character.
     */
    inline const Aws::String& GetDelimiter() const{ return m_delimiter; }
    inline bool DelimiterHasBeenSet() const { return m_delimiterHasBeenSet; }
    inline void SetDelimiter(const Aws::String& value) { m_delimiterHasBeenSet = true; m_delimiter = value; }
    inline void SetDelimiter(Aws::String&& value) { m_delimiterHasBeenSet = true; m_delimiter = std::move(value); }
    inline void SetDelimiter(const char* value) { m_delimiterHasBeenSet = true; m_delimiter.assign(value); }
    inline KafkaStreamingSourceOptions& WithDelimiter(const Aws::String& value) { SetDelimiter(value); return *this;}
    inline KafkaStreamingSourceOptions& WithDelimiter(Aws::String&& value) { SetDelimiter(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithDelimiter(const char* value) { SetDelimiter(value); return *this;}
    ///@}
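
    /*
     * Illustrative sketch (editor's addition): classification and delimiter
     * describe how record payloads are parsed. The values below assume CSV-style
     * records separated by a pipe character and are placeholders.
     *
     *   KafkaStreamingSourceOptions options;
     *   options.SetClassification("csv");
     *   options.SetDelimiter("|");
     */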

    ///@{
    /**
     * The starting position in the Kafka topic to read data from. The possible
     * values are "earliest" or "latest". The default value is "latest".
     */
    inline const Aws::String& GetStartingOffsets() const{ return m_startingOffsets; }
    inline bool StartingOffsetsHasBeenSet() const { return m_startingOffsetsHasBeenSet; }
    inline void SetStartingOffsets(const Aws::String& value) { m_startingOffsetsHasBeenSet = true; m_startingOffsets = value; }
    inline void SetStartingOffsets(Aws::String&& value) { m_startingOffsetsHasBeenSet = true; m_startingOffsets = std::move(value); }
    inline void SetStartingOffsets(const char* value) { m_startingOffsetsHasBeenSet = true; m_startingOffsets.assign(value); }
    inline KafkaStreamingSourceOptions& WithStartingOffsets(const Aws::String& value) { SetStartingOffsets(value); return *this;}
    inline KafkaStreamingSourceOptions& WithStartingOffsets(Aws::String&& value) { SetStartingOffsets(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithStartingOffsets(const char* value) { SetStartingOffsets(value); return *this;}
    ///@}
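
    /*
     * Illustrative sketch (editor's addition): the starting position can be given
     * either as "earliest"/"latest" here or as a timestamp via
     * SetStartingTimestamp (declared later in this class); only one of the two
     * should be set. The date below is a placeholder.
     *
     *   KafkaStreamingSourceOptions options;
     *   options.SetStartingOffsets("earliest");  // replay the topic from the beginning
     *   // or, alternatively:
     *   // options.SetStartingTimestamp(Aws::Utils::DateTime("2023-04-04T08:00:00Z", Aws::Utils::DateFormat::ISO_8601));
     */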

    ///@{
    /**
     * The end point at which a batch query is ended. Possible values are either
     * "latest" or a JSON string that specifies an ending offset for each
     * TopicPartition.
     */
    inline const Aws::String& GetEndingOffsets() const{ return m_endingOffsets; }
    inline bool EndingOffsetsHasBeenSet() const { return m_endingOffsetsHasBeenSet; }
    inline void SetEndingOffsets(const Aws::String& value) { m_endingOffsetsHasBeenSet = true; m_endingOffsets = value; }
    inline void SetEndingOffsets(Aws::String&& value) { m_endingOffsetsHasBeenSet = true; m_endingOffsets = std::move(value); }
    inline void SetEndingOffsets(const char* value) { m_endingOffsetsHasBeenSet = true; m_endingOffsets.assign(value); }
    inline KafkaStreamingSourceOptions& WithEndingOffsets(const Aws::String& value) { SetEndingOffsets(value); return *this;}
    inline KafkaStreamingSourceOptions& WithEndingOffsets(Aws::String&& value) { SetEndingOffsets(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithEndingOffsets(const char* value) { SetEndingOffsets(value); return *this;}
    ///@}
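
    /*
     * Illustrative sketch (editor's addition): the per-partition JSON form
     * follows the Spark structured-streaming convention of topic -> {partition:
     * offset}, with -1 meaning "latest"; that convention is an assumption here,
     * and the topic and offsets below are placeholders.
     *
     *   KafkaStreamingSourceOptions options;
     *   options.SetEndingOffsets(R"({"orders": {"0": 5000, "1": -1}})");
     */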

    ///@{
    /**
     * The timeout in milliseconds to poll data from Kafka in Spark job executors.
     * The default value is 512.
     */
    inline long long GetPollTimeoutMs() const{ return m_pollTimeoutMs; }
    inline bool PollTimeoutMsHasBeenSet() const { return m_pollTimeoutMsHasBeenSet; }
    inline void SetPollTimeoutMs(long long value) { m_pollTimeoutMsHasBeenSet = true; m_pollTimeoutMs = value; }
    inline KafkaStreamingSourceOptions& WithPollTimeoutMs(long long value) { SetPollTimeoutMs(value); return *this;}
    ///@}

    ///@{
    /**
     * The number of times to retry before failing to fetch Kafka offsets. The
     * default value is 3.
     */
    inline int GetNumRetries() const{ return m_numRetries; }
    inline bool NumRetriesHasBeenSet() const { return m_numRetriesHasBeenSet; }
    inline void SetNumRetries(int value) { m_numRetriesHasBeenSet = true; m_numRetries = value; }
    inline KafkaStreamingSourceOptions& WithNumRetries(int value) { SetNumRetries(value); return *this;}
    ///@}

    ///@{
    /**
     * The time in milliseconds to wait before retrying to fetch Kafka offsets.
     * The default value is 10.
     */
    inline long long GetRetryIntervalMs() const{ return m_retryIntervalMs; }
    inline bool RetryIntervalMsHasBeenSet() const { return m_retryIntervalMsHasBeenSet; }
    inline void SetRetryIntervalMs(long long value) { m_retryIntervalMsHasBeenSet = true; m_retryIntervalMs = value; }
    inline KafkaStreamingSourceOptions& WithRetryIntervalMs(long long value) { SetRetryIntervalMs(value); return *this;}
    ///@}
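
    /*
     * Illustrative sketch (editor's addition): the three options above control
     * how offset fetches behave -- the per-poll timeout, the retry count, and the
     * wait between retries. The values below are placeholders.
     *
     *   KafkaStreamingSourceOptions options;
     *   options.SetPollTimeoutMs(1000);   // wait up to 1 s per poll
     *   options.SetNumRetries(5);         // retry offset fetches up to 5 times
     *   options.SetRetryIntervalMs(100);  // wait 100 ms between retries
     */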

    ///@{
    /**
     * The rate limit on the maximum number of offsets that are processed per
     * trigger interval. The specified total number of offsets is proportionally
     * split across topicPartitions of different volumes. The default value is
     * null, which means that the consumer reads all offsets until the known
     * latest offset.
     */
    inline long long GetMaxOffsetsPerTrigger() const{ return m_maxOffsetsPerTrigger; }
    inline bool MaxOffsetsPerTriggerHasBeenSet() const { return m_maxOffsetsPerTriggerHasBeenSet; }
    inline void SetMaxOffsetsPerTrigger(long long value) { m_maxOffsetsPerTriggerHasBeenSet = true; m_maxOffsetsPerTrigger = value; }
    inline KafkaStreamingSourceOptions& WithMaxOffsetsPerTrigger(long long value) { SetMaxOffsetsPerTrigger(value); return *this;}
    ///@}

    ///@{
    /**
     * The desired minimum number of partitions to read from Kafka. The default
     * value is null, which means that the number of Spark partitions is equal to
     * the number of Kafka partitions.
     */
    inline int GetMinPartitions() const{ return m_minPartitions; }
    inline bool MinPartitionsHasBeenSet() const { return m_minPartitionsHasBeenSet; }
    inline void SetMinPartitions(int value) { m_minPartitionsHasBeenSet = true; m_minPartitions = value; }
    inline KafkaStreamingSourceOptions& WithMinPartitions(int value) { SetMinPartitions(value); return *this;}
    ///@}
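
    /*
     * Illustrative sketch (editor's addition): the two options above bound
     * per-batch throughput and read parallelism. The numbers below are
     * placeholders.
     *
     *   KafkaStreamingSourceOptions options;
     *   options.SetMaxOffsetsPerTrigger(100000);  // process at most 100,000 offsets per trigger
     *   options.SetMinPartitions(16);             // ask for at least 16 Spark partitions
     */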

    ///@{
    /**
     * Whether to include the Kafka headers. When the option is set to "true", the
     * data output will contain an additional column named
     * "glue_streaming_kafka_headers" with type Array[Struct(key: String, value:
     * String)]. The default value is "false". This option is available in Glue
     * version 3.0 or later only.
     */
    inline bool GetIncludeHeaders() const{ return m_includeHeaders; }
    inline bool IncludeHeadersHasBeenSet() const { return m_includeHeadersHasBeenSet; }
    inline void SetIncludeHeaders(bool value) { m_includeHeadersHasBeenSet = true; m_includeHeaders = value; }
    inline KafkaStreamingSourceOptions& WithIncludeHeaders(bool value) { SetIncludeHeaders(value); return *this;}
    ///@}
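
    /*
     * Illustrative sketch (editor's addition): enabling header capture adds a
     * "glue_streaming_kafka_headers" column of type Array[Struct(key: String,
     * value: String)] to the output in Glue 3.0 or later.
     *
     *   KafkaStreamingSourceOptions options;
     *   options.SetIncludeHeaders(true);
     */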

    ///@{
    /**
     * When this option is set to 'true', the data output will contain an
     * additional column named "__src_timestamp" that indicates the time when the
     * corresponding record was received by the topic. The default value is
     * 'false'. This option is supported in Glue version 4.0 or later.
     */
    inline const Aws::String& GetAddRecordTimestamp() const{ return m_addRecordTimestamp; }
    inline bool AddRecordTimestampHasBeenSet() const { return m_addRecordTimestampHasBeenSet; }
    inline void SetAddRecordTimestamp(const Aws::String& value) { m_addRecordTimestampHasBeenSet = true; m_addRecordTimestamp = value; }
    inline void SetAddRecordTimestamp(Aws::String&& value) { m_addRecordTimestampHasBeenSet = true; m_addRecordTimestamp = std::move(value); }
    inline void SetAddRecordTimestamp(const char* value) { m_addRecordTimestampHasBeenSet = true; m_addRecordTimestamp.assign(value); }
    inline KafkaStreamingSourceOptions& WithAddRecordTimestamp(const Aws::String& value) { SetAddRecordTimestamp(value); return *this;}
    inline KafkaStreamingSourceOptions& WithAddRecordTimestamp(Aws::String&& value) { SetAddRecordTimestamp(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithAddRecordTimestamp(const char* value) { SetAddRecordTimestamp(value); return *this;}
    ///@}

    ///@{
    /**
     * When this option is set to 'true', for each batch, it emits to CloudWatch
     * the metrics for the duration between the oldest record received by the
     * topic and the time it arrives in Glue. The metric's name is
     * "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'.
     * This option is supported in Glue version 4.0 or later.
     */
    inline const Aws::String& GetEmitConsumerLagMetrics() const{ return m_emitConsumerLagMetrics; }
    inline bool EmitConsumerLagMetricsHasBeenSet() const { return m_emitConsumerLagMetricsHasBeenSet; }
    inline void SetEmitConsumerLagMetrics(const Aws::String& value) { m_emitConsumerLagMetricsHasBeenSet = true; m_emitConsumerLagMetrics = value; }
    inline void SetEmitConsumerLagMetrics(Aws::String&& value) { m_emitConsumerLagMetricsHasBeenSet = true; m_emitConsumerLagMetrics = std::move(value); }
    inline void SetEmitConsumerLagMetrics(const char* value) { m_emitConsumerLagMetricsHasBeenSet = true; m_emitConsumerLagMetrics.assign(value); }
    inline KafkaStreamingSourceOptions& WithEmitConsumerLagMetrics(const Aws::String& value) { SetEmitConsumerLagMetrics(value); return *this;}
    inline KafkaStreamingSourceOptions& WithEmitConsumerLagMetrics(Aws::String&& value) { SetEmitConsumerLagMetrics(std::move(value)); return *this;}
    inline KafkaStreamingSourceOptions& WithEmitConsumerLagMetrics(const char* value) { SetEmitConsumerLagMetrics(value); return *this;}
    ///@}
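
    /*
     * Illustrative sketch (editor's addition): unlike IncludeHeaders, the
     * AddRecordTimestamp and EmitConsumerLagMetrics options are modeled as
     * strings, so they are enabled with the literal "true".
     *
     *   KafkaStreamingSourceOptions options;
     *   options.SetAddRecordTimestamp("true");      // adds the __src_timestamp column
     *   options.SetEmitConsumerLagMetrics("true");  // emits glue.driver.streaming.maxConsumerLagInMs
     */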

    ///@{
    /**
     * The timestamp of the record in the Kafka topic to start reading data from.
     * The possible values are a timestamp string in UTC format of the pattern
     * yyyy-mm-ddTHH:MM:SSZ, where Z represents a UTC timezone offset with a +/-
     * sign (for example: "2023-04-04T08:00:00+08:00"). Only one of
     * StartingTimestamp or StartingOffsets must be set.
     */
    inline const Aws::Utils::DateTime& GetStartingTimestamp() const{ return m_startingTimestamp; }
    inline bool StartingTimestampHasBeenSet() const { return m_startingTimestampHasBeenSet; }
    inline void SetStartingTimestamp(const Aws::Utils::DateTime& value) { m_startingTimestampHasBeenSet = true; m_startingTimestamp = value; }
    inline void SetStartingTimestamp(Aws::Utils::DateTime&& value) { m_startingTimestampHasBeenSet = true; m_startingTimestamp = std::move(value); }
    inline KafkaStreamingSourceOptions& WithStartingTimestamp(const Aws::Utils::DateTime& value) { SetStartingTimestamp(value); return *this;}
    inline KafkaStreamingSourceOptions& WithStartingTimestamp(Aws::Utils::DateTime&& value) { SetStartingTimestamp(std::move(value)); return *this;}
    ///@}

  private:

    Aws::String m_bootstrapServers;
    bool m_bootstrapServersHasBeenSet = false;

    Aws::String m_securityProtocol;
    bool m_securityProtocolHasBeenSet = false;

    Aws::String m_connectionName;
    bool m_connectionNameHasBeenSet = false;

    Aws::String m_topicName;
    bool m_topicNameHasBeenSet = false;

    Aws::String m_assign;
    bool m_assignHasBeenSet = false;

    Aws::String m_subscribePattern;
    bool m_subscribePatternHasBeenSet = false;

    Aws::String m_classification;
    bool m_classificationHasBeenSet = false;

    Aws::String m_delimiter;
    bool m_delimiterHasBeenSet = false;

    Aws::String m_startingOffsets;
    bool m_startingOffsetsHasBeenSet = false;

    Aws::String m_endingOffsets;
    bool m_endingOffsetsHasBeenSet = false;

    long long m_pollTimeoutMs;
    bool m_pollTimeoutMsHasBeenSet = false;

    int m_numRetries;
    bool m_numRetriesHasBeenSet = false;

    long long m_retryIntervalMs;
    bool m_retryIntervalMsHasBeenSet = false;

    long long m_maxOffsetsPerTrigger;
    bool m_maxOffsetsPerTriggerHasBeenSet = false;

    int m_minPartitions;
    bool m_minPartitionsHasBeenSet = false;

    bool m_includeHeaders;
    bool m_includeHeadersHasBeenSet = false;

    Aws::String m_addRecordTimestamp;
    bool m_addRecordTimestampHasBeenSet = false;

    Aws::String m_emitConsumerLagMetrics;
    bool m_emitConsumerLagMetricsHasBeenSet = false;

    Aws::Utils::DateTime m_startingTimestamp;
    bool m_startingTimestampHasBeenSet = false;
  };

} // namespace Model
} // namespace Glue
} // namespace Aws