/*
 * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.glue.model;

import java.io.Serializable;
import javax.annotation.Generated;

import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;

/**
 * <p>
 * Additional options for streaming.
 * </p>
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/KafkaStreamingSourceOptions" target="_top">AWS
 *      API Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class KafkaStreamingSourceOptions implements Serializable, Cloneable, StructuredPojo {
    /**
     * A list of bootstrap server URLs, for example, as
     * <code>b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094</code>. This option must be specified in the
     * API call or defined in the table metadata in the Data Catalog.
     */
    private String bootstrapServers;

    /**
     * The protocol used to communicate with brokers. The possible values are <code>"SSL"</code> or
     * <code>"PLAINTEXT"</code>.
     */
    private String securityProtocol;

    /**
     * The name of the connection.
     */
    private String connectionName;
    /**
     * The topic name as specified in Apache Kafka. You must specify at least one of <code>"topicName"</code>,
     * <code>"assign"</code> or <code>"subscribePattern"</code>.
     */
    private String topicName;

    /**
     * The specific <code>TopicPartitions</code> to consume. You must specify at least one of
     * <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code>.
     */
    private String assign;

    /**
     * A Java regex string that identifies the topic list to subscribe to. You must specify at least one of
     * <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code>.
     */
    private String subscribePattern;

    /**
     * An optional classification.
     */
    private String classification;

    /**
     * Specifies the delimiter character.
     */
    private String delimiter;
    /**
     * The starting position in the Kafka topic to read data from. The possible values are <code>"earliest"</code> or
     * <code>"latest"</code>. The default value is <code>"latest"</code>.
     */
    private String startingOffsets;

    /**
     * The end point when a batch query is ended. Possible values are either <code>"latest"</code> or a JSON string
     * that specifies an ending offset for each <code>TopicPartition</code>.
     */
    private String endingOffsets;

    /**
     * The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is
     * <code>512</code>.
     */
    private Long pollTimeoutMs;

    /**
     * The number of times to retry before failing to fetch Kafka offsets. The default value is <code>3</code>.
     */
    private Integer numRetries;

    /**
     * The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is <code>10</code>.
     */
    private Long retryIntervalMs;

    /**
     * The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total
     * number of offsets is proportionally split across <code>topicPartitions</code> of different volumes. The default
     * value is null, which means that the consumer reads all offsets until the known latest offset.
     */
    private Long maxOffsetsPerTrigger;

    /**
     * The desired minimum number of partitions to read from Kafka. The default value is null, which means that the
     * number of Spark partitions is equal to the number of Kafka partitions.
     */
    private Integer minPartitions;
    /**
     * Whether to include the Kafka headers. When the option is set to "true", the data output will contain an
     * additional column named "glue_streaming_kafka_headers" with type
     * <code>Array[Struct(key: String, value: String)]</code>. The default value is "false". This option is available
     * in Glue version 3.0 or later only.
     */
    private Boolean includeHeaders;

    /**
     * When this option is set to 'true', the data output will contain an additional column named "__src_timestamp"
     * that indicates the time when the corresponding record was received by the topic. The default value is 'false'.
     * This option is supported in Glue version 4.0 or later.
     */
    private String addRecordTimestamp;

    /**
     * When this option is set to 'true', for each batch, it will emit the metrics for the duration between the
     * oldest record received by the topic and the time it arrives in Glue to CloudWatch. The metric's name is
     * "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue
     * version 4.0 or later.
     */
    private String emitConsumerLagMetrics;

    /**
     * The timestamp of the record in the Kafka topic to start reading data from. The possible values are a timestamp
     * string in UTC format of the pattern <code>yyyy-mm-ddTHH:MM:SSZ</code> (where Z represents a UTC timezone offset
     * with a +/-. For example: "2023-04-04T08:00:00+08:00").
     * <p>
     * Only one of <code>StartingTimestamp</code> or <code>StartingOffsets</code> must be set.
     * </p>
     */
    private java.util.Date startingTimestamp;
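    // Usage sketch, illustrative only: the broker address, connection name and topic below are
    // placeholder values, not defaults of this API. Options are normally populated through the
    // fluent with* methods defined further down and then attached to a Glue streaming source:
    //
    //   KafkaStreamingSourceOptions options = new KafkaStreamingSourceOptions()
    //           .withConnectionName("my-kafka-connection")
    //           .withTopicName("my-topic")
    //           .withSecurityProtocol("SSL")
    //           .withStartingOffsets("earliest")
    //           .withPollTimeoutMs(512L)
    //           .withNumRetries(3);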
    /**
     * A list of bootstrap server URLs, for example, as
     * <code>b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094</code>. This option must be specified in the
     * API call or defined in the table metadata in the Data Catalog.
     *
     * @param bootstrapServers
     *        the list of bootstrap server URLs; must be specified in the API call or defined in the table metadata
     *        in the Data Catalog
     */
public void setBootstrapServers(String bootstrapServers) {
this.bootstrapServers = bootstrapServers;
}
    /**
     * A list of bootstrap server URLs, for example, as
     * <code>b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094</code>. This option must be specified in the
     * API call or defined in the table metadata in the Data Catalog.
     *
     * @return the list of bootstrap server URLs
     */
public String getBootstrapServers() {
return this.bootstrapServers;
}
    /**
     * A list of bootstrap server URLs, for example, as
     * <code>b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094</code>. This option must be specified in the
     * API call or defined in the table metadata in the Data Catalog.
     *
     * @param bootstrapServers
     *        the list of bootstrap server URLs; must be specified in the API call or defined in the table metadata
     *        in the Data Catalog
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withBootstrapServers(String bootstrapServers) {
setBootstrapServers(bootstrapServers);
return this;
}
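    // A hedged sketch: multiple brokers are conventionally passed as a single comma-separated string
    // (the host names below are placeholders following the example in the Javadoc above):
    //
    //   options.withBootstrapServers(
    //           "b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094,"
    //                   + "b-2.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094");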
    /**
     * The protocol used to communicate with brokers. The possible values are <code>"SSL"</code> or
     * <code>"PLAINTEXT"</code>.
     *
     * @param securityProtocol
     *        the protocol used to communicate with brokers, either <code>"SSL"</code> or <code>"PLAINTEXT"</code>
     */
public void setSecurityProtocol(String securityProtocol) {
this.securityProtocol = securityProtocol;
}
    /**
     * The protocol used to communicate with brokers. The possible values are <code>"SSL"</code> or
     * <code>"PLAINTEXT"</code>.
     *
     * @return the protocol used to communicate with brokers, either <code>"SSL"</code> or <code>"PLAINTEXT"</code>
     */
public String getSecurityProtocol() {
return this.securityProtocol;
}
    /**
     * The protocol used to communicate with brokers. The possible values are <code>"SSL"</code> or
     * <code>"PLAINTEXT"</code>.
     *
     * @param securityProtocol
     *        the protocol used to communicate with brokers, either <code>"SSL"</code> or <code>"PLAINTEXT"</code>
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withSecurityProtocol(String securityProtocol) {
setSecurityProtocol(securityProtocol);
return this;
}
/**
* * The name of the connection. *
* * @param connectionName * The name of the connection. */ public void setConnectionName(String connectionName) { this.connectionName = connectionName; } /** ** The name of the connection. *
* * @return The name of the connection. */ public String getConnectionName() { return this.connectionName; } /** ** The name of the connection. *
* * @param connectionName * The name of the connection. * @return Returns a reference to this object so that method calls can be chained together. */ public KafkaStreamingSourceOptions withConnectionName(String connectionName) { setConnectionName(connectionName); return this; } /** *
     * The topic name as specified in Apache Kafka. You must specify at least one of <code>"topicName"</code>,
     * <code>"assign"</code> or <code>"subscribePattern"</code>.
     *
     * @param topicName
     *        the topic name as specified in Apache Kafka; at least one of <code>"topicName"</code>,
     *        <code>"assign"</code> or <code>"subscribePattern"</code> must be specified
     */
public void setTopicName(String topicName) {
this.topicName = topicName;
}
    /**
     * The topic name as specified in Apache Kafka. You must specify at least one of <code>"topicName"</code>,
     * <code>"assign"</code> or <code>"subscribePattern"</code>.
     *
     * @return the topic name as specified in Apache Kafka
     */
public String getTopicName() {
return this.topicName;
}
    /**
     * The topic name as specified in Apache Kafka. You must specify at least one of <code>"topicName"</code>,
     * <code>"assign"</code> or <code>"subscribePattern"</code>.
     *
     * @param topicName
     *        the topic name as specified in Apache Kafka; at least one of <code>"topicName"</code>,
     *        <code>"assign"</code> or <code>"subscribePattern"</code> must be specified
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withTopicName(String topicName) {
setTopicName(topicName);
return this;
}
    /**
     * The specific <code>TopicPartitions</code> to consume. You must specify at least one of
     * <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code>.
     *
     * @param assign
     *        the specific <code>TopicPartitions</code> to consume; at least one of <code>"topicName"</code>,
     *        <code>"assign"</code> or <code>"subscribePattern"</code> must be specified
     */
public void setAssign(String assign) {
this.assign = assign;
}
    /**
     * The specific <code>TopicPartitions</code> to consume. You must specify at least one of
     * <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code>.
     *
     * @return the specific <code>TopicPartitions</code> to consume
     */
public String getAssign() {
return this.assign;
}
    /**
     * The specific <code>TopicPartitions</code> to consume. You must specify at least one of
     * <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code>.
     *
     * @param assign
     *        the specific <code>TopicPartitions</code> to consume; at least one of <code>"topicName"</code>,
     *        <code>"assign"</code> or <code>"subscribePattern"</code> must be specified
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withAssign(String assign) {
setAssign(assign);
return this;
}
    /**
     * A Java regex string that identifies the topic list to subscribe to. You must specify at least one of
     * <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code>.
     *
     * @param subscribePattern
     *        a Java regex string that identifies the topic list to subscribe to; at least one of
     *        <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code> must be specified
     */
public void setSubscribePattern(String subscribePattern) {
this.subscribePattern = subscribePattern;
}
    /**
     * A Java regex string that identifies the topic list to subscribe to. You must specify at least one of
     * <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code>.
     *
     * @return the Java regex string that identifies the topic list to subscribe to
     */
public String getSubscribePattern() {
return this.subscribePattern;
}
    /**
     * A Java regex string that identifies the topic list to subscribe to. You must specify at least one of
     * <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code>.
     *
     * @param subscribePattern
     *        a Java regex string that identifies the topic list to subscribe to; at least one of
     *        <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code> must be specified
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withSubscribePattern(String subscribePattern) {
setSubscribePattern(subscribePattern);
return this;
}
/**
* * An optional classification. *
* * @param classification * An optional classification. */ public void setClassification(String classification) { this.classification = classification; } /** ** An optional classification. *
* * @return An optional classification. */ public String getClassification() { return this.classification; } /** ** An optional classification. *
* * @param classification * An optional classification. * @return Returns a reference to this object so that method calls can be chained together. */ public KafkaStreamingSourceOptions withClassification(String classification) { setClassification(classification); return this; } /** ** Specifies the delimiter character. *
* * @param delimiter * Specifies the delimiter character. */ public void setDelimiter(String delimiter) { this.delimiter = delimiter; } /** ** Specifies the delimiter character. *
* * @return Specifies the delimiter character. */ public String getDelimiter() { return this.delimiter; } /** ** Specifies the delimiter character. *
* * @param delimiter * Specifies the delimiter character. * @return Returns a reference to this object so that method calls can be chained together. */ public KafkaStreamingSourceOptions withDelimiter(String delimiter) { setDelimiter(delimiter); return this; } /** *
     * The starting position in the Kafka topic to read data from. The possible values are <code>"earliest"</code> or
     * <code>"latest"</code>. The default value is <code>"latest"</code>.
     *
     * @param startingOffsets
     *        the starting position in the Kafka topic, <code>"earliest"</code> or <code>"latest"</code> (the default)
     */
public void setStartingOffsets(String startingOffsets) {
this.startingOffsets = startingOffsets;
}
    /**
     * The starting position in the Kafka topic to read data from. The possible values are <code>"earliest"</code> or
     * <code>"latest"</code>. The default value is <code>"latest"</code>.
     *
     * @return the starting position in the Kafka topic, <code>"earliest"</code> or <code>"latest"</code>
     */
public String getStartingOffsets() {
return this.startingOffsets;
}
    /**
     * The starting position in the Kafka topic to read data from. The possible values are <code>"earliest"</code> or
     * <code>"latest"</code>. The default value is <code>"latest"</code>.
     *
     * @param startingOffsets
     *        the starting position in the Kafka topic, <code>"earliest"</code> or <code>"latest"</code> (the default)
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withStartingOffsets(String startingOffsets) {
setStartingOffsets(startingOffsets);
return this;
}
    /**
     * The end point when a batch query is ended. Possible values are either <code>"latest"</code> or a JSON string
     * that specifies an ending offset for each <code>TopicPartition</code>.
     *
     * @param endingOffsets
     *        either <code>"latest"</code> or a JSON string that specifies an ending offset for each
     *        <code>TopicPartition</code>
     */
public void setEndingOffsets(String endingOffsets) {
this.endingOffsets = endingOffsets;
}
    /**
     * The end point when a batch query is ended. Possible values are either <code>"latest"</code> or a JSON string
     * that specifies an ending offset for each <code>TopicPartition</code>.
     *
     * @return either <code>"latest"</code> or a JSON string that specifies an ending offset for each
     *         <code>TopicPartition</code>
     */
public String getEndingOffsets() {
return this.endingOffsets;
}
    /**
     * The end point when a batch query is ended. Possible values are either <code>"latest"</code> or a JSON string
     * that specifies an ending offset for each <code>TopicPartition</code>.
     *
     * @param endingOffsets
     *        either <code>"latest"</code> or a JSON string that specifies an ending offset for each
     *        <code>TopicPartition</code>
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withEndingOffsets(String endingOffsets) {
setEndingOffsets(endingOffsets);
return this;
}
    /**
     * The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is
     * <code>512</code>.
     *
     * @param pollTimeoutMs
     *        the poll timeout in milliseconds (default <code>512</code>)
     */
public void setPollTimeoutMs(Long pollTimeoutMs) {
this.pollTimeoutMs = pollTimeoutMs;
}
    /**
     * The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is
     * <code>512</code>.
     *
     * @return the poll timeout in milliseconds
     */
public Long getPollTimeoutMs() {
return this.pollTimeoutMs;
}
    /**
     * The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is
     * <code>512</code>.
     *
     * @param pollTimeoutMs
     *        the poll timeout in milliseconds (default <code>512</code>)
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withPollTimeoutMs(Long pollTimeoutMs) {
setPollTimeoutMs(pollTimeoutMs);
return this;
}
    /**
     * The number of times to retry before failing to fetch Kafka offsets. The default value is <code>3</code>.
     *
     * @param numRetries
     *        the number of retries before failing to fetch Kafka offsets (default <code>3</code>)
     */
public void setNumRetries(Integer numRetries) {
this.numRetries = numRetries;
}
    /**
     * The number of times to retry before failing to fetch Kafka offsets. The default value is <code>3</code>.
     *
     * @return the number of retries before failing to fetch Kafka offsets
     */
public Integer getNumRetries() {
return this.numRetries;
}
    /**
     * The number of times to retry before failing to fetch Kafka offsets. The default value is <code>3</code>.
     *
     * @param numRetries
     *        the number of retries before failing to fetch Kafka offsets (default <code>3</code>)
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withNumRetries(Integer numRetries) {
setNumRetries(numRetries);
return this;
}
    /**
     * The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is <code>10</code>.
     *
     * @param retryIntervalMs
     *        the wait time in milliseconds between offset-fetch retries (default <code>10</code>)
     */
public void setRetryIntervalMs(Long retryIntervalMs) {
this.retryIntervalMs = retryIntervalMs;
}
    /**
     * The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is <code>10</code>.
     *
     * @return the wait time in milliseconds between offset-fetch retries
     */
public Long getRetryIntervalMs() {
return this.retryIntervalMs;
}
    /**
     * The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is <code>10</code>.
     *
     * @param retryIntervalMs
     *        the wait time in milliseconds between offset-fetch retries (default <code>10</code>)
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withRetryIntervalMs(Long retryIntervalMs) {
setRetryIntervalMs(retryIntervalMs);
return this;
}
    /**
     * The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total
     * number of offsets is proportionally split across <code>topicPartitions</code> of different volumes. The default
     * value is null, which means that the consumer reads all offsets until the known latest offset.
     *
     * @param maxOffsetsPerTrigger
     *        the maximum number of offsets processed per trigger interval, or null to read all offsets until the
     *        known latest offset
     */
public void setMaxOffsetsPerTrigger(Long maxOffsetsPerTrigger) {
this.maxOffsetsPerTrigger = maxOffsetsPerTrigger;
}
    /**
     * The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total
     * number of offsets is proportionally split across <code>topicPartitions</code> of different volumes. The default
     * value is null, which means that the consumer reads all offsets until the known latest offset.
     *
     * @return the maximum number of offsets processed per trigger interval, or null to read all offsets until the
     *         known latest offset
     */
public Long getMaxOffsetsPerTrigger() {
return this.maxOffsetsPerTrigger;
}
    /**
     * The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total
     * number of offsets is proportionally split across <code>topicPartitions</code> of different volumes. The default
     * value is null, which means that the consumer reads all offsets until the known latest offset.
     *
     * @param maxOffsetsPerTrigger
     *        the maximum number of offsets processed per trigger interval, or null to read all offsets until the
     *        known latest offset
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withMaxOffsetsPerTrigger(Long maxOffsetsPerTrigger) {
setMaxOffsetsPerTrigger(maxOffsetsPerTrigger);
return this;
}
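    // Illustrative note, not part of the service contract: the cap is split proportionally across
    // partitions by backlog. For example, with a cap of 1000 offsets over two topicPartitions whose
    // backlogs are roughly in a 3:1 ratio, about 750 and 250 offsets would be read respectively:
    //
    //   options.withMaxOffsetsPerTrigger(1000L);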
/**
* * The desired minimum number of partitions to read from Kafka. The default value is null, which means that the * number of spark partitions is equal to the number of Kafka partitions. *
* * @param minPartitions * The desired minimum number of partitions to read from Kafka. The default value is null, which means that * the number of spark partitions is equal to the number of Kafka partitions. */ public void setMinPartitions(Integer minPartitions) { this.minPartitions = minPartitions; } /** ** The desired minimum number of partitions to read from Kafka. The default value is null, which means that the * number of spark partitions is equal to the number of Kafka partitions. *
* * @return The desired minimum number of partitions to read from Kafka. The default value is null, which means that * the number of spark partitions is equal to the number of Kafka partitions. */ public Integer getMinPartitions() { return this.minPartitions; } /** ** The desired minimum number of partitions to read from Kafka. The default value is null, which means that the * number of spark partitions is equal to the number of Kafka partitions. *
* * @param minPartitions * The desired minimum number of partitions to read from Kafka. The default value is null, which means that * the number of spark partitions is equal to the number of Kafka partitions. * @return Returns a reference to this object so that method calls can be chained together. */ public KafkaStreamingSourceOptions withMinPartitions(Integer minPartitions) { setMinPartitions(minPartitions); return this; } /** *
     * Whether to include the Kafka headers. When the option is set to "true", the data output will contain an
     * additional column named "glue_streaming_kafka_headers" with type
     * <code>Array[Struct(key: String, value: String)]</code>. The default value is "false". This option is available
     * in Glue version 3.0 or later only.
     *
     * @param includeHeaders
     *        whether to include the Kafka headers as the "glue_streaming_kafka_headers" column (default "false")
     */
public void setIncludeHeaders(Boolean includeHeaders) {
this.includeHeaders = includeHeaders;
}
    /**
     * Whether to include the Kafka headers. When the option is set to "true", the data output will contain an
     * additional column named "glue_streaming_kafka_headers" with type
     * <code>Array[Struct(key: String, value: String)]</code>. The default value is "false". This option is available
     * in Glue version 3.0 or later only.
     *
     * @return whether to include the Kafka headers as the "glue_streaming_kafka_headers" column
     */
public Boolean getIncludeHeaders() {
return this.includeHeaders;
}
    /**
     * Whether to include the Kafka headers. When the option is set to "true", the data output will contain an
     * additional column named "glue_streaming_kafka_headers" with type
     * <code>Array[Struct(key: String, value: String)]</code>. The default value is "false". This option is available
     * in Glue version 3.0 or later only.
     *
     * @param includeHeaders
     *        whether to include the Kafka headers as the "glue_streaming_kafka_headers" column (default "false")
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withIncludeHeaders(Boolean includeHeaders) {
setIncludeHeaders(includeHeaders);
return this;
}
    /**
     * Whether to include the Kafka headers. When the option is set to "true", the data output will contain an
     * additional column named "glue_streaming_kafka_headers" with type
     * <code>Array[Struct(key: String, value: String)]</code>. The default value is "false". This option is available
     * in Glue version 3.0 or later only.
     *
     * @return whether to include the Kafka headers as the "glue_streaming_kafka_headers" column
     */
public Boolean isIncludeHeaders() {
return this.includeHeaders;
}
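    // Illustrative only: when includeHeaders is true, each output record gains a
    // "glue_streaming_kafka_headers" column of type Array[Struct(key: String, value: String)]
    // (Glue 3.0 or later), e.g.:
    //
    //   options.withIncludeHeaders(Boolean.TRUE);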
/**
* * When this option is set to 'true', the data output will contain an additional column named "__src_timestamp" that * indicates the time when the corresponding record received by the topic. The default value is 'false'. This option * is supported in Glue version 4.0 or later. *
* * @param addRecordTimestamp * When this option is set to 'true', the data output will contain an additional column named * "__src_timestamp" that indicates the time when the corresponding record received by the topic. The default * value is 'false'. This option is supported in Glue version 4.0 or later. */ public void setAddRecordTimestamp(String addRecordTimestamp) { this.addRecordTimestamp = addRecordTimestamp; } /** ** When this option is set to 'true', the data output will contain an additional column named "__src_timestamp" that * indicates the time when the corresponding record received by the topic. The default value is 'false'. This option * is supported in Glue version 4.0 or later. *
* * @return When this option is set to 'true', the data output will contain an additional column named * "__src_timestamp" that indicates the time when the corresponding record received by the topic. The * default value is 'false'. This option is supported in Glue version 4.0 or later. */ public String getAddRecordTimestamp() { return this.addRecordTimestamp; } /** ** When this option is set to 'true', the data output will contain an additional column named "__src_timestamp" that * indicates the time when the corresponding record received by the topic. The default value is 'false'. This option * is supported in Glue version 4.0 or later. *
* * @param addRecordTimestamp * When this option is set to 'true', the data output will contain an additional column named * "__src_timestamp" that indicates the time when the corresponding record received by the topic. The default * value is 'false'. This option is supported in Glue version 4.0 or later. * @return Returns a reference to this object so that method calls can be chained together. */ public KafkaStreamingSourceOptions withAddRecordTimestamp(String addRecordTimestamp) { setAddRecordTimestamp(addRecordTimestamp); return this; } /** ** When this option is set to 'true', for each batch, it will emit the metrics for the duration between the oldest * record received by the topic and the time it arrives in Glue to CloudWatch. The metric's name is * "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue * version 4.0 or later. *
* * @param emitConsumerLagMetrics * When this option is set to 'true', for each batch, it will emit the metrics for the duration between the * oldest record received by the topic and the time it arrives in Glue to CloudWatch. The metric's name is * "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue * version 4.0 or later. */ public void setEmitConsumerLagMetrics(String emitConsumerLagMetrics) { this.emitConsumerLagMetrics = emitConsumerLagMetrics; } /** ** When this option is set to 'true', for each batch, it will emit the metrics for the duration between the oldest * record received by the topic and the time it arrives in Glue to CloudWatch. The metric's name is * "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue * version 4.0 or later. *
* * @return When this option is set to 'true', for each batch, it will emit the metrics for the duration between the * oldest record received by the topic and the time it arrives in Glue to CloudWatch. The metric's name is * "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in * Glue version 4.0 or later. */ public String getEmitConsumerLagMetrics() { return this.emitConsumerLagMetrics; } /** ** When this option is set to 'true', for each batch, it will emit the metrics for the duration between the oldest * record received by the topic and the time it arrives in Glue to CloudWatch. The metric's name is * "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue * version 4.0 or later. *
* * @param emitConsumerLagMetrics * When this option is set to 'true', for each batch, it will emit the metrics for the duration between the * oldest record received by the topic and the time it arrives in Glue to CloudWatch. The metric's name is * "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue * version 4.0 or later. * @return Returns a reference to this object so that method calls can be chained together. */ public KafkaStreamingSourceOptions withEmitConsumerLagMetrics(String emitConsumerLagMetrics) { setEmitConsumerLagMetrics(emitConsumerLagMetrics); return this; } /** *
     * The timestamp of the record in the Kafka topic to start reading data from. The possible values are a timestamp
     * string in UTC format of the pattern <code>yyyy-mm-ddTHH:MM:SSZ</code> (where Z represents a UTC timezone offset
     * with a +/-. For example: "2023-04-04T08:00:00+08:00").
     * <p>
     * Only one of <code>StartingTimestamp</code> or <code>StartingOffsets</code> must be set.
     * </p>
     *
     * @param startingTimestamp
     *        the timestamp of the record in the Kafka topic to start reading data from; only one of
     *        <code>StartingTimestamp</code> or <code>StartingOffsets</code> must be set
     */
public void setStartingTimestamp(java.util.Date startingTimestamp) {
this.startingTimestamp = startingTimestamp;
}
    /**
     * The timestamp of the record in the Kafka topic to start reading data from. The possible values are a timestamp
     * string in UTC format of the pattern <code>yyyy-mm-ddTHH:MM:SSZ</code> (where Z represents a UTC timezone offset
     * with a +/-. For example: "2023-04-04T08:00:00+08:00").
     * <p>
     * Only one of <code>StartingTimestamp</code> or <code>StartingOffsets</code> must be set.
     * </p>
     *
     * @return the timestamp of the record in the Kafka topic to start reading data from
     */
public java.util.Date getStartingTimestamp() {
return this.startingTimestamp;
}
    /**
     * The timestamp of the record in the Kafka topic to start reading data from. The possible values are a timestamp
     * string in UTC format of the pattern <code>yyyy-mm-ddTHH:MM:SSZ</code> (where Z represents a UTC timezone offset
     * with a +/-. For example: "2023-04-04T08:00:00+08:00").
     * <p>
     * Only one of <code>StartingTimestamp</code> or <code>StartingOffsets</code> must be set.
     * </p>
     *
     * @param startingTimestamp
     *        the timestamp of the record in the Kafka topic to start reading data from; only one of
     *        <code>StartingTimestamp</code> or <code>StartingOffsets</code> must be set
     * @return Returns a reference to this object so that method calls can be chained together.
     */
public KafkaStreamingSourceOptions withStartingTimestamp(java.util.Date startingTimestamp) {
setStartingTimestamp(startingTimestamp);
return this;
}
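    // A hedged sketch of the two mutually exclusive ways to choose a starting position (the timestamp
    // value below is illustrative only):
    //
    //   // either start from a named position ...
    //   options.withStartingOffsets("earliest");
    //   // ... or start from a point in time (do not combine with startingOffsets)
    //   options.withStartingTimestamp(
    //           java.util.Date.from(java.time.Instant.parse("2023-04-04T00:00:00Z")));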
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getBootstrapServers() != null)
sb.append("BootstrapServers: ").append(getBootstrapServers()).append(",");
if (getSecurityProtocol() != null)
sb.append("SecurityProtocol: ").append(getSecurityProtocol()).append(",");
if (getConnectionName() != null)
sb.append("ConnectionName: ").append(getConnectionName()).append(",");
if (getTopicName() != null)
sb.append("TopicName: ").append(getTopicName()).append(",");
if (getAssign() != null)
sb.append("Assign: ").append(getAssign()).append(",");
if (getSubscribePattern() != null)
sb.append("SubscribePattern: ").append(getSubscribePattern()).append(",");
if (getClassification() != null)
sb.append("Classification: ").append(getClassification()).append(",");
if (getDelimiter() != null)
sb.append("Delimiter: ").append(getDelimiter()).append(",");
if (getStartingOffsets() != null)
sb.append("StartingOffsets: ").append(getStartingOffsets()).append(",");
if (getEndingOffsets() != null)
sb.append("EndingOffsets: ").append(getEndingOffsets()).append(",");
if (getPollTimeoutMs() != null)
sb.append("PollTimeoutMs: ").append(getPollTimeoutMs()).append(",");
if (getNumRetries() != null)
sb.append("NumRetries: ").append(getNumRetries()).append(",");
if (getRetryIntervalMs() != null)
sb.append("RetryIntervalMs: ").append(getRetryIntervalMs()).append(",");
if (getMaxOffsetsPerTrigger() != null)
sb.append("MaxOffsetsPerTrigger: ").append(getMaxOffsetsPerTrigger()).append(",");
if (getMinPartitions() != null)
sb.append("MinPartitions: ").append(getMinPartitions()).append(",");
if (getIncludeHeaders() != null)
sb.append("IncludeHeaders: ").append(getIncludeHeaders()).append(",");
if (getAddRecordTimestamp() != null)
sb.append("AddRecordTimestamp: ").append(getAddRecordTimestamp()).append(",");
if (getEmitConsumerLagMetrics() != null)
sb.append("EmitConsumerLagMetrics: ").append(getEmitConsumerLagMetrics()).append(",");
if (getStartingTimestamp() != null)
sb.append("StartingTimestamp: ").append(getStartingTimestamp());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof KafkaStreamingSourceOptions == false)
return false;
KafkaStreamingSourceOptions other = (KafkaStreamingSourceOptions) obj;
if (other.getBootstrapServers() == null ^ this.getBootstrapServers() == null)
return false;
if (other.getBootstrapServers() != null && other.getBootstrapServers().equals(this.getBootstrapServers()) == false)
return false;
if (other.getSecurityProtocol() == null ^ this.getSecurityProtocol() == null)
return false;
if (other.getSecurityProtocol() != null && other.getSecurityProtocol().equals(this.getSecurityProtocol()) == false)
return false;
if (other.getConnectionName() == null ^ this.getConnectionName() == null)
return false;
if (other.getConnectionName() != null && other.getConnectionName().equals(this.getConnectionName()) == false)
return false;
if (other.getTopicName() == null ^ this.getTopicName() == null)
return false;
if (other.getTopicName() != null && other.getTopicName().equals(this.getTopicName()) == false)
return false;
if (other.getAssign() == null ^ this.getAssign() == null)
return false;
if (other.getAssign() != null && other.getAssign().equals(this.getAssign()) == false)
return false;
if (other.getSubscribePattern() == null ^ this.getSubscribePattern() == null)
return false;
if (other.getSubscribePattern() != null && other.getSubscribePattern().equals(this.getSubscribePattern()) == false)
return false;
if (other.getClassification() == null ^ this.getClassification() == null)
return false;
if (other.getClassification() != null && other.getClassification().equals(this.getClassification()) == false)
return false;
if (other.getDelimiter() == null ^ this.getDelimiter() == null)
return false;
if (other.getDelimiter() != null && other.getDelimiter().equals(this.getDelimiter()) == false)
return false;
if (other.getStartingOffsets() == null ^ this.getStartingOffsets() == null)
return false;
if (other.getStartingOffsets() != null && other.getStartingOffsets().equals(this.getStartingOffsets()) == false)
return false;
if (other.getEndingOffsets() == null ^ this.getEndingOffsets() == null)
return false;
if (other.getEndingOffsets() != null && other.getEndingOffsets().equals(this.getEndingOffsets()) == false)
return false;
if (other.getPollTimeoutMs() == null ^ this.getPollTimeoutMs() == null)
return false;
if (other.getPollTimeoutMs() != null && other.getPollTimeoutMs().equals(this.getPollTimeoutMs()) == false)
return false;
if (other.getNumRetries() == null ^ this.getNumRetries() == null)
return false;
if (other.getNumRetries() != null && other.getNumRetries().equals(this.getNumRetries()) == false)
return false;
if (other.getRetryIntervalMs() == null ^ this.getRetryIntervalMs() == null)
return false;
if (other.getRetryIntervalMs() != null && other.getRetryIntervalMs().equals(this.getRetryIntervalMs()) == false)
return false;
if (other.getMaxOffsetsPerTrigger() == null ^ this.getMaxOffsetsPerTrigger() == null)
return false;
if (other.getMaxOffsetsPerTrigger() != null && other.getMaxOffsetsPerTrigger().equals(this.getMaxOffsetsPerTrigger()) == false)
return false;
if (other.getMinPartitions() == null ^ this.getMinPartitions() == null)
return false;
if (other.getMinPartitions() != null && other.getMinPartitions().equals(this.getMinPartitions()) == false)
return false;
if (other.getIncludeHeaders() == null ^ this.getIncludeHeaders() == null)
return false;
if (other.getIncludeHeaders() != null && other.getIncludeHeaders().equals(this.getIncludeHeaders()) == false)
return false;
if (other.getAddRecordTimestamp() == null ^ this.getAddRecordTimestamp() == null)
return false;
if (other.getAddRecordTimestamp() != null && other.getAddRecordTimestamp().equals(this.getAddRecordTimestamp()) == false)
return false;
if (other.getEmitConsumerLagMetrics() == null ^ this.getEmitConsumerLagMetrics() == null)
return false;
if (other.getEmitConsumerLagMetrics() != null && other.getEmitConsumerLagMetrics().equals(this.getEmitConsumerLagMetrics()) == false)
return false;
if (other.getStartingTimestamp() == null ^ this.getStartingTimestamp() == null)
return false;
if (other.getStartingTimestamp() != null && other.getStartingTimestamp().equals(this.getStartingTimestamp()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getBootstrapServers() == null) ? 0 : getBootstrapServers().hashCode());
hashCode = prime * hashCode + ((getSecurityProtocol() == null) ? 0 : getSecurityProtocol().hashCode());
hashCode = prime * hashCode + ((getConnectionName() == null) ? 0 : getConnectionName().hashCode());
hashCode = prime * hashCode + ((getTopicName() == null) ? 0 : getTopicName().hashCode());
hashCode = prime * hashCode + ((getAssign() == null) ? 0 : getAssign().hashCode());
hashCode = prime * hashCode + ((getSubscribePattern() == null) ? 0 : getSubscribePattern().hashCode());
hashCode = prime * hashCode + ((getClassification() == null) ? 0 : getClassification().hashCode());
hashCode = prime * hashCode + ((getDelimiter() == null) ? 0 : getDelimiter().hashCode());
hashCode = prime * hashCode + ((getStartingOffsets() == null) ? 0 : getStartingOffsets().hashCode());
hashCode = prime * hashCode + ((getEndingOffsets() == null) ? 0 : getEndingOffsets().hashCode());
hashCode = prime * hashCode + ((getPollTimeoutMs() == null) ? 0 : getPollTimeoutMs().hashCode());
hashCode = prime * hashCode + ((getNumRetries() == null) ? 0 : getNumRetries().hashCode());
hashCode = prime * hashCode + ((getRetryIntervalMs() == null) ? 0 : getRetryIntervalMs().hashCode());
hashCode = prime * hashCode + ((getMaxOffsetsPerTrigger() == null) ? 0 : getMaxOffsetsPerTrigger().hashCode());
hashCode = prime * hashCode + ((getMinPartitions() == null) ? 0 : getMinPartitions().hashCode());
hashCode = prime * hashCode + ((getIncludeHeaders() == null) ? 0 : getIncludeHeaders().hashCode());
hashCode = prime * hashCode + ((getAddRecordTimestamp() == null) ? 0 : getAddRecordTimestamp().hashCode());
hashCode = prime * hashCode + ((getEmitConsumerLagMetrics() == null) ? 0 : getEmitConsumerLagMetrics().hashCode());
hashCode = prime * hashCode + ((getStartingTimestamp() == null) ? 0 : getStartingTimestamp().hashCode());
return hashCode;
}
@Override
public KafkaStreamingSourceOptions clone() {
try {
return (KafkaStreamingSourceOptions) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.glue.model.transform.KafkaStreamingSourceOptionsMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}