/*
 * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.glue.model;

import java.io.Serializable;
import javax.annotation.Generated;

import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;

/**
 * <p>
 * Additional connection options for the connector.
 * </p>
 *
 * @see AWS API Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class JDBCConnectorOptions implements Serializable, Cloneable, StructuredPojo {

    /**
     * <p>
     * Extra condition clause to filter data from the source. For example:
     * </p>
     * <p>
     * <code>BillingCity='Mountain View'</code>
     * </p>
     * <p>
     * When using a query instead of a table name, you should validate that the query works with the specified
     * <code>filterPredicate</code>.
     * </p>
     */
    private String filterPredicate;

    /**
     * <p>
     * The name of an integer column that is used for partitioning. This option works only when it's included with
     * <code>lowerBound</code>, <code>upperBound</code>, and <code>numPartitions</code>. This option works the same
     * way as in the Spark SQL JDBC reader.
     * </p>
     */
    private String partitionColumn;

    /**
     * <p>
     * The minimum value of <code>partitionColumn</code> that is used to decide partition stride.
     * </p>
     */
    private Long lowerBound;

    /**
     * <p>
     * The maximum value of <code>partitionColumn</code> that is used to decide partition stride.
     * </p>
     */
    private Long upperBound;

    /**
     * <p>
     * The number of partitions. This value, along with <code>lowerBound</code> (inclusive) and
     * <code>upperBound</code> (exclusive), forms partition strides for generated <code>WHERE</code> clause
     * expressions that are used to split the <code>partitionColumn</code>.
     * </p>
     */
    private Long numPartitions;

    /**
     * <p>
     * The name of the job bookmark keys on which to sort.
     * </p>
     */
    private java.util.List<String> jobBookmarkKeys;

    /**
     * <p>
     * Specifies an ascending or descending sort order.
     * </p>
     */
    private String jobBookmarkKeysSortOrder;

    /**
     * <p>
     * Custom data type mapping that builds a mapping from a JDBC data type to a Glue data type. For example, the
     * option <code>"dataTypeMapping":{"FLOAT":"STRING"}</code> maps data fields of JDBC type <code>FLOAT</code> into
     * the Java <code>String</code> type by calling the <code>ResultSet.getString()</code> method of the driver, and
     * uses it to build the Glue record. The <code>ResultSet</code> object is implemented by each driver, so the
     * behavior is specific to the driver you use. Refer to the documentation for your JDBC driver to understand how
     * the driver performs the conversions.
     * </p>
     */
    private java.util.Map<String, String> dataTypeMapping;

    /**
     * <p>
     * Extra condition clause to filter data from the source. For example:
     * </p>
     * <p>
     * <code>BillingCity='Mountain View'</code>
     * </p>
     * <p>
     * When using a query instead of a table name, you should validate that the query works with the specified
     * <code>filterPredicate</code>.
     * </p>
     *
     * @param filterPredicate
     *        Extra condition clause to filter data from the source. For example:
     *        <p>
     *        <code>BillingCity='Mountain View'</code>
     *        </p>
     *        <p>
     *        When using a query instead of a table name, you should validate that the query works with the specified
     *        <code>filterPredicate</code>.
*/
public void setFilterPredicate(String filterPredicate) {
this.filterPredicate = filterPredicate;
}
/**
     * <p>
     * Extra condition clause to filter data from the source. For example:
     * </p>
     * <p>
     * <code>BillingCity='Mountain View'</code>
     * </p>
     * <p>
     * When using a query instead of a table name, you should validate that the query works with the specified
     * <code>filterPredicate</code>.
     * </p>
     *
     * @return Extra condition clause to filter data from the source. For example:
     *         <p>
     *         <code>BillingCity='Mountain View'</code>
     *         </p>
     *         <p>
     *         When using a query instead of a table name, you should validate that the query works with the
     *         specified <code>filterPredicate</code>.
*/
public String getFilterPredicate() {
return this.filterPredicate;
}
/**
     * <p>
     * Extra condition clause to filter data from the source. For example:
     * </p>
     * <p>
     * <code>BillingCity='Mountain View'</code>
     * </p>
     * <p>
     * When using a query instead of a table name, you should validate that the query works with the specified
     * <code>filterPredicate</code>.
     * </p>
     *
     * @param filterPredicate
     *        Extra condition clause to filter data from the source. For example:
     *        <p>
     *        <code>BillingCity='Mountain View'</code>
     *        </p>
     *        <p>
     *        When using a query instead of a table name, you should validate that the query works with the specified
     *        <code>filterPredicate</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public JDBCConnectorOptions withFilterPredicate(String filterPredicate) {
setFilterPredicate(filterPredicate);
return this;
}
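
    // Illustrative usage sketch (not part of the generated model): pushing a row filter down to the
    // JDBC source. The predicate below reuses the documented example value; any valid SQL condition
    // for the source table could be supplied instead.
    //
    //   JDBCConnectorOptions filtered = new JDBCConnectorOptions()
    //           .withFilterPredicate("BillingCity='Mountain View'");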
/**
     * <p>
     * The name of an integer column that is used for partitioning. This option works only when it's included with
     * <code>lowerBound</code>, <code>upperBound</code>, and <code>numPartitions</code>. This option works the same
     * way as in the Spark SQL JDBC reader.
     * </p>
     *
     * @param partitionColumn
     *        The name of an integer column that is used for partitioning. This option works only when it's included
     *        with <code>lowerBound</code>, <code>upperBound</code>, and <code>numPartitions</code>. This option works
     *        the same way as in the Spark SQL JDBC reader.
*/
public void setPartitionColumn(String partitionColumn) {
this.partitionColumn = partitionColumn;
}
/**
     * <p>
     * The name of an integer column that is used for partitioning. This option works only when it's included with
     * <code>lowerBound</code>, <code>upperBound</code>, and <code>numPartitions</code>. This option works the same
     * way as in the Spark SQL JDBC reader.
     * </p>
     *
     * @return The name of an integer column that is used for partitioning. This option works only when it's included
     *         with <code>lowerBound</code>, <code>upperBound</code>, and <code>numPartitions</code>. This option
     *         works the same way as in the Spark SQL JDBC reader.
*/
public String getPartitionColumn() {
return this.partitionColumn;
}
/**
     * <p>
     * The name of an integer column that is used for partitioning. This option works only when it's included with
     * <code>lowerBound</code>, <code>upperBound</code>, and <code>numPartitions</code>. This option works the same
     * way as in the Spark SQL JDBC reader.
     * </p>
     *
     * @param partitionColumn
     *        The name of an integer column that is used for partitioning. This option works only when it's included
     *        with <code>lowerBound</code>, <code>upperBound</code>, and <code>numPartitions</code>. This option works
     *        the same way as in the Spark SQL JDBC reader.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public JDBCConnectorOptions withPartitionColumn(String partitionColumn) {
setPartitionColumn(partitionColumn);
return this;
}
/**
     * <p>
     * The minimum value of <code>partitionColumn</code> that is used to decide partition stride.
     * </p>
     *
     * @param lowerBound
     *        The minimum value of <code>partitionColumn</code> that is used to decide partition stride.
*/
public void setLowerBound(Long lowerBound) {
this.lowerBound = lowerBound;
}
/**
     * <p>
     * The minimum value of <code>partitionColumn</code> that is used to decide partition stride.
     * </p>
     *
     * @return The minimum value of <code>partitionColumn</code> that is used to decide partition stride.
*/
public Long getLowerBound() {
return this.lowerBound;
}
/**
     * <p>
     * The minimum value of <code>partitionColumn</code> that is used to decide partition stride.
     * </p>
     *
     * @param lowerBound
     *        The minimum value of <code>partitionColumn</code> that is used to decide partition stride.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public JDBCConnectorOptions withLowerBound(Long lowerBound) {
setLowerBound(lowerBound);
return this;
}
/**
     * <p>
     * The maximum value of <code>partitionColumn</code> that is used to decide partition stride.
     * </p>
     *
     * @param upperBound
     *        The maximum value of <code>partitionColumn</code> that is used to decide partition stride.
*/
public void setUpperBound(Long upperBound) {
this.upperBound = upperBound;
}
/**
     * <p>
     * The maximum value of <code>partitionColumn</code> that is used to decide partition stride.
     * </p>
     *
     * @return The maximum value of <code>partitionColumn</code> that is used to decide partition stride.
*/
public Long getUpperBound() {
return this.upperBound;
}
/**
     * <p>
     * The maximum value of <code>partitionColumn</code> that is used to decide partition stride.
     * </p>
     *
     * @param upperBound
     *        The maximum value of <code>partitionColumn</code> that is used to decide partition stride.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public JDBCConnectorOptions withUpperBound(Long upperBound) {
setUpperBound(upperBound);
return this;
}
/**
     * <p>
     * The number of partitions. This value, along with <code>lowerBound</code> (inclusive) and
     * <code>upperBound</code> (exclusive), forms partition strides for generated <code>WHERE</code> clause
     * expressions that are used to split the <code>partitionColumn</code>.
     * </p>
     *
     * @param numPartitions
     *        The number of partitions. This value, along with <code>lowerBound</code> (inclusive) and
     *        <code>upperBound</code> (exclusive), forms partition strides for generated <code>WHERE</code> clause
     *        expressions that are used to split the <code>partitionColumn</code>.
*/
public void setNumPartitions(Long numPartitions) {
this.numPartitions = numPartitions;
}
/**
     * <p>
     * The number of partitions. This value, along with <code>lowerBound</code> (inclusive) and
     * <code>upperBound</code> (exclusive), forms partition strides for generated <code>WHERE</code> clause
     * expressions that are used to split the <code>partitionColumn</code>.
     * </p>
     *
     * @return The number of partitions. This value, along with <code>lowerBound</code> (inclusive) and
     *         <code>upperBound</code> (exclusive), forms partition strides for generated <code>WHERE</code> clause
     *         expressions that are used to split the <code>partitionColumn</code>.
*/
public Long getNumPartitions() {
return this.numPartitions;
}
/**
     * <p>
     * The number of partitions. This value, along with <code>lowerBound</code> (inclusive) and
     * <code>upperBound</code> (exclusive), forms partition strides for generated <code>WHERE</code> clause
     * expressions that are used to split the <code>partitionColumn</code>.
     * </p>
     *
     * @param numPartitions
     *        The number of partitions. This value, along with <code>lowerBound</code> (inclusive) and
     *        <code>upperBound</code> (exclusive), forms partition strides for generated <code>WHERE</code> clause
     *        expressions that are used to split the <code>partitionColumn</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public JDBCConnectorOptions withNumPartitions(Long numPartitions) {
setNumPartitions(numPartitions);
return this;
}
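
    // Illustrative usage sketch (not part of the generated model): configuring column-based reads the
    // same way the Spark SQL JDBC reader does. The column name "id" and the bounds are assumed example
    // values; with 10 partitions over [0, 1000000) each generated WHERE clause covers a stride of
    // 100000 values of the partition column.
    //
    //   JDBCConnectorOptions partitioned = new JDBCConnectorOptions()
    //           .withPartitionColumn("id")
    //           .withLowerBound(0L)
    //           .withUpperBound(1000000L)
    //           .withNumPartitions(10L);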
/**
     * <p>
     * The name of the job bookmark keys on which to sort.
     * </p>
     *
     * @return The name of the job bookmark keys on which to sort.
     */
    public java.util.List<String> getJobBookmarkKeys() {
        return jobBookmarkKeys;
    }

    /**
     * <p>
     * The name of the job bookmark keys on which to sort.
     * </p>
     *
     * @param jobBookmarkKeys
     *        The name of the job bookmark keys on which to sort.
     */
    public void setJobBookmarkKeys(java.util.Collection<String> jobBookmarkKeys) {
        if (jobBookmarkKeys == null) {
            this.jobBookmarkKeys = null;
            return;
        }

        this.jobBookmarkKeys = new java.util.ArrayList<String>(jobBookmarkKeys);
    }

    /**
     * <p>
     * The name of the job bookmark keys on which to sort.
     * </p>
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setJobBookmarkKeys(java.util.Collection)} or {@link #withJobBookmarkKeys(java.util.Collection)} if you
     * want to override the existing values.
     * </p>
     *
     * @param jobBookmarkKeys
     *        The name of the job bookmark keys on which to sort.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public JDBCConnectorOptions withJobBookmarkKeys(String... jobBookmarkKeys) {
        if (this.jobBookmarkKeys == null) {
            setJobBookmarkKeys(new java.util.ArrayList<String>(jobBookmarkKeys.length));
        }
        for (String ele : jobBookmarkKeys) {
            this.jobBookmarkKeys.add(ele);
        }
        return this;
    }

    /**
     * <p>
     * The name of the job bookmark keys on which to sort.
     * </p>
     *
     * @param jobBookmarkKeys
     *        The name of the job bookmark keys on which to sort.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public JDBCConnectorOptions withJobBookmarkKeys(java.util.Collection<String> jobBookmarkKeys) {
        setJobBookmarkKeys(jobBookmarkKeys);
        return this;
    }

    /**
     * <p>
     * Specifies an ascending or descending sort order.
     * </p>
     *
     * @param jobBookmarkKeysSortOrder
     *        Specifies an ascending or descending sort order.
     */
    public void setJobBookmarkKeysSortOrder(String jobBookmarkKeysSortOrder) {
        this.jobBookmarkKeysSortOrder = jobBookmarkKeysSortOrder;
    }

    /**
     * <p>
     * Specifies an ascending or descending sort order.
     * </p>
     *
     * @return Specifies an ascending or descending sort order.
     */
    public String getJobBookmarkKeysSortOrder() {
        return this.jobBookmarkKeysSortOrder;
    }

    /**
     * <p>
     * Specifies an ascending or descending sort order.
     * </p>
     *
     * @param jobBookmarkKeysSortOrder
     *        Specifies an ascending or descending sort order.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public JDBCConnectorOptions withJobBookmarkKeysSortOrder(String jobBookmarkKeysSortOrder) {
        setJobBookmarkKeysSortOrder(jobBookmarkKeysSortOrder);
        return this;
    }
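
    // Illustrative usage sketch (not part of the generated model): keys and sort order for job
    // bookmarks. The key name "update_ts" and the order "ASC" are assumed example values; consult
    // the Glue connector documentation for the accepted sort-order strings.
    //
    //   JDBCConnectorOptions bookmarked = new JDBCConnectorOptions()
    //           .withJobBookmarkKeys("update_ts")
    //           .withJobBookmarkKeysSortOrder("ASC");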
    /**
     * <p>
     * Custom data type mapping that builds a mapping from a JDBC data type to a Glue data type. For example, the
     * option <code>"dataTypeMapping":{"FLOAT":"STRING"}</code> maps data fields of JDBC type <code>FLOAT</code> into
     * the Java <code>String</code> type by calling the <code>ResultSet.getString()</code> method of the driver, and
     * uses it to build the Glue record. The <code>ResultSet</code> object is implemented by each driver, so the
     * behavior is specific to the driver you use. Refer to the documentation for your JDBC driver to understand how
     * the driver performs the conversions.
     * </p>
     *
     * @return Custom data type mapping that builds a mapping from a JDBC data type to a Glue data type. For example,
     *         the option <code>"dataTypeMapping":{"FLOAT":"STRING"}</code> maps data fields of JDBC type
     *         <code>FLOAT</code> into the Java <code>String</code> type by calling the
     *         <code>ResultSet.getString()</code> method of the driver, and uses it to build the Glue record. The
     *         <code>ResultSet</code> object is implemented by each driver, so the behavior is specific to the driver
     *         you use. Refer to the documentation for your JDBC driver to understand how the driver performs the
     *         conversions.
*/
    public java.util.Map<String, String> getDataTypeMapping() {
        return dataTypeMapping;
    }
    /**
     * <p>
     * Custom data type mapping that builds a mapping from a JDBC data type to a Glue data type. For example, the
     * option <code>"dataTypeMapping":{"FLOAT":"STRING"}</code> maps data fields of JDBC type <code>FLOAT</code> into
     * the Java <code>String</code> type by calling the <code>ResultSet.getString()</code> method of the driver, and
     * uses it to build the Glue record. The <code>ResultSet</code> object is implemented by each driver, so the
     * behavior is specific to the driver you use. Refer to the documentation for your JDBC driver to understand how
     * the driver performs the conversions.
     * </p>
     *
     * @param dataTypeMapping
     *        Custom data type mapping that builds a mapping from a JDBC data type to a Glue data type. For example,
     *        the option <code>"dataTypeMapping":{"FLOAT":"STRING"}</code> maps data fields of JDBC type
     *        <code>FLOAT</code> into the Java <code>String</code> type by calling the
     *        <code>ResultSet.getString()</code> method of the driver, and uses it to build the Glue record. The
     *        <code>ResultSet</code> object is implemented by each driver, so the behavior is specific to the driver
     *        you use. Refer to the documentation for your JDBC driver to understand how the driver performs the
     *        conversions.
*/
    public void setDataTypeMapping(java.util.Map<String, String> dataTypeMapping) {
        this.dataTypeMapping = dataTypeMapping;
    }
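
    // Illustrative usage sketch (not part of the generated model): mapping the JDBC FLOAT type to the
    // Glue STRING type, mirroring the "dataTypeMapping":{"FLOAT":"STRING"} example described above.
    //
    //   java.util.Map<String, String> mapping = new java.util.HashMap<String, String>();
    //   mapping.put("FLOAT", "STRING");
    //   JDBCConnectorOptions mapped = new JDBCConnectorOptions().withDataTypeMapping(mapping);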
    /**
     * <p>
     * Custom data type mapping that builds a mapping from a JDBC data type to a Glue data type. For example, the
     * option <code>"dataTypeMapping":{"FLOAT":"STRING"}</code> maps data fields of JDBC type <code>FLOAT</code> into
     * the Java <code>String</code> type by calling the <code>ResultSet.getString()</code> method of the driver, and
     * uses it to build the Glue record. The <code>ResultSet</code> object is implemented by each driver, so the
     * behavior is specific to the driver you use. Refer to the documentation for your JDBC driver to understand how
     * the driver performs the conversions.
     * </p>
     *
     * @param dataTypeMapping
     *        Custom data type mapping that builds a mapping from a JDBC data type to a Glue data type. For example,
     *        the option <code>"dataTypeMapping":{"FLOAT":"STRING"}</code> maps data fields of JDBC type
     *        <code>FLOAT</code> into the Java <code>String</code> type by calling the
     *        <code>ResultSet.getString()</code> method of the driver, and uses it to build the Glue record. The
     *        <code>ResultSet</code> object is implemented by each driver, so the behavior is specific to the driver
     *        you use. Refer to the documentation for your JDBC driver to understand how the driver performs the
     *        conversions.
* @return Returns a reference to this object so that method calls can be chained together.
*/
    public JDBCConnectorOptions withDataTypeMapping(java.util.Map<String, String> dataTypeMapping) {
        setDataTypeMapping(dataTypeMapping);
        return this;
    }