/*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift;
import com.amazon.redshift.copy.CopyManager;
import com.amazon.redshift.fastpath.Fastpath;
import com.amazon.redshift.jdbc.AutoSave;
import com.amazon.redshift.jdbc.PreferQueryMode;
import com.amazon.redshift.largeobject.LargeObjectManager;
import com.amazon.redshift.replication.RedshiftReplicationConnection;
import com.amazon.redshift.util.RedshiftObject;
import java.sql.Array;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Map;
/**
* This interface defines the public Redshift extensions to java.sql.Connection. All Connections
* returned by the Redshift driver implement RedshiftConnection.
*/
public interface RedshiftConnection {
/**
* Creates an {@link Array} wrapping elements. This is similar to
* {@link java.sql.Connection#createArrayOf(String, Object[])}, but also
* provides support for primitive arrays.
*
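* <p>A minimal usage sketch (assuming {@code conn} is an open connection obtained
* from this driver; the type name {@code "int4"} is only illustrative):</p>
*
* <pre>{@code
* RedshiftConnection rsConn = (RedshiftConnection) conn;
* // Unlike Connection#createArrayOf, a primitive array is accepted here.
* Array ids = rsConn.createArrayOf("int4", new int[]{1, 2, 3});
* }</pre>
*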
* @param typeName
* The SQL name of the type to map the elements to.
* Must not be {@code null}.
* @param elements
* The array of objects to map. A {@code null} value will result in
* an {@link Array} representing {@code null}.
* @return An {@link Array} wrapping elements.
* @throws SQLException
* If for some reason the array cannot be created.
* @see java.sql.Connection#createArrayOf(String, Object[])
*/
Array createArrayOf(String typeName, Object elements) throws SQLException;
/**
* This method returns any notifications that have been received since the last call to this
* method. Returns null if there have been no notifications.
*
* @return notifications that have been received
* @throws SQLException if something goes wrong
* @since 7.3
*/
RedshiftNotification[] getNotifications() throws SQLException;
/**
* This method returns any notifications that have been received since the last call to this
* method. Returns null if there have been no notifications. A timeout can be specified so the
* driver waits for notifications.
*
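* <p>A hedged polling sketch (assuming {@code conn} is an open connection obtained
* from this driver and that the backend delivers notifications for it):</p>
*
* <pre>{@code
* RedshiftConnection rsConn = (RedshiftConnection) conn;
* // Wait up to five seconds for a batch of notifications.
* RedshiftNotification[] pending = rsConn.getNotifications(5000);
* if (pending != null) {
*   System.out.println(pending.length + " notification(s) received");
* }
* }</pre>
*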
* @param timeoutMillis when 0, blocks forever. When > 0, blocks up to the specified number of
*        milliseconds or until at least one notification has been received. If more than one
*        notification is about to be received, these will be returned in one batch.
* @return notifications that have been received
* @throws SQLException if something goes wrong
* @since 43
*/
RedshiftNotification[] getNotifications(int timeoutMillis) throws SQLException;
/**
* This returns the COPY API for the current connection.
*
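* <p>A minimal sketch of obtaining the COPY API (assuming {@code conn} is an open
* connection obtained from this driver; which COPY operations the server accepts
* is beyond the scope of this example):</p>
*
* <pre>{@code
* CopyManager copyApi = ((RedshiftConnection) conn).getCopyAPI();
* }</pre>
*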
* @return COPY API for the current connection
* @throws SQLException if something goes wrong
* @since 8.4
*/
CopyManager getCopyAPI() throws SQLException;
/**
* This returns the LargeObject API for the current connection.
*
* @return LargeObject API for the current connection
* @throws SQLException if something goes wrong
* @since 7.3
*/
LargeObjectManager getLargeObjectAPI() throws SQLException;
/**
* This returns the Fastpath API for the current connection.
*
* @return Fastpath API for the current connection
* @throws SQLException if something goes wrong
* @since 7.3
* @deprecated This API is somewhat obsolete, as one may achieve similar performance
* and greater functionality by setting up a prepared statement to define
* the function call. Then, executing the statement with binary transmission of parameters
* and results substitutes for a fast-path function call.
*/
@Deprecated
Fastpath getFastpathAPI() throws SQLException;
/**
* This allows client code to add a handler for one of com.amazon.redshift's more unique data types. It
* is approximately equivalent to {@code addDataType(type, Class.forName(name))}.
*
* @param type JDBC type name
* @param className class name
* @throws RuntimeException if the type cannot be registered (class not found, etc).
* @deprecated As of 8.0, replaced by {@link #addDataType(String, Class)}. This deprecated method
* does not work correctly for registering classes that cannot be directly loaded by
* the JDBC driver's classloader.
*/
@Deprecated
void addDataType(String type, String className);
/**
* This allows client code to add a handler for one of com.amazon.redshift's more unique data types.
*
* <p><b>NOTE:</b> This is not part of JDBC, but an extension.</p>
*
* <p>The best way to use this is as follows:</p>
*
* <pre>
* ...
* ((com.amazon.redshift.RedshiftConnection)myconn).addDataType("mytype", my.class.name.class);
* ...
* </pre>
*
* <p>where myconn is an open Connection to com.amazon.redshift.</p>
*
* <p>The handling class must extend com.amazon.redshift.util.RedshiftObject.</p>
*
* @param type the Redshift type to register
* @param klass the class implementing the Java representation of the type; this class must
*        extend {@link com.amazon.redshift.util.RedshiftObject}
* @throws SQLException if {@code klass} does not extend
*         {@link com.amazon.redshift.util.RedshiftObject}
* @see com.amazon.redshift.util.RedshiftObject
* @since 8.0
*/
void addDataType(String type, Class<? extends RedshiftObject> klass) throws SQLException;
/**
* Set the default statement reuse threshold before enabling server-side prepare. See
* {@link com.amazon.redshift.RedshiftStatement#setPrepareThreshold(int)} for details.
*
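* <p>A minimal usage sketch (assuming {@code conn} is an open connection obtained
* from this driver):</p>
*
* <pre>{@code
* RedshiftConnection rsConn = (RedshiftConnection) conn;
* // Use a server-side prepared plan from the first execution of each statement.
* rsConn.setPrepareThreshold(1);
* }</pre>
*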
* @param threshold the new threshold
* @since build 302
*/
void setPrepareThreshold(int threshold);
/**
* Get the default server-side prepare reuse threshold for statements created from this
* connection.
*
* @return the current threshold
* @since build 302
*/
int getPrepareThreshold();
/**
* Set the default fetch size for statements created from this connection.
*
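* <p>A minimal usage sketch (assuming {@code conn} is an open connection obtained
* from this driver):</p>
*
* <pre>{@code
* RedshiftConnection rsConn = (RedshiftConnection) conn;
* // Statements created from this connection now default to fetching 1000 rows at a time.
* rsConn.setDefaultFetchSize(1000);
* }</pre>
*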
* @param fetchSize new default fetch size
* @throws SQLException if the specified {@code fetchSize} is negative
* @see Statement#setFetchSize(int)
*/
void setDefaultFetchSize(int fetchSize) throws SQLException;
/**
* Get the default fetch size for statements created from this connection.
*
* @return the current default fetch size
* @see RedshiftProperty#DEFAULT_ROW_FETCH_SIZE
* @see Statement#getFetchSize()
*/
int getDefaultFetchSize();
/**
* Return the process ID (PID) of the backend server process handling this connection.
*
* @return PID of backend server process.
*/
int getBackendPID();
/**
* Sends a query cancellation for this connection.
* @throws SQLException if there are problems cancelling the query
*/
void cancelQuery() throws SQLException;
/**
* Return the given string suitably quoted to be used as an identifier in an SQL statement string.
* Quotes are added only if necessary (i.e., if the string contains non-identifier characters or
* would be case-folded). Embedded quotes are properly doubled.
*
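* <p>A minimal usage sketch (assuming {@code conn} is an open connection obtained
* from this driver):</p>
*
* <pre>{@code
* RedshiftConnection rsConn = (RedshiftConnection) conn;
* // "User Id" contains a space, so the result comes back double-quoted.
* String column = rsConn.escapeIdentifier("User Id");
* String sql = "SELECT " + column + " FROM my_table";
* }</pre>
*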
* @param identifier input identifier
* @return the escaped identifier
* @throws SQLException if something goes wrong
*/
String escapeIdentifier(String identifier) throws SQLException;
/**
* Return the given string suitably quoted to be used as a string literal in an SQL statement
* string. Embedded single-quotes and backslashes are properly doubled. Note that quote_literal
* returns null on null input.
*
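* <p>A minimal usage sketch (assuming {@code conn} is an open connection obtained
* from this driver):</p>
*
* <pre>{@code
* RedshiftConnection rsConn = (RedshiftConnection) conn;
* // The embedded single quote in O'Brien is doubled in the returned text.
* String safeName = rsConn.escapeLiteral("O'Brien");
* }</pre>
*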
* @param literal input literal
* @return the quoted literal
* @throws SQLException if something goes wrong
*/
String escapeLiteral(String literal) throws SQLException;
/**
* Returns the query mode for this connection.
*
* <p>When running in simple query mode, certain features are not available: callable statements,
* partial result set fetch, bytea type, etc.</p>
*
* <p>The list of supported features is subject to change.</p>
*
* @return the preferred query mode
* @see PreferQueryMode
*/
PreferQueryMode getPreferQueryMode();
/**
* Connection configuration regarding automatic per-query savepoints.
*
* @return connection configuration regarding automatic per-query savepoints
* @see RedshiftProperty#AUTOSAVE
*/
AutoSave getAutosave();
/**
* Configures if connection should use automatic savepoints.
*
* @param autoSave connection configuration regarding automatic per-query savepoints
* @see RedshiftProperty#AUTOSAVE
*/
void setAutosave(AutoSave autoSave);
/**
* @return replication API for the current connection
*/
RedshiftReplicationConnection getReplicationAPI();
/**
* <p>Returns the current values of all parameters reported by the server.</p>
*
* <p>Redshift reports values for a subset of parameters (GUCs) to the client
* at connect-time, then sends update messages whenever the values change
* during a session. PgJDBC records the latest values and exposes them to client
* applications via {@code getParameterStatuses()}.</p>
*
* <p>PgJDBC exposes individual accessors for some of these parameters as
* listed below. They are more backwards-compatible and should be preferred
* where possible.</p>
*
* <p>Not all parameters are reported, only those marked {@code GUC_REPORT}
* in the source code. The {@code pg_settings} view does not expose information
* about which parameters are reportable. PgJDBC's map will only contain the
* parameters the server reports values for, so you cannot use this method as a
* substitute for running a {@code SHOW paramname;} or
* {@code SELECT current_setting('paramname');} query for arbitrary parameters.</p>
*
* <p>Parameter names are <b>case-insensitive</b> and <b>case-preserving</b>
* in this map, like in Redshift itself. So {@code DateStyle} and
* {@code datestyle} are the same key.</p>
*
* <p>As of PostgreSQL 11 the reportable parameter list, and related PgJDBC
* interfaces or accessors, are:</p>
*
* <ul>
* <li>{@code application_name} -
*     {@link java.sql.Connection#getClientInfo()},
*     {@link java.sql.Connection#setClientInfo(java.util.Properties)}
*     and the {@code ApplicationName} connection property.</li>
* <li>{@code client_encoding} - PgJDBC always sets this to {@code UTF8}.
*     See the {@code allowEncodingChanges} connection property.</li>
* <li>{@code DateStyle} - PgJDBC requires this to always be set to {@code ISO}</li>
* <li>{@code standard_conforming_strings} - indirectly via {@link #escapeLiteral(String)}</li>
* <li>{@code TimeZone} - set from the JDK timezone, see {@link java.util.TimeZone#getDefault()}
*     and {@link java.util.TimeZone#setDefault(TimeZone)}</li>
* <li>{@code integer_datetimes}</li>
* <li>{@code IntervalStyle}</li>
* <li>{@code server_encoding}</li>
* <li>{@code server_version}</li>
* <li>{@code is_superuser}</li>
* <li>{@code session_authorization}</li>
* </ul>
*
* <p>Note that some PgJDBC operations will change server parameters
* automatically.</p>
*
* @return unmodifiable map of case-insensitive parameter names to parameter values
* @since 42.2.6
*/
Map<String, String> getParameterStatuses();
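// A minimal usage sketch, assuming conn is an open connection obtained from this driver:
//
//   RedshiftConnection rsConn = (RedshiftConnection) conn;
//   String serverVersion = rsConn.getParameterStatuses().get("server_version");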