/* * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.glue.model; import java.io.Serializable; import javax.annotation.Generated; import com.amazonaws.protocol.StructuredPojo; import com.amazonaws.protocol.ProtocolMarshaller; /** *
 * Specifies a transform where you enter a SQL query using Spark SQL syntax to transform the data. The output is a
 * single <code>DynamicFrame</code>.
 * </p>
*
* The name of the transform node. *
*/ private String name; /** ** The data inputs identified by their node names. You can associate a table name with each input node to use in the * SQL query. The name you choose must meet the Spark SQL naming restrictions. *
*/ private java.util.List* A SQL query that must use Spark SQL syntax and return a single data set. *
*/ private String sqlQuery; /** *
* A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For example, you
* have a datasource named "MyDataSource". If you specify From
as MyDataSource, and Alias
* as SqlName, then in your SQL you can do:
*
* select * from SqlName
*
* and that gets data from MyDataSource. *
*/ private java.util.List* Specifies the data schema for the SparkSQL transform. *
*/ private java.util.List* The name of the transform node. *
* * @param name * The name of the transform node. */ public void setName(String name) { this.name = name; } /** ** The name of the transform node. *
* * @return The name of the transform node. */ public String getName() { return this.name; } /** ** The name of the transform node. *
* * @param name * The name of the transform node. * @return Returns a reference to this object so that method calls can be chained together. */ public SparkSQL withName(String name) { setName(name); return this; } /** ** The data inputs identified by their node names. You can associate a table name with each input node to use in the * SQL query. The name you choose must meet the Spark SQL naming restrictions. *
* * @return The data inputs identified by their node names. You can associate a table name with each input node to * use in the SQL query. The name you choose must meet the Spark SQL naming restrictions. */ public java.util.List* The data inputs identified by their node names. You can associate a table name with each input node to use in the * SQL query. The name you choose must meet the Spark SQL naming restrictions. *
* * @param inputs * The data inputs identified by their node names. You can associate a table name with each input node to use * in the SQL query. The name you choose must meet the Spark SQL naming restrictions. */ public void setInputs(java.util.Collection* The data inputs identified by their node names. You can associate a table name with each input node to use in the * SQL query. The name you choose must meet the Spark SQL naming restrictions. *
** NOTE: This method appends the values to the existing list (if any). Use * {@link #setInputs(java.util.Collection)} or {@link #withInputs(java.util.Collection)} if you want to override the * existing values. *
* * @param inputs * The data inputs identified by their node names. You can associate a table name with each input node to use * in the SQL query. The name you choose must meet the Spark SQL naming restrictions. * @return Returns a reference to this object so that method calls can be chained together. */ public SparkSQL withInputs(String... inputs) { if (this.inputs == null) { setInputs(new java.util.ArrayList* The data inputs identified by their node names. You can associate a table name with each input node to use in the * SQL query. The name you choose must meet the Spark SQL naming restrictions. *
* * @param inputs * The data inputs identified by their node names. You can associate a table name with each input node to use * in the SQL query. The name you choose must meet the Spark SQL naming restrictions. * @return Returns a reference to this object so that method calls can be chained together. */ public SparkSQL withInputs(java.util.Collection* A SQL query that must use Spark SQL syntax and return a single data set. *
* * @param sqlQuery * A SQL query that must use Spark SQL syntax and return a single data set. */ public void setSqlQuery(String sqlQuery) { this.sqlQuery = sqlQuery; } /** ** A SQL query that must use Spark SQL syntax and return a single data set. *
* * @return A SQL query that must use Spark SQL syntax and return a single data set. */ public String getSqlQuery() { return this.sqlQuery; } /** ** A SQL query that must use Spark SQL syntax and return a single data set. *
* * @param sqlQuery * A SQL query that must use Spark SQL syntax and return a single data set. * @return Returns a reference to this object so that method calls can be chained together. */ public SparkSQL withSqlQuery(String sqlQuery) { setSqlQuery(sqlQuery); return this; } /** *
* A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For example, you
* have a datasource named "MyDataSource". If you specify From
as MyDataSource, and Alias
* as SqlName, then in your SQL you can do:
*
* select * from SqlName
*
* and that gets data from MyDataSource. *
* * @return A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For * example, you have a datasource named "MyDataSource". If you specifyFrom
as MyDataSource,
* and Alias
as SqlName, then in your SQL you can do:
*
* select * from SqlName
*
* and that gets data from MyDataSource.
*/
public java.util.List
* A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For example, you
* have a datasource named "MyDataSource". If you specify
*
* and that gets data from MyDataSource.
* From
as MyDataSource, and Alias
* as SqlName, then in your SQL you can do:
* select * from SqlName
* From
as MyDataSource, and
* Alias
as SqlName, then in your SQL you can do:
* select * from SqlName
*
* and that gets data from MyDataSource.
*/
public void setSqlAliases(java.util.Collection
* A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For example, you
* have a datasource named "MyDataSource". If you specify
*
* and that gets data from MyDataSource.
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setSqlAliases(java.util.Collection)} or {@link #withSqlAliases(java.util.Collection)} if you want to
* override the existing values.
* From
as MyDataSource, and Alias
* as SqlName, then in your SQL you can do:
* select * from SqlName
* From
as MyDataSource, and
* Alias
as SqlName, then in your SQL you can do:
* select * from SqlName
*
* and that gets data from MyDataSource.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public SparkSQL withSqlAliases(SqlAlias... sqlAliases) {
if (this.sqlAliases == null) {
setSqlAliases(new java.util.ArrayList
* A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For example, you
* have a datasource named "MyDataSource". If you specify
*
* and that gets data from MyDataSource.
* From
as MyDataSource, and Alias
* as SqlName, then in your SQL you can do:
* select * from SqlName
* From
as MyDataSource, and
* Alias
as SqlName, then in your SQL you can do:
* select * from SqlName
*
* and that gets data from MyDataSource.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public SparkSQL withSqlAliases(java.util.Collection
* Specifies the data schema for the SparkSQL transform.
*
* Specifies the data schema for the SparkSQL transform.
*
* Specifies the data schema for the SparkSQL transform.
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setOutputSchemas(java.util.Collection)} or {@link #withOutputSchemas(java.util.Collection)} if you want
* to override the existing values.
*
* Specifies the data schema for the SparkSQL transform.
*