/*
 * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.datasync.model;

import java.io.Serializable;
import javax.annotation.Generated;

import com.amazonaws.AmazonWebServiceRequest;

/**
 * 
 * @see AWS API Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CreateLocationHdfsRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {

    /**
     * <p>
     * A subdirectory in the HDFS cluster. This subdirectory is used to read data from or write data to the HDFS
     * cluster. If the subdirectory isn't specified, it will default to <code>/</code>.
     * </p>
     */
    private String subdirectory;
    /**
     * <p>
     * The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening, closing, and
     * renaming files and directories. The NameNode contains the information to map blocks of data to the DataNodes.
     * You can use only one NameNode.
     * </p>
     */
    private java.util.List<HdfsNameNode> nameNodes;
    /**
     * <p>
     * The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes. The
     * default block size is 128 mebibytes (MiB).
     * </p>
     */
    private Integer blockSize;
    /**
     * <p>
     * The number of DataNodes to replicate the data to when writing to the HDFS cluster. By default, data is
     * replicated to three DataNodes.
     * </p>
     */
    private Integer replicationFactor;
    /**
     * <p>
     * The URI of the HDFS cluster's Key Management Server (KMS).
     * </p>
     */
    private String kmsKeyProviderUri;
    /**
     * <p>
     * The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data transfer
     * protection settings configured on the Hadoop Distributed File System (HDFS) cluster. If
     * <code>QopConfiguration</code> isn't specified, <code>RpcProtection</code> and
     * <code>DataTransferProtection</code> default to <code>PRIVACY</code>. If you set <code>RpcProtection</code> or
     * <code>DataTransferProtection</code>, the other parameter assumes the same value.
     * </p>
     */
    private QopConfiguration qopConfiguration;
    /**
     * <p>
     * The type of authentication used to determine the identity of the user.
     * </p>
     */
    private String authenticationType;
    /**
     * <p>
     * The user name used to identify the client on the host operating system.
     * </p>
     * <p>
     * If <code>SIMPLE</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     */
    private String simpleUser;
    /**
     * <p>
     * The Kerberos principal with access to the files and folders on the HDFS cluster.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     */
    private String kerberosPrincipal;
    /**
     * <p>
     * The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the
     * encrypted keys. You can load the keytab from a file by providing the file's address. If you're using the CLI,
     * it performs base64 encoding for you. Otherwise, provide the base64-encoded text.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     */
    private java.nio.ByteBuffer kerberosKeytab;
    /**
     * <p>
     * The <code>krb5.conf</code> file that contains the Kerberos configuration information. You can load the
     * <code>krb5.conf</code> file by providing the file's address. If you're using the CLI, it performs the base64
     * encoding for you. Otherwise, provide the base64-encoded text.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     */
    private java.nio.ByteBuffer kerberosKrb5Conf;
    /**
     * <p>
     * The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.
     * </p>
     */
    private java.util.List<String> agentArns;
    /**
     * <p>
     * The key-value pair that represents the tag that you want to add to the location. The value can be an empty
     * string. We recommend using tags to name your resources.
     * </p>
     */
    private java.util.List<TagListEntry> tags;
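
    // Illustrative only (not part of the generated model): a minimal sketch of how the
    // fluent "with*" methods below can be chained to populate this request. The host
    // name, user name, and agent ARN are hypothetical placeholder values.
    //
    //   CreateLocationHdfsRequest request = new CreateLocationHdfsRequest()
    //           .withNameNodes(new HdfsNameNode().withHostname("namenode.example.com").withPort(8020))
    //           .withAuthenticationType(HdfsAuthenticationType.SIMPLE)
    //           .withSimpleUser("hdfs-user")
    //           .withAgentArns("arn:aws:datasync:us-east-2:111222333444:agent/agent-0123456789abcdef0")
    //           .withSubdirectory("/data");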
    /**
     * <p>
     * A subdirectory in the HDFS cluster. This subdirectory is used to read data from or write data to the HDFS
     * cluster. If the subdirectory isn't specified, it will default to <code>/</code>.
     * </p>
     * 
     * @param subdirectory
     *        A subdirectory in the HDFS cluster. This subdirectory is used to read data from or write data to the
     *        HDFS cluster. If the subdirectory isn't specified, it will default to <code>/</code>.
     */
    public void setSubdirectory(String subdirectory) {
        this.subdirectory = subdirectory;
    }

    /**
     * <p>
     * A subdirectory in the HDFS cluster. This subdirectory is used to read data from or write data to the HDFS
     * cluster. If the subdirectory isn't specified, it will default to <code>/</code>.
     * </p>
     * 
     * @return A subdirectory in the HDFS cluster. This subdirectory is used to read data from or write data to the
     *         HDFS cluster. If the subdirectory isn't specified, it will default to <code>/</code>.
     */
    public String getSubdirectory() {
        return this.subdirectory;
    }

    /**
     * <p>
     * A subdirectory in the HDFS cluster. This subdirectory is used to read data from or write data to the HDFS
     * cluster. If the subdirectory isn't specified, it will default to <code>/</code>.
     * </p>
     * 
     * @param subdirectory
     *        A subdirectory in the HDFS cluster. This subdirectory is used to read data from or write data to the
     *        HDFS cluster. If the subdirectory isn't specified, it will default to <code>/</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withSubdirectory(String subdirectory) {
        setSubdirectory(subdirectory);
        return this;
    }
    /**
     * <p>
     * The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening, closing, and
     * renaming files and directories. The NameNode contains the information to map blocks of data to the DataNodes.
     * You can use only one NameNode.
     * </p>
     * 
     * @return The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening,
     *         closing, and renaming files and directories. The NameNode contains the information to map blocks of
     *         data to the DataNodes. You can use only one NameNode.
     */
    public java.util.List<HdfsNameNode> getNameNodes() {
        return nameNodes;
    }

    /**
     * <p>
     * The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening, closing, and
     * renaming files and directories. The NameNode contains the information to map blocks of data to the DataNodes.
     * You can use only one NameNode.
     * </p>
     * 
     * @param nameNodes
     *        The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening,
     *        closing, and renaming files and directories. The NameNode contains the information to map blocks of
     *        data to the DataNodes. You can use only one NameNode.
     */
    public void setNameNodes(java.util.Collection<HdfsNameNode> nameNodes) {
        if (nameNodes == null) {
            this.nameNodes = null;
            return;
        }

        this.nameNodes = new java.util.ArrayList<HdfsNameNode>(nameNodes);
    }

    /**
     * <p>
     * The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening, closing, and
     * renaming files and directories. The NameNode contains the information to map blocks of data to the DataNodes.
     * You can use only one NameNode.
     * </p>
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setNameNodes(java.util.Collection)} or {@link #withNameNodes(java.util.Collection)} if you want to
     * override the existing values.
     * </p>
     * 
     * @param nameNodes
     *        The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening,
     *        closing, and renaming files and directories. The NameNode contains the information to map blocks of
     *        data to the DataNodes. You can use only one NameNode.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withNameNodes(HdfsNameNode... nameNodes) {
        if (this.nameNodes == null) {
            setNameNodes(new java.util.ArrayList<HdfsNameNode>(nameNodes.length));
        }
        for (HdfsNameNode ele : nameNodes) {
            this.nameNodes.add(ele);
        }
        return this;
    }

    /**
     * <p>
     * The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening, closing, and
     * renaming files and directories. The NameNode contains the information to map blocks of data to the DataNodes.
     * You can use only one NameNode.
     * </p>
     * 
     * @param nameNodes
     *        The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening,
     *        closing, and renaming files and directories. The NameNode contains the information to map blocks of
     *        data to the DataNodes. You can use only one NameNode.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withNameNodes(java.util.Collection<HdfsNameNode> nameNodes) {
        setNameNodes(nameNodes);
        return this;
    }
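
    // Illustrative note on list semantics (not generated code): the varargs overload
    // appends to any existing list, while the Collection overload replaces it.
    //
    //   request.withNameNodes(nodeA);                           // list is [nodeA]
    //   request.withNameNodes(nodeB);                           // varargs appends: [nodeA, nodeB]
    //   request.withNameNodes(java.util.Arrays.asList(nodeC));  // collection overrides: [nodeC]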
    /**
     * <p>
     * The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes. The
     * default block size is 128 mebibytes (MiB).
     * </p>
     * 
     * @param blockSize
     *        The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes.
     *        The default block size is 128 mebibytes (MiB).
     */
    public void setBlockSize(Integer blockSize) {
        this.blockSize = blockSize;
    }

    /**
     * <p>
     * The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes. The
     * default block size is 128 mebibytes (MiB).
     * </p>
     * 
     * @return The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512
     *         bytes. The default block size is 128 mebibytes (MiB).
     */
    public Integer getBlockSize() {
        return this.blockSize;
    }

    /**
     * <p>
     * The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes. The
     * default block size is 128 mebibytes (MiB).
     * </p>
     * 
     * @param blockSize
     *        The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes.
     *        The default block size is 128 mebibytes (MiB).
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withBlockSize(Integer blockSize) {
        setBlockSize(blockSize);
        return this;
    }

    /**
     * <p>
     * The number of DataNodes to replicate the data to when writing to the HDFS cluster. By default, data is
     * replicated to three DataNodes.
     * </p>
     * 
     * @param replicationFactor
     *        The number of DataNodes to replicate the data to when writing to the HDFS cluster. By default, data is
     *        replicated to three DataNodes.
     */
    public void setReplicationFactor(Integer replicationFactor) {
        this.replicationFactor = replicationFactor;
    }

    /**
     * <p>
     * The number of DataNodes to replicate the data to when writing to the HDFS cluster. By default, data is
     * replicated to three DataNodes.
     * </p>
     * 
     * @return The number of DataNodes to replicate the data to when writing to the HDFS cluster. By default, data is
     *         replicated to three DataNodes.
     */
    public Integer getReplicationFactor() {
        return this.replicationFactor;
    }

    /**
     * <p>
     * The number of DataNodes to replicate the data to when writing to the HDFS cluster. By default, data is
     * replicated to three DataNodes.
     * </p>
     * 
     * @param replicationFactor
     *        The number of DataNodes to replicate the data to when writing to the HDFS cluster. By default, data is
     *        replicated to three DataNodes.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withReplicationFactor(Integer replicationFactor) {
        setReplicationFactor(replicationFactor);
        return this;
    }

    /**
     * <p>
     * The URI of the HDFS cluster's Key Management Server (KMS).
     * </p>
     * 
     * @param kmsKeyProviderUri
     *        The URI of the HDFS cluster's Key Management Server (KMS).
     */
    public void setKmsKeyProviderUri(String kmsKeyProviderUri) {
        this.kmsKeyProviderUri = kmsKeyProviderUri;
    }

    /**
     * <p>
     * The URI of the HDFS cluster's Key Management Server (KMS).
     * </p>
     * 
     * @return The URI of the HDFS cluster's Key Management Server (KMS).
     */
    public String getKmsKeyProviderUri() {
        return this.kmsKeyProviderUri;
    }

    /**
     * <p>
     * The URI of the HDFS cluster's Key Management Server (KMS).
     * </p>
     * 
     * @param kmsKeyProviderUri
     *        The URI of the HDFS cluster's Key Management Server (KMS).
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withKmsKeyProviderUri(String kmsKeyProviderUri) {
        setKmsKeyProviderUri(kmsKeyProviderUri);
        return this;
    }
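
    // Illustrative only: the KMS key provider URI points at the Hadoop Key Management
    // Server of the cluster. The host and port below are hypothetical placeholders;
    // Hadoop KMS URIs commonly follow the "kms://<protocol>@<host>:<port>/kms" form.
    //
    //   request.withKmsKeyProviderUri("kms://http@kms.example.com:9600/kms");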
    /**
     * <p>
     * The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data transfer
     * protection settings configured on the Hadoop Distributed File System (HDFS) cluster. If
     * <code>QopConfiguration</code> isn't specified, <code>RpcProtection</code> and
     * <code>DataTransferProtection</code> default to <code>PRIVACY</code>. If you set <code>RpcProtection</code> or
     * <code>DataTransferProtection</code>, the other parameter assumes the same value.
     * </p>
     * 
     * @param qopConfiguration
     *        The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data
     *        transfer protection settings configured on the Hadoop Distributed File System (HDFS) cluster. If
     *        <code>QopConfiguration</code> isn't specified, <code>RpcProtection</code> and
     *        <code>DataTransferProtection</code> default to <code>PRIVACY</code>. If you set
     *        <code>RpcProtection</code> or <code>DataTransferProtection</code>, the other parameter assumes the same
     *        value.
     */
    public void setQopConfiguration(QopConfiguration qopConfiguration) {
        this.qopConfiguration = qopConfiguration;
    }

    /**
     * <p>
     * The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data transfer
     * protection settings configured on the Hadoop Distributed File System (HDFS) cluster. If
     * <code>QopConfiguration</code> isn't specified, <code>RpcProtection</code> and
     * <code>DataTransferProtection</code> default to <code>PRIVACY</code>. If you set <code>RpcProtection</code> or
     * <code>DataTransferProtection</code>, the other parameter assumes the same value.
     * </p>
     * 
     * @return The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data
     *         transfer protection settings configured on the Hadoop Distributed File System (HDFS) cluster. If
     *         <code>QopConfiguration</code> isn't specified, <code>RpcProtection</code> and
     *         <code>DataTransferProtection</code> default to <code>PRIVACY</code>. If you set
     *         <code>RpcProtection</code> or <code>DataTransferProtection</code>, the other parameter assumes the
     *         same value.
     */
    public QopConfiguration getQopConfiguration() {
        return this.qopConfiguration;
    }

    /**
     * <p>
     * The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data transfer
     * protection settings configured on the Hadoop Distributed File System (HDFS) cluster. If
     * <code>QopConfiguration</code> isn't specified, <code>RpcProtection</code> and
     * <code>DataTransferProtection</code> default to <code>PRIVACY</code>. If you set <code>RpcProtection</code> or
     * <code>DataTransferProtection</code>, the other parameter assumes the same value.
     * </p>
     * 
     * @param qopConfiguration
     *        The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data
     *        transfer protection settings configured on the Hadoop Distributed File System (HDFS) cluster. If
     *        <code>QopConfiguration</code> isn't specified, <code>RpcProtection</code> and
     *        <code>DataTransferProtection</code> default to <code>PRIVACY</code>. If you set
     *        <code>RpcProtection</code> or <code>DataTransferProtection</code>, the other parameter assumes the same
     *        value.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withQopConfiguration(QopConfiguration qopConfiguration) {
        setQopConfiguration(qopConfiguration);
        return this;
    }
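
    // Illustrative only: protection settings are supplied through a QopConfiguration
    // value object. Per the documentation above, setting only one of the two fields
    // makes the other assume the same value, so the second call below is redundant.
    //
    //   request.withQopConfiguration(new QopConfiguration()
    //           .withRpcProtection("PRIVACY")
    //           .withDataTransferProtection("PRIVACY"));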
    /**
     * <p>
     * The type of authentication used to determine the identity of the user.
     * </p>
     * 
     * @param authenticationType
     *        The type of authentication used to determine the identity of the user.
     * @see HdfsAuthenticationType
     */
    public void setAuthenticationType(String authenticationType) {
        this.authenticationType = authenticationType;
    }

    /**
     * <p>
     * The type of authentication used to determine the identity of the user.
     * </p>
     * 
     * @return The type of authentication used to determine the identity of the user.
     * @see HdfsAuthenticationType
     */
    public String getAuthenticationType() {
        return this.authenticationType;
    }

    /**
     * <p>
     * The type of authentication used to determine the identity of the user.
     * </p>
     * 
     * @param authenticationType
     *        The type of authentication used to determine the identity of the user.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see HdfsAuthenticationType
     */
    public CreateLocationHdfsRequest withAuthenticationType(String authenticationType) {
        setAuthenticationType(authenticationType);
        return this;
    }

    /**
     * <p>
     * The type of authentication used to determine the identity of the user.
     * </p>
     * 
     * @param authenticationType
     *        The type of authentication used to determine the identity of the user.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see HdfsAuthenticationType
     */
    public CreateLocationHdfsRequest withAuthenticationType(HdfsAuthenticationType authenticationType) {
        this.authenticationType = authenticationType.toString();
        return this;
    }
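
    // Illustrative only: the enum overload stringifies the value, so the two calls
    // below set the same underlying field to the same value.
    //
    //   request.withAuthenticationType(HdfsAuthenticationType.KERBEROS);
    //   request.withAuthenticationType("KERBEROS");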
    /**
     * <p>
     * The user name used to identify the client on the host operating system.
     * </p>
     * <p>
     * If <code>SIMPLE</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * 
     * @param simpleUser
     *        The user name used to identify the client on the host operating system.</p>
     *        <p>
     *        If <code>SIMPLE</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     */
    public void setSimpleUser(String simpleUser) {
        this.simpleUser = simpleUser;
    }

    /**
     * <p>
     * The user name used to identify the client on the host operating system.
     * </p>
     * <p>
     * If <code>SIMPLE</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * 
     * @return The user name used to identify the client on the host operating system.</p>
     *         <p>
     *         If <code>SIMPLE</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     */
    public String getSimpleUser() {
        return this.simpleUser;
    }

    /**
     * <p>
     * The user name used to identify the client on the host operating system.
     * </p>
     * <p>
     * If <code>SIMPLE</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * 
     * @param simpleUser
     *        The user name used to identify the client on the host operating system.</p>
     *        <p>
     *        If <code>SIMPLE</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withSimpleUser(String simpleUser) {
        setSimpleUser(simpleUser);
        return this;
    }
    /**
     * <p>
     * The Kerberos principal with access to the files and folders on the HDFS cluster.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * 
     * @param kerberosPrincipal
     *        The Kerberos principal with access to the files and folders on the HDFS cluster.</p>
     *        <p>
     *        If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     */
    public void setKerberosPrincipal(String kerberosPrincipal) {
        this.kerberosPrincipal = kerberosPrincipal;
    }

    /**
     * <p>
     * The Kerberos principal with access to the files and folders on the HDFS cluster.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * 
     * @return The Kerberos principal with access to the files and folders on the HDFS cluster.</p>
     *         <p>
     *         If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     */
    public String getKerberosPrincipal() {
        return this.kerberosPrincipal;
    }

    /**
     * <p>
     * The Kerberos principal with access to the files and folders on the HDFS cluster.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * 
     * @param kerberosPrincipal
     *        The Kerberos principal with access to the files and folders on the HDFS cluster.</p>
     *        <p>
     *        If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withKerberosPrincipal(String kerberosPrincipal) {
        setKerberosPrincipal(kerberosPrincipal);
        return this;
    }
    /**
     * <p>
     * The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the
     * encrypted keys. You can load the keytab from a file by providing the file's address. If you're using the CLI,
     * it performs base64 encoding for you. Otherwise, provide the base64-encoded text.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * <p>
     * The AWS SDK for Java performs a Base64 encoding on this field before sending this request to the AWS service.
     * Users of the SDK should not perform Base64 encoding on this field.
     * </p>
     * <p>
     * Warning: ByteBuffers returned by the SDK are mutable. Changes to the content or position of the byte buffer
     * will be seen by all objects that have a reference to this object. It is recommended to call
     * ByteBuffer.duplicate() or ByteBuffer.asReadOnlyBuffer() before using or reading from the buffer. This behavior
     * will be changed in a future major version of the SDK.
     * </p>
     * 
     * @param kerberosKeytab
     *        The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the
     *        encrypted keys. You can load the keytab from a file by providing the file's address. If you're using
     *        the CLI, it performs base64 encoding for you. Otherwise, provide the base64-encoded text.</p>
     *        <p>
     *        If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     */
    public void setKerberosKeytab(java.nio.ByteBuffer kerberosKeytab) {
        this.kerberosKeytab = kerberosKeytab;
    }

    /**
     * <p>
     * The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the
     * encrypted keys. You can load the keytab from a file by providing the file's address. If you're using the CLI,
     * it performs base64 encoding for you. Otherwise, provide the base64-encoded text.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * <p>
     * {@code ByteBuffer}s are stateful. Calling their {@code get} methods changes their {@code position}. We
     * recommend using {@link java.nio.ByteBuffer#asReadOnlyBuffer()} to create a read-only view of the buffer with
     * an independent {@code position}, and calling {@code get} methods on this rather than directly on the returned
     * {@code ByteBuffer}. Doing so will ensure that anyone else using the {@code ByteBuffer} will not be affected by
     * changes to the {@code position}.
     * </p>
     * 
     * @return The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the
     *         encrypted keys. You can load the keytab from a file by providing the file's address. If you're using
     *         the CLI, it performs base64 encoding for you. Otherwise, provide the base64-encoded text.</p>
     *         <p>
     *         If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     */
    public java.nio.ByteBuffer getKerberosKeytab() {
        return this.kerberosKeytab;
    }

    /**
     * <p>
     * The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the
     * encrypted keys. You can load the keytab from a file by providing the file's address. If you're using the CLI,
     * it performs base64 encoding for you. Otherwise, provide the base64-encoded text.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * <p>
     * The AWS SDK for Java performs a Base64 encoding on this field before sending this request to the AWS service.
     * Users of the SDK should not perform Base64 encoding on this field.
     * </p>
     * <p>
     * Warning: ByteBuffers returned by the SDK are mutable. Changes to the content or position of the byte buffer
     * will be seen by all objects that have a reference to this object. It is recommended to call
     * ByteBuffer.duplicate() or ByteBuffer.asReadOnlyBuffer() before using or reading from the buffer. This behavior
     * will be changed in a future major version of the SDK.
     * </p>
     * 
     * @param kerberosKeytab
     *        The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the
     *        encrypted keys. You can load the keytab from a file by providing the file's address. If you're using
     *        the CLI, it performs base64 encoding for you. Otherwise, provide the base64-encoded text.</p>
     *        <p>
     *        If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withKerberosKeytab(java.nio.ByteBuffer kerberosKeytab) {
        setKerberosKeytab(kerberosKeytab);
        return this;
    }
    /**
     * <p>
     * The <code>krb5.conf</code> file that contains the Kerberos configuration information. You can load the
     * <code>krb5.conf</code> file by providing the file's address. If you're using the CLI, it performs the base64
     * encoding for you. Otherwise, provide the base64-encoded text.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * <p>
     * The AWS SDK for Java performs a Base64 encoding on this field before sending this request to the AWS service.
     * Users of the SDK should not perform Base64 encoding on this field.
     * </p>
     * <p>
     * Warning: ByteBuffers returned by the SDK are mutable. Changes to the content or position of the byte buffer
     * will be seen by all objects that have a reference to this object. It is recommended to call
     * ByteBuffer.duplicate() or ByteBuffer.asReadOnlyBuffer() before using or reading from the buffer. This behavior
     * will be changed in a future major version of the SDK.
     * </p>
     * 
     * @param kerberosKrb5Conf
     *        The <code>krb5.conf</code> file that contains the Kerberos configuration information. You can load the
     *        <code>krb5.conf</code> file by providing the file's address. If you're using the CLI, it performs the
     *        base64 encoding for you. Otherwise, provide the base64-encoded text.</p>
     *        <p>
     *        If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     */
    public void setKerberosKrb5Conf(java.nio.ByteBuffer kerberosKrb5Conf) {
        this.kerberosKrb5Conf = kerberosKrb5Conf;
    }

    /**
     * <p>
     * The <code>krb5.conf</code> file that contains the Kerberos configuration information. You can load the
     * <code>krb5.conf</code> file by providing the file's address. If you're using the CLI, it performs the base64
     * encoding for you. Otherwise, provide the base64-encoded text.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * <p>
     * {@code ByteBuffer}s are stateful. Calling their {@code get} methods changes their {@code position}. We
     * recommend using {@link java.nio.ByteBuffer#asReadOnlyBuffer()} to create a read-only view of the buffer with
     * an independent {@code position}, and calling {@code get} methods on this rather than directly on the returned
     * {@code ByteBuffer}. Doing so will ensure that anyone else using the {@code ByteBuffer} will not be affected by
     * changes to the {@code position}.
     * </p>
     * 
     * @return The <code>krb5.conf</code> file that contains the Kerberos configuration information. You can load the
     *         <code>krb5.conf</code> file by providing the file's address. If you're using the CLI, it performs the
     *         base64 encoding for you. Otherwise, provide the base64-encoded text.</p>
     *         <p>
     *         If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     */
    public java.nio.ByteBuffer getKerberosKrb5Conf() {
        return this.kerberosKrb5Conf;
    }

    /**
     * <p>
     * The <code>krb5.conf</code> file that contains the Kerberos configuration information. You can load the
     * <code>krb5.conf</code> file by providing the file's address. If you're using the CLI, it performs the base64
     * encoding for you. Otherwise, provide the base64-encoded text.
     * </p>
     * <p>
     * If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * </p>
     * <p>
     * The AWS SDK for Java performs a Base64 encoding on this field before sending this request to the AWS service.
     * Users of the SDK should not perform Base64 encoding on this field.
     * </p>
     * <p>
     * Warning: ByteBuffers returned by the SDK are mutable. Changes to the content or position of the byte buffer
     * will be seen by all objects that have a reference to this object. It is recommended to call
     * ByteBuffer.duplicate() or ByteBuffer.asReadOnlyBuffer() before using or reading from the buffer. This behavior
     * will be changed in a future major version of the SDK.
     * </p>
     * 
     * @param kerberosKrb5Conf
     *        The <code>krb5.conf</code> file that contains the Kerberos configuration information. You can load the
     *        <code>krb5.conf</code> file by providing the file's address. If you're using the CLI, it performs the
     *        base64 encoding for you. Otherwise, provide the base64-encoded text.</p>
     *        <p>
     *        If <code>KERBEROS</code> is specified for <code>AuthenticationType</code>, this parameter is required.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withKerberosKrb5Conf(java.nio.ByteBuffer kerberosKrb5Conf) {
        setKerberosKrb5Conf(kerberosKrb5Conf);
        return this;
    }
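
    // Illustrative only: both kerberosKeytab and kerberosKrb5Conf take the raw file
    // contents as a ByteBuffer, and the SDK base64-encodes them before sending. A
    // sketch of loading them from hypothetical local paths (Files.readAllBytes may
    // throw java.io.IOException, which the caller must handle):
    //
    //   byte[] keytab = java.nio.file.Files.readAllBytes(java.nio.file.Paths.get("/etc/hdfs.keytab"));
    //   byte[] krb5 = java.nio.file.Files.readAllBytes(java.nio.file.Paths.get("/etc/krb5.conf"));
    //   request.withKerberosKeytab(java.nio.ByteBuffer.wrap(keytab))
    //          .withKerberosKrb5Conf(java.nio.ByteBuffer.wrap(krb5));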
    /**
     * <p>
     * The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.
     * </p>
     * 
     * @return The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.
     */
    public java.util.List<String> getAgentArns() {
        return agentArns;
    }

    /**
     * <p>
     * The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.
     * </p>
     * 
     * @param agentArns
     *        The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.
     */
    public void setAgentArns(java.util.Collection<String> agentArns) {
        if (agentArns == null) {
            this.agentArns = null;
            return;
        }

        this.agentArns = new java.util.ArrayList<String>(agentArns);
    }

    /**
     * <p>
     * The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.
     * </p>
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setAgentArns(java.util.Collection)} or {@link #withAgentArns(java.util.Collection)} if you want to
     * override the existing values.
     * </p>
     * 
     * @param agentArns
     *        The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withAgentArns(String... agentArns) {
        if (this.agentArns == null) {
            setAgentArns(new java.util.ArrayList<String>(agentArns.length));
        }
        for (String ele : agentArns) {
            this.agentArns.add(ele);
        }
        return this;
    }

    /**
     * <p>
     * The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.
     * </p>
     * 
     * @param agentArns
     *        The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withAgentArns(java.util.Collection<String> agentArns) {
        setAgentArns(agentArns);
        return this;
    }
    /**
     * <p>
     * The key-value pair that represents the tag that you want to add to the location. The value can be an empty
     * string. We recommend using tags to name your resources.
     * </p>
     * 
     * @return The key-value pair that represents the tag that you want to add to the location. The value can be an
     *         empty string. We recommend using tags to name your resources.
     */
    public java.util.List<TagListEntry> getTags() {
        return tags;
    }

    /**
     * <p>
     * The key-value pair that represents the tag that you want to add to the location. The value can be an empty
     * string. We recommend using tags to name your resources.
     * </p>
     * 
     * @param tags
     *        The key-value pair that represents the tag that you want to add to the location. The value can be an
     *        empty string. We recommend using tags to name your resources.
     */
    public void setTags(java.util.Collection<TagListEntry> tags) {
        if (tags == null) {
            this.tags = null;
            return;
        }

        this.tags = new java.util.ArrayList<TagListEntry>(tags);
    }

    /**
     * <p>
     * The key-value pair that represents the tag that you want to add to the location. The value can be an empty
     * string. We recommend using tags to name your resources.
     * </p>
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setTags(java.util.Collection)} or {@link #withTags(java.util.Collection)} if you want to override the
     * existing values.
     * </p>
     * 
     * @param tags
     *        The key-value pair that represents the tag that you want to add to the location. The value can be an
     *        empty string. We recommend using tags to name your resources.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withTags(TagListEntry... tags) {
        if (this.tags == null) {
            setTags(new java.util.ArrayList<TagListEntry>(tags.length));
        }
        for (TagListEntry ele : tags) {
            this.tags.add(ele);
        }
        return this;
    }

    /**
     * <p>
     * The key-value pair that represents the tag that you want to add to the location. The value can be an empty
     * string. We recommend using tags to name your resources.
     * </p>
     * 
     * @param tags
     *        The key-value pair that represents the tag that you want to add to the location. The value can be an
     *        empty string. We recommend using tags to name your resources.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public CreateLocationHdfsRequest withTags(java.util.Collection<TagListEntry> tags) {
        setTags(tags);
        return this;
    }