/*
 * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.kinesisfirehose.model;

import java.io.Serializable;
import javax.annotation.Generated;

import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;

/**
 * <p>
 * A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information,
 * see Apache Parquet.
 * </p>
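 * <p>
 * For illustration, a minimal sketch of configuring this serializer through the fluent setters defined on this class.
 * The values shown mirror the documented defaults and are assumptions for the example, not recommendations:
 * </p>
 *
 * <pre>
 * {@code
 * ParquetSerDe parquetSerDe = new ParquetSerDe()
 *         .withCompression("SNAPPY")                // documented default; use "GZIP" when ratio matters more than speed
 *         .withBlockSizeBytes(256 * 1024 * 1024)    // 256 MiB HDFS block size (documented default)
 *         .withPageSizeBytes(1024 * 1024)           // 1 MiB page size (documented default)
 *         .withEnableDictionaryCompression(true)
 *         .withMaxPaddingBytes(0)                   // documented default
 *         .withWriterVersion("V1");                 // documented default
 * }
 * </pre>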
 *
 * @see AWS API Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ParquetSerDe implements Serializable, Cloneable, StructuredPojo {

    /**
     * <p>
     * The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon
     * S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this
     * value for padding calculations.
     * </p>
     */
    private Integer blockSizeBytes;

    /**
     * <p>
     * The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in
     * terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
     * </p>
     */
    private Integer pageSizeBytes;

    /**
     * <p>
     * The compression code to use over data blocks. The possible values are <code>UNCOMPRESSED</code>,
     * <code>SNAPPY</code>, and <code>GZIP</code>, with the default being <code>SNAPPY</code>. Use <code>SNAPPY</code>
     * for higher decompression speed. Use <code>GZIP</code> if the compression ratio is more important than speed.
     * </p>
     */
    private String compression;

    /**
     * <p>
     * Indicates whether to enable dictionary compression.
     * </p>
     */
    private Boolean enableDictionaryCompression;

    /**
     * <p>
     * The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS
     * before querying. The default is 0.
     * </p>
     */
    private Integer maxPaddingBytes;

    /**
     * <p>
     * Indicates the version of row format to output. The possible values are <code>V1</code> and <code>V2</code>. The
     * default is <code>V1</code>.
     * </p>
     */
    private String writerVersion;

    /**
     * <p>
     * The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon
     * S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this
     * value for padding calculations.
     * </p>
     * 
     * @param blockSizeBytes
     *        The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from
     *        Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data
     *        Firehose uses this value for padding calculations.
     */
    public void setBlockSizeBytes(Integer blockSizeBytes) {
        this.blockSizeBytes = blockSizeBytes;
    }

    /**
     * <p>
     * The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon
     * S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this
     * value for padding calculations.
     * </p>
     * 
     * @return The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data
     *         from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data
     *         Firehose uses this value for padding calculations.
     */
    public Integer getBlockSizeBytes() {
        return this.blockSizeBytes;
    }

    /**
     * <p>
     * The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon
     * S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this
     * value for padding calculations.
     * </p>
     * 
     * @param blockSizeBytes
     *        The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from
     *        Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data
     *        Firehose uses this value for padding calculations.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ParquetSerDe withBlockSizeBytes(Integer blockSizeBytes) {
        setBlockSizeBytes(blockSizeBytes);
        return this;
    }

    /**
     * <p>
     * The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in
     * terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
     * </p>
     * 
     * @param pageSizeBytes
     *        The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit
     *        (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
     */
    public void setPageSizeBytes(Integer pageSizeBytes) {
        this.pageSizeBytes = pageSizeBytes;
    }

    /**
     * <p>
     * The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in
     * terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
     * </p>
     * 
     * @return The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit
     *         (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
     */
    public Integer getPageSizeBytes() {
        return this.pageSizeBytes;
    }

    /**
     * <p>
     * The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in
     * terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
     * </p>
     * 
     * @param pageSizeBytes
     *        The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit
     *        (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ParquetSerDe withPageSizeBytes(Integer pageSizeBytes) {
        setPageSizeBytes(pageSizeBytes);
        return this;
    }

    /**
     * <p>
     * The compression code to use over data blocks. The possible values are <code>UNCOMPRESSED</code>,
     * <code>SNAPPY</code>, and <code>GZIP</code>, with the default being <code>SNAPPY</code>. Use <code>SNAPPY</code>
     * for higher decompression speed. Use <code>GZIP</code> if the compression ratio is more important than speed.
     * </p>
     * 
     * @param compression
     *        The compression code to use over data blocks. The possible values are <code>UNCOMPRESSED</code>,
     *        <code>SNAPPY</code>, and <code>GZIP</code>, with the default being <code>SNAPPY</code>. Use
     *        <code>SNAPPY</code> for higher decompression speed. Use <code>GZIP</code> if the compression ratio is
     *        more important than speed.
* @see ParquetCompression
*/
public void setCompression(String compression) {
this.compression = compression;
}
/**
     * <p>
     * The compression code to use over data blocks. The possible values are <code>UNCOMPRESSED</code>,
     * <code>SNAPPY</code>, and <code>GZIP</code>, with the default being <code>SNAPPY</code>. Use <code>SNAPPY</code>
     * for higher decompression speed. Use <code>GZIP</code> if the compression ratio is more important than speed.
     * </p>
     * 
     * @return The compression code to use over data blocks. The possible values are <code>UNCOMPRESSED</code>,
     *         <code>SNAPPY</code>, and <code>GZIP</code>, with the default being <code>SNAPPY</code>. Use
     *         <code>SNAPPY</code> for higher decompression speed. Use <code>GZIP</code> if the compression ratio is
     *         more important than speed.
* @see ParquetCompression
*/
public String getCompression() {
return this.compression;
}
/**
     * <p>
     * The compression code to use over data blocks. The possible values are <code>UNCOMPRESSED</code>,
     * <code>SNAPPY</code>, and <code>GZIP</code>, with the default being <code>SNAPPY</code>. Use <code>SNAPPY</code>
     * for higher decompression speed. Use <code>GZIP</code> if the compression ratio is more important than speed.
     * </p>
     * 
     * @param compression
     *        The compression code to use over data blocks. The possible values are <code>UNCOMPRESSED</code>,
     *        <code>SNAPPY</code>, and <code>GZIP</code>, with the default being <code>SNAPPY</code>. Use
     *        <code>SNAPPY</code> for higher decompression speed. Use <code>GZIP</code> if the compression ratio is
     *        more important than speed.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ParquetCompression
*/
public ParquetSerDe withCompression(String compression) {
setCompression(compression);
return this;
}
/**
     * <p>
     * The compression code to use over data blocks. The possible values are <code>UNCOMPRESSED</code>,
     * <code>SNAPPY</code>, and <code>GZIP</code>, with the default being <code>SNAPPY</code>. Use <code>SNAPPY</code>
     * for higher decompression speed. Use <code>GZIP</code> if the compression ratio is more important than speed.
     * </p>
     * 
     * @param compression
     *        The compression code to use over data blocks. The possible values are <code>UNCOMPRESSED</code>,
     *        <code>SNAPPY</code>, and <code>GZIP</code>, with the default being <code>SNAPPY</code>. Use
     *        <code>SNAPPY</code> for higher decompression speed. Use <code>GZIP</code> if the compression ratio is
     *        more important than speed.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ParquetCompression
*/
public ParquetSerDe withCompression(ParquetCompression compression) {
this.compression = compression.toString();
return this;
}
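
    // Usage note (an illustrative sketch, assuming ParquetCompression exposes constants matching the documented
    // values UNCOMPRESSED, SNAPPY, and GZIP): the enum overload above is equivalent to the string form, e.g.
    //
    //     ParquetSerDe serDe = new ParquetSerDe().withCompression(ParquetCompression.SNAPPY);
    //
    // produces the same configuration as withCompression("SNAPPY").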
/**
     * <p>
     * Indicates whether to enable dictionary compression.
     * </p>
     * 
     * @param enableDictionaryCompression
     *        Indicates whether to enable dictionary compression.
     */
    public void setEnableDictionaryCompression(Boolean enableDictionaryCompression) {
        this.enableDictionaryCompression = enableDictionaryCompression;
    }

    /**
     * <p>
     * Indicates whether to enable dictionary compression.
     * </p>
     * 
     * @return Indicates whether to enable dictionary compression.
     */
    public Boolean getEnableDictionaryCompression() {
        return this.enableDictionaryCompression;
    }

    /**
     * <p>
     * Indicates whether to enable dictionary compression.
     * </p>
     * 
     * @param enableDictionaryCompression
     *        Indicates whether to enable dictionary compression.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ParquetSerDe withEnableDictionaryCompression(Boolean enableDictionaryCompression) {
        setEnableDictionaryCompression(enableDictionaryCompression);
        return this;
    }

    /**
     * <p>
     * Indicates whether to enable dictionary compression.
     * </p>
     * 
     * @return Indicates whether to enable dictionary compression.
     */
    public Boolean isEnableDictionaryCompression() {
        return this.enableDictionaryCompression;
    }

    /**
     * <p>
     * The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS
     * before querying. The default is 0.
     * </p>
     * 
     * @param maxPaddingBytes
     *        The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to
     *        HDFS before querying. The default is 0.
     */
    public void setMaxPaddingBytes(Integer maxPaddingBytes) {
        this.maxPaddingBytes = maxPaddingBytes;
    }

    /**
     * <p>
     * The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS
     * before querying. The default is 0.
     * </p>
     * 
     * @return The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to
     *         HDFS before querying. The default is 0.
     */
    public Integer getMaxPaddingBytes() {
        return this.maxPaddingBytes;
    }

    /**
     * <p>
     * The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS
     * before querying. The default is 0.
     * </p>
     * 
     * @param maxPaddingBytes
     *        The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to
     *        HDFS before querying. The default is 0.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ParquetSerDe withMaxPaddingBytes(Integer maxPaddingBytes) {
        setMaxPaddingBytes(maxPaddingBytes);
        return this;
    }

    /**
     * <p>
     * Indicates the version of row format to output. The possible values are <code>V1</code> and <code>V2</code>. The
     * default is <code>V1</code>.
     * </p>
     * 
     * @param writerVersion
     *        Indicates the version of row format to output. The possible values are <code>V1</code> and
     *        <code>V2</code>. The default is <code>V1</code>.
* @see ParquetWriterVersion
*/
public void setWriterVersion(String writerVersion) {
this.writerVersion = writerVersion;
}
/**
     * <p>
     * Indicates the version of row format to output. The possible values are <code>V1</code> and <code>V2</code>. The
     * default is <code>V1</code>.
     * </p>
     * 
     * @return Indicates the version of row format to output. The possible values are <code>V1</code> and
     *         <code>V2</code>. The default is <code>V1</code>.
* @see ParquetWriterVersion
*/
public String getWriterVersion() {
return this.writerVersion;
}
/**
     * <p>
     * Indicates the version of row format to output. The possible values are <code>V1</code> and <code>V2</code>. The
     * default is <code>V1</code>.
     * </p>
     * 
     * @param writerVersion
     *        Indicates the version of row format to output. The possible values are <code>V1</code> and
     *        <code>V2</code>. The default is <code>V1</code>.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ParquetWriterVersion
*/
public ParquetSerDe withWriterVersion(String writerVersion) {
setWriterVersion(writerVersion);
return this;
}
/**
     * <p>
     * Indicates the version of row format to output. The possible values are <code>V1</code> and <code>V2</code>. The
     * default is <code>V1</code>.
     * </p>
     * 
     * @param writerVersion
     *        Indicates the version of row format to output. The possible values are <code>V1</code> and
     *        <code>V2</code>. The default is <code>V1</code>.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ParquetWriterVersion
*/
public ParquetSerDe withWriterVersion(ParquetWriterVersion writerVersion) {
this.writerVersion = writerVersion.toString();
return this;
}
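
    // Usage note (an illustrative sketch, assuming ParquetWriterVersion exposes constants matching the documented
    // values V1 and V2): the enum overload above is equivalent to the string form, e.g.
    //
    //     ParquetSerDe serDe = new ParquetSerDe().withWriterVersion(ParquetWriterVersion.V1);
    //
    // produces the same configuration as withWriterVersion("V1").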
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getBlockSizeBytes() != null)
sb.append("BlockSizeBytes: ").append(getBlockSizeBytes()).append(",");
if (getPageSizeBytes() != null)
sb.append("PageSizeBytes: ").append(getPageSizeBytes()).append(",");
if (getCompression() != null)
sb.append("Compression: ").append(getCompression()).append(",");
if (getEnableDictionaryCompression() != null)
sb.append("EnableDictionaryCompression: ").append(getEnableDictionaryCompression()).append(",");
if (getMaxPaddingBytes() != null)
sb.append("MaxPaddingBytes: ").append(getMaxPaddingBytes()).append(",");
if (getWriterVersion() != null)
sb.append("WriterVersion: ").append(getWriterVersion());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof ParquetSerDe == false)
return false;
ParquetSerDe other = (ParquetSerDe) obj;
if (other.getBlockSizeBytes() == null ^ this.getBlockSizeBytes() == null)
return false;
if (other.getBlockSizeBytes() != null && other.getBlockSizeBytes().equals(this.getBlockSizeBytes()) == false)
return false;
if (other.getPageSizeBytes() == null ^ this.getPageSizeBytes() == null)
return false;
if (other.getPageSizeBytes() != null && other.getPageSizeBytes().equals(this.getPageSizeBytes()) == false)
return false;
if (other.getCompression() == null ^ this.getCompression() == null)
return false;
if (other.getCompression() != null && other.getCompression().equals(this.getCompression()) == false)
return false;
if (other.getEnableDictionaryCompression() == null ^ this.getEnableDictionaryCompression() == null)
return false;
if (other.getEnableDictionaryCompression() != null && other.getEnableDictionaryCompression().equals(this.getEnableDictionaryCompression()) == false)
return false;
if (other.getMaxPaddingBytes() == null ^ this.getMaxPaddingBytes() == null)
return false;
if (other.getMaxPaddingBytes() != null && other.getMaxPaddingBytes().equals(this.getMaxPaddingBytes()) == false)
return false;
if (other.getWriterVersion() == null ^ this.getWriterVersion() == null)
return false;
if (other.getWriterVersion() != null && other.getWriterVersion().equals(this.getWriterVersion()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getBlockSizeBytes() == null) ? 0 : getBlockSizeBytes().hashCode());
hashCode = prime * hashCode + ((getPageSizeBytes() == null) ? 0 : getPageSizeBytes().hashCode());
hashCode = prime * hashCode + ((getCompression() == null) ? 0 : getCompression().hashCode());
hashCode = prime * hashCode + ((getEnableDictionaryCompression() == null) ? 0 : getEnableDictionaryCompression().hashCode());
hashCode = prime * hashCode + ((getMaxPaddingBytes() == null) ? 0 : getMaxPaddingBytes().hashCode());
hashCode = prime * hashCode + ((getWriterVersion() == null) ? 0 : getWriterVersion().hashCode());
return hashCode;
}
@Override
public ParquetSerDe clone() {
try {
return (ParquetSerDe) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.kinesisfirehose.model.transform.ParquetSerDeMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}