/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/firehose/Firehose_EXPORTS.h>
#include <aws/firehose/model/ParquetCompression.h>
#include <aws/firehose/model/ParquetWriterVersion.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace Firehose
{
namespace Model
{

  /**
   * <p>A serializer to use for converting data to the Parquet format before storing
   * it in Amazon S3. For more information, see <a
   * href="https://parquet.apache.org/documentation/latest/">Apache
   * Parquet</a>.</p><p><h3>See Also:</h3>   <a
   * href="http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/ParquetSerDe">AWS
   * API Reference</a></p>
   */
  class AWS_FIREHOSE_API ParquetSerDe
  {
  public:

    ParquetSerDe();
    ParquetSerDe(Aws::Utils::Json::JsonView jsonValue);
    ParquetSerDe& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;
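
    // Usage sketch (illustrative only; not part of the generated interface).
    // The With* setters declared below each return *this, so a ParquetSerDe is
    // typically configured as a chain. The values shown are the documented
    // defaults for each field; the enum spellings come from the
    // ParquetCompression and ParquetWriterVersion headers included above.
    //
    //   Aws::Firehose::Model::ParquetSerDe serDe;
    //   serDe.WithBlockSizeBytes(256 * 1024 * 1024)   // 256 MiB HDFS block size
    //        .WithPageSizeBytes(1024 * 1024)          // 1 MiB page size
    //        .WithCompression(ParquetCompression::SNAPPY)
    //        .WithWriterVersion(ParquetWriterVersion::V1)
    //        .WithMaxPaddingBytes(0);
    //   auto json = serDe.Jsonize();  // serialize for a Firehose API request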

    /**
     * <p>The Hadoop Distributed File System (HDFS) block size. This is useful if you
     * intend to copy the data from Amazon S3 to HDFS before querying. The default is
     * 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for
     * padding calculations.</p>
     */
    inline int GetBlockSizeBytes() const { return m_blockSizeBytes; }

    /**
     * <p>The Hadoop Distributed File System (HDFS) block size. This is useful if you
     * intend to copy the data from Amazon S3 to HDFS before querying. The default is
     * 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for
     * padding calculations.</p>
     */
    inline bool BlockSizeBytesHasBeenSet() const { return m_blockSizeBytesHasBeenSet; }

    /**
     * <p>The Hadoop Distributed File System (HDFS) block size. This is useful if you
     * intend to copy the data from Amazon S3 to HDFS before querying. The default is
     * 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for
     * padding calculations.</p>
     */
    inline void SetBlockSizeBytes(int value) { m_blockSizeBytesHasBeenSet = true; m_blockSizeBytes = value; }

    /**
     * <p>The Hadoop Distributed File System (HDFS) block size. This is useful if you
     * intend to copy the data from Amazon S3 to HDFS before querying. The default is
     * 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for
     * padding calculations.</p>
     */
    inline ParquetSerDe& WithBlockSizeBytes(int value) { SetBlockSizeBytes(value); return *this; }

    /**
     * <p>The Parquet page size. Column chunks are divided into pages. A page is
     * conceptually an indivisible unit (in terms of compression and encoding). The
     * minimum value is 64 KiB and the default is 1 MiB.</p>
     */
    inline int GetPageSizeBytes() const { return m_pageSizeBytes; }

    /**
     * <p>The Parquet page size. Column chunks are divided into pages. A page is
     * conceptually an indivisible unit (in terms of compression and encoding). The
     * minimum value is 64 KiB and the default is 1 MiB.</p>
     */
    inline bool PageSizeBytesHasBeenSet() const { return m_pageSizeBytesHasBeenSet; }

    /**
     * <p>The Parquet page size. Column chunks are divided into pages. A page is
     * conceptually an indivisible unit (in terms of compression and encoding). The
     * minimum value is 64 KiB and the default is 1 MiB.</p>
     */
    inline void SetPageSizeBytes(int value) { m_pageSizeBytesHasBeenSet = true; m_pageSizeBytes = value; }

    /**
     * <p>The Parquet page size. Column chunks are divided into pages. A page is
     * conceptually an indivisible unit (in terms of compression and encoding). The
     * minimum value is 64 KiB and the default is 1 MiB.</p>
     */
    inline ParquetSerDe& WithPageSizeBytes(int value) { SetPageSizeBytes(value); return *this; }

    /**
     * <p>The compression codec to use over data blocks. The possible values are
     * <code>UNCOMPRESSED</code>, <code>SNAPPY</code>, and <code>GZIP</code>, with the
     * default being <code>SNAPPY</code>. Use <code>SNAPPY</code> for higher
     * decompression speed. Use <code>GZIP</code> if the compression ratio is more
     * important than speed.</p>
     */
    inline const ParquetCompression& GetCompression() const { return m_compression; }

    /**
     * <p>The compression codec to use over data blocks. The possible values are
     * <code>UNCOMPRESSED</code>, <code>SNAPPY</code>, and <code>GZIP</code>, with the
     * default being <code>SNAPPY</code>. Use <code>SNAPPY</code> for higher
     * decompression speed. Use <code>GZIP</code> if the compression ratio is more
     * important than speed.</p>
     */
    inline bool CompressionHasBeenSet() const { return m_compressionHasBeenSet; }

    /**
     * <p>The compression codec to use over data blocks. The possible values are
     * <code>UNCOMPRESSED</code>, <code>SNAPPY</code>, and <code>GZIP</code>, with the
     * default being <code>SNAPPY</code>. Use <code>SNAPPY</code> for higher
     * decompression speed. Use <code>GZIP</code> if the compression ratio is more
     * important than speed.</p>
     */
    inline void SetCompression(const ParquetCompression& value) { m_compressionHasBeenSet = true; m_compression = value; }

    /**
     * <p>The compression codec to use over data blocks. The possible values are
     * <code>UNCOMPRESSED</code>, <code>SNAPPY</code>, and <code>GZIP</code>, with the
     * default being <code>SNAPPY</code>. Use <code>SNAPPY</code> for higher
     * decompression speed. Use <code>GZIP</code> if the compression ratio is more
     * important than speed.</p>
     */
    inline void SetCompression(ParquetCompression&& value) { m_compressionHasBeenSet = true; m_compression = std::move(value); }

    /**
     * <p>The compression codec to use over data blocks. The possible values are
     * <code>UNCOMPRESSED</code>, <code>SNAPPY</code>, and <code>GZIP</code>, with the
     * default being <code>SNAPPY</code>. Use <code>SNAPPY</code> for higher
     * decompression speed. Use <code>GZIP</code> if the compression ratio is more
     * important than speed.</p>
     */
    inline ParquetSerDe& WithCompression(const ParquetCompression& value) { SetCompression(value); return *this; }

    /**
     * <p>The compression codec to use over data blocks. The possible values are
     * <code>UNCOMPRESSED</code>, <code>SNAPPY</code>, and <code>GZIP</code>, with the
     * default being <code>SNAPPY</code>. Use <code>SNAPPY</code> for higher
     * decompression speed. Use <code>GZIP</code> if the compression ratio is more
     * important than speed.</p>
     */
    inline ParquetSerDe& WithCompression(ParquetCompression&& value) { SetCompression(std::move(value)); return *this; }
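
    // Sketch of the two SetCompression overloads above (assumes a ParquetSerDe
    // instance named serDe). ParquetCompression is cheap to copy, but the
    // rvalue overload mirrors the setter pattern used across the SDK:
    //
    //   ParquetCompression codec = ParquetCompression::GZIP;
    //   serDe.SetCompression(codec);                     // copy overload
    //   serDe.SetCompression(ParquetCompression::GZIP);  // prvalue binds to &&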

    /**
     * <p>Indicates whether to enable dictionary compression.</p>
     */
    inline bool GetEnableDictionaryCompression() const { return m_enableDictionaryCompression; }

    /**
     * <p>Indicates whether to enable dictionary compression.</p>
     */
    inline bool EnableDictionaryCompressionHasBeenSet() const { return m_enableDictionaryCompressionHasBeenSet; }

    /**
     * <p>Indicates whether to enable dictionary compression.</p>
     */
    inline void SetEnableDictionaryCompression(bool value) { m_enableDictionaryCompressionHasBeenSet = true; m_enableDictionaryCompression = value; }

    /**
     * <p>Indicates whether to enable dictionary compression.</p>
     */
    inline ParquetSerDe& WithEnableDictionaryCompression(bool value) { SetEnableDictionaryCompression(value); return *this; }

    /**
     * <p>The maximum amount of padding to apply. This is useful if you intend to copy
     * the data from Amazon S3 to HDFS before querying. The default is 0.</p>
     */
    inline int GetMaxPaddingBytes() const { return m_maxPaddingBytes; }

    /**
     * <p>The maximum amount of padding to apply. This is useful if you intend to copy
     * the data from Amazon S3 to HDFS before querying. The default is 0.</p>
     */
    inline bool MaxPaddingBytesHasBeenSet() const { return m_maxPaddingBytesHasBeenSet; }

    /**
     * <p>The maximum amount of padding to apply. This is useful if you intend to copy
     * the data from Amazon S3 to HDFS before querying. The default is 0.</p>
     */
    inline void SetMaxPaddingBytes(int value) { m_maxPaddingBytesHasBeenSet = true; m_maxPaddingBytes = value; }

    /**
     * <p>The maximum amount of padding to apply. This is useful if you intend to copy
     * the data from Amazon S3 to HDFS before querying. The default is 0.</p>
     */
    inline ParquetSerDe& WithMaxPaddingBytes(int value) { SetMaxPaddingBytes(value); return *this; }
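
    // Sketch of how the padding settings interact (values are hypothetical):
    // BlockSizeBytes feeds Kinesis Data Firehose's padding calculations, and
    // MaxPaddingBytes caps how much padding is actually applied, so the two
    // are typically tuned together when S3-to-HDFS copies are planned.
    //
    //   serDe.WithBlockSizeBytes(128 * 1024 * 1024)  // 128 MiB blocks
    //        .WithMaxPaddingBytes(8 * 1024 * 1024);  // allow up to 8 MiB padding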

    /**
     * <p>Indicates the version of row format to output. The possible values are
     * <code>V1</code> and <code>V2</code>. The default is <code>V1</code>.</p>
     */
    inline const ParquetWriterVersion& GetWriterVersion() const { return m_writerVersion; }

    /**
     * <p>Indicates the version of row format to output. The possible values are
     * <code>V1</code> and <code>V2</code>. The default is <code>V1</code>.</p>
     */
    inline bool WriterVersionHasBeenSet() const { return m_writerVersionHasBeenSet; }

    /**
     * <p>Indicates the version of row format to output. The possible values are
     * <code>V1</code> and <code>V2</code>. The default is <code>V1</code>.</p>
     */
    inline void SetWriterVersion(const ParquetWriterVersion& value) { m_writerVersionHasBeenSet = true; m_writerVersion = value; }

    /**
     * <p>Indicates the version of row format to output. The possible values are
     * <code>V1</code> and <code>V2</code>. The default is <code>V1</code>.</p>
     */
    inline void SetWriterVersion(ParquetWriterVersion&& value) { m_writerVersionHasBeenSet = true; m_writerVersion = std::move(value); }

    /**
     * <p>Indicates the version of row format to output. The possible values are
     * <code>V1</code> and <code>V2</code>. The default is <code>V1</code>.</p>
     */
    inline ParquetSerDe& WithWriterVersion(const ParquetWriterVersion& value) { SetWriterVersion(value); return *this; }

    /**
     * <p>Indicates the version of row format to output. The possible values are
     * <code>V1</code> and <code>V2</code>. The default is <code>V1</code>.</p>
     */
    inline ParquetSerDe& WithWriterVersion(ParquetWriterVersion&& value) { SetWriterVersion(std::move(value)); return *this; }

  private:

    int m_blockSizeBytes;
    bool m_blockSizeBytesHasBeenSet;

    int m_pageSizeBytes;
    bool m_pageSizeBytesHasBeenSet;

    ParquetCompression m_compression;
    bool m_compressionHasBeenSet;

    bool m_enableDictionaryCompression;
    bool m_enableDictionaryCompressionHasBeenSet;

    int m_maxPaddingBytes;
    bool m_maxPaddingBytesHasBeenSet;

    ParquetWriterVersion m_writerVersion;
    bool m_writerVersionHasBeenSet;
  };

} // namespace Model
} // namespace Firehose
} // namespace Aws