/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/kinesis/Kinesis_EXPORTS.h>
#include <aws/core/utils/Array.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>

namespace Aws
{
namespace Kinesis
{
namespace Model
{

  /**
   * Represents the output for <code>PutRecords</code>.
   *
   * See Also: AWS API Reference
   */
  class AWS_KINESIS_API PutRecordsRequestEntry
  {
  public:
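
    // Illustrative sketch (assumed usage, not part of the generated header): a
    // minimal entry carries a data blob and a partition key; the literals below
    // are placeholders.
    //
    //   PutRecordsRequestEntry entry;
    //   entry.WithPartitionKey("user-1234")
    //        .WithData(Aws::Utils::ByteBuffer(
    //            reinterpret_cast<const unsigned char*>("hello"), 5));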

    /**
     * The data blob to put into the record, which is base64-encoded when the blob
     * is serialized. When the data blob (the payload before base64-encoding) is
     * added to the partition key size, the total size must not exceed the maximum
     * record size (1 MiB).
     */
    inline const Aws::Utils::ByteBuffer& GetData() const { return m_data; }
    inline bool DataHasBeenSet() const { return m_dataHasBeenSet; }
    inline void SetData(const Aws::Utils::ByteBuffer& value) { m_dataHasBeenSet = true; m_data = value; }
    inline void SetData(Aws::Utils::ByteBuffer&& value) { m_dataHasBeenSet = true; m_data = std::move(value); }
    inline PutRecordsRequestEntry& WithData(const Aws::Utils::ByteBuffer& value) { SetData(value); return *this; }
    inline PutRecordsRequestEntry& WithData(Aws::Utils::ByteBuffer&& value) { SetData(std::move(value)); return *this; }
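
    // Example (assumed usage): building the blob from a std::string. ByteBuffer
    // copies the bytes, so the string need not outlive the entry; the 1 MiB
    // limit applies to these raw bytes plus the partition key, before base64
    // encoding. `payload` is a placeholder.
    //
    //   std::string payload = "{\"temperature\": 21.5}";
    //   entry.SetData(Aws::Utils::ByteBuffer(
    //       reinterpret_cast<const unsigned char*>(payload.data()),
    //       payload.size()));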

    /**
     * The hash value used to determine explicitly the shard that the data record
     * is assigned to by overriding the partition key hash.
     */
    inline const Aws::String& GetExplicitHashKey() const { return m_explicitHashKey; }
    inline bool ExplicitHashKeyHasBeenSet() const { return m_explicitHashKeyHasBeenSet; }
    inline void SetExplicitHashKey(const Aws::String& value) { m_explicitHashKeyHasBeenSet = true; m_explicitHashKey = value; }
    inline void SetExplicitHashKey(Aws::String&& value) { m_explicitHashKeyHasBeenSet = true; m_explicitHashKey = std::move(value); }
    inline void SetExplicitHashKey(const char* value) { m_explicitHashKeyHasBeenSet = true; m_explicitHashKey.assign(value); }
    inline PutRecordsRequestEntry& WithExplicitHashKey(const Aws::String& value) { SetExplicitHashKey(value); return *this; }
    inline PutRecordsRequestEntry& WithExplicitHashKey(Aws::String&& value) { SetExplicitHashKey(std::move(value)); return *this; }
    inline PutRecordsRequestEntry& WithExplicitHashKey(const char* value) { SetExplicitHashKey(value); return *this; }
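
    // Example (assumed usage): the explicit hash key is the decimal string form
    // of a 128-bit integer; supplying one routes the record to whichever shard
    // owns that value in its hash-key range, bypassing the MD5 hash of the
    // partition key. The value below (2^127) is illustrative.
    //
    //   entry.SetExplicitHashKey("170141183460469231731687303715884105728");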

    /**
     * Determines which shard in the stream the data record is assigned to.
     * Partition keys are Unicode strings with a maximum length limit of 256
     * characters for each key. Amazon Kinesis Data Streams uses the partition key
     * as input to a hash function that maps the partition key and associated data
     * to a specific shard. Specifically, an MD5 hash function is used to map
     * partition keys to 128-bit integer values and to map associated data records
     * to shards. As a result of this hashing mechanism, all data records with the
     * same partition key map to the same shard within the stream.
     */
    inline const Aws::String& GetPartitionKey() const { return m_partitionKey; }
    inline bool PartitionKeyHasBeenSet() const { return m_partitionKeyHasBeenSet; }
    inline void SetPartitionKey(const Aws::String& value) { m_partitionKeyHasBeenSet = true; m_partitionKey = value; }
    inline void SetPartitionKey(Aws::String&& value) { m_partitionKeyHasBeenSet = true; m_partitionKey = std::move(value); }
    inline void SetPartitionKey(const char* value) { m_partitionKeyHasBeenSet = true; m_partitionKey.assign(value); }
    inline PutRecordsRequestEntry& WithPartitionKey(const Aws::String& value) { SetPartitionKey(value); return *this; }
    inline PutRecordsRequestEntry& WithPartitionKey(Aws::String&& value) { SetPartitionKey(std::move(value)); return *this; }
    inline PutRecordsRequestEntry& WithPartitionKey(const char* value) { SetPartitionKey(value); return *this; }

  private:

    Aws::Utils::ByteBuffer m_data;
    bool m_dataHasBeenSet = false;

    Aws::String m_explicitHashKey;
    bool m_explicitHashKeyHasBeenSet = false;

    Aws::String m_partitionKey;
    bool m_partitionKeyHasBeenSet = false;
  };

} // namespace Model
} // namespace Kinesis
} // namespace Aws
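
/*
 * Putting it together (illustrative sketch, not part of the generated header):
 * records that must stay ordered relative to each other can share a partition
 * key, since MD5 maps a given key to the same 128-bit value, and therefore the
 * same shard, every time. PutRecordsRequest and KinesisClient live in separate
 * headers; `events`, `ToByteBuffer`, and the stream name are placeholders.
 *
 *   Aws::Kinesis::Model::PutRecordsRequest request;
 *   request.SetStreamName("my-stream");
 *   for (const auto& event : events)
 *   {
 *     request.AddRecords(Aws::Kinesis::Model::PutRecordsRequestEntry()
 *         .WithPartitionKey("session-42")      // constant key -> one shard
 *         .WithData(ToByteBuffer(event)));     // hypothetical serializer
 *   }
 *   auto outcome = client.PutRecords(request); // Aws::Kinesis::KinesisClient
 */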