/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/kinesis/Kinesis_EXPORTS.h>
#include <aws/kinesis/KinesisRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/Array.h>
#include <utility>

namespace Aws
{
namespace Kinesis
{
namespace Model
{

  /**
   * <p>Represents the input for <code>PutRecord</code>.</p><p><h3>See Also:</h3>
   * <a
   * href="http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecordInput">AWS
   * API Reference</a></p>
   */
  class AWS_KINESIS_API PutRecordRequest : public KinesisRequest
  {
  public:
    PutRecordRequest();

    // Service request name is the Operation name which will send this request out;
    // each operation has a unique request name, so that we can get the operation's
    // name from this string.
    virtual const char* GetServiceRequestName() const override { return "PutRecord"; }

    Aws::String SerializePayload() const override;

    Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;

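    /**
     * Usage sketch (illustrative, not part of the generated API): build a
     * request and send it with a <code>KinesisClient</code>. The stream name
     * "my-stream" and partition key "user-1234" are assumptions for the
     * example, and <code>Aws::InitAPI</code> is assumed to have been called.
     *
     * @code
     * Aws::Kinesis::KinesisClient client;
     * Aws::Kinesis::Model::PutRecordRequest request;
     * request.SetStreamName("my-stream");
     * request.SetPartitionKey("user-1234");
     * const char payload[] = "hello";
     * request.SetData(Aws::Utils::ByteBuffer(
     *     reinterpret_cast<const unsigned char*>(payload), sizeof(payload) - 1));
     * auto outcome = client.PutRecord(request);
     * if (outcome.IsSuccess()) {
     *   // The result carries the shard ID and sequence number the service
     *   // assigned to the record.
     *   const auto& result = outcome.GetResult();
     *   Aws::String shardId = result.GetShardId();
     *   Aws::String sequenceNumber = result.GetSequenceNumber();
     * }
     * @endcode
     */
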
    /**
     * <p>The name of the stream to put the data record into.</p>
     */
    inline const Aws::String& GetStreamName() const{ return m_streamName; }

    /**
     * <p>The name of the stream to put the data record into.</p>
     */
    inline bool StreamNameHasBeenSet() const { return m_streamNameHasBeenSet; }

    /**
     * <p>The name of the stream to put the data record into.</p>
     */
    inline void SetStreamName(const Aws::String& value) { m_streamNameHasBeenSet = true; m_streamName = value; }

    /**
     * <p>The name of the stream to put the data record into.</p>
     */
    inline void SetStreamName(Aws::String&& value) { m_streamNameHasBeenSet = true; m_streamName = std::move(value); }

    /**
     * <p>The name of the stream to put the data record into.</p>
     */
    inline void SetStreamName(const char* value) { m_streamNameHasBeenSet = true; m_streamName.assign(value); }

    /**
     * <p>The name of the stream to put the data record into.</p>
     */
    inline PutRecordRequest& WithStreamName(const Aws::String& value) { SetStreamName(value); return *this;}

    /**
     * <p>The name of the stream to put the data record into.</p>
     */
    inline PutRecordRequest& WithStreamName(Aws::String&& value) { SetStreamName(std::move(value)); return *this;}

    /**
     * <p>The name of the stream to put the data record into.</p>
     */
    inline PutRecordRequest& WithStreamName(const char* value) { SetStreamName(value); return *this;}

    /**
     * <p>The data blob to put into the record, which is base64-encoded when the blob
     * is serialized. When the data blob (the payload before base64-encoding) is added
     * to the partition key size, the total size must not exceed the maximum record
     * size (1 MiB).</p>
     */
    inline const Aws::Utils::ByteBuffer& GetData() const{ return m_data; }

    /**
     * <p>The data blob to put into the record, which is base64-encoded when the blob
     * is serialized. When the data blob (the payload before base64-encoding) is added
     * to the partition key size, the total size must not exceed the maximum record
     * size (1 MiB).</p>
     */
    inline bool DataHasBeenSet() const { return m_dataHasBeenSet; }

    /**
     * <p>The data blob to put into the record, which is base64-encoded when the blob
     * is serialized. When the data blob (the payload before base64-encoding) is added
     * to the partition key size, the total size must not exceed the maximum record
     * size (1 MiB).</p>
     */
    inline void SetData(const Aws::Utils::ByteBuffer& value) { m_dataHasBeenSet = true; m_data = value; }

    /**
     * <p>The data blob to put into the record, which is base64-encoded when the blob
     * is serialized. When the data blob (the payload before base64-encoding) is added
     * to the partition key size, the total size must not exceed the maximum record
     * size (1 MiB).</p>
     */
    inline void SetData(Aws::Utils::ByteBuffer&& value) { m_dataHasBeenSet = true; m_data = std::move(value); }

    /**
     * <p>The data blob to put into the record, which is base64-encoded when the blob
     * is serialized. When the data blob (the payload before base64-encoding) is added
     * to the partition key size, the total size must not exceed the maximum record
     * size (1 MiB).</p>
     */
    inline PutRecordRequest& WithData(const Aws::Utils::ByteBuffer& value) { SetData(value); return *this;}

    /**
     * <p>The data blob to put into the record, which is base64-encoded when the blob
     * is serialized. When the data blob (the payload before base64-encoding) is added
     * to the partition key size, the total size must not exceed the maximum record
     * size (1 MiB).</p>
     */
    inline PutRecordRequest& WithData(Aws::Utils::ByteBuffer&& value) { SetData(std::move(value)); return *this;}

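    /**
     * Sizing sketch (illustrative): per the doc comment above, the 1 MiB limit
     * applies to the raw payload plus the partition key, measured before base64
     * encoding. A minimal client-side pre-check, with the payload size and key
     * below chosen purely for the example:
     *
     * @code
     * const Aws::String partitionKey = "user-1234";  // example key
     * Aws::Utils::ByteBuffer payload(1024);          // example 1 KiB payload
     * const size_t maxRecordSize = 1024 * 1024;      // 1 MiB record limit
     * if (payload.GetLength() + partitionKey.size() > maxRecordSize) {
     *   // The service would reject this record; split or compress it first.
     * }
     * @endcode
     */
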
    /**
     * <p>Determines which shard in the stream the data record is assigned to.
     * Partition keys are Unicode strings with a maximum length limit of 256
     * characters for each key. Amazon Kinesis Data Streams uses the partition key
     * as input to a hash function that maps the partition key and associated data
     * to a specific shard. Specifically, an MD5 hash function is used to map
     * partition keys to 128-bit integer values and to map associated data records
     * to shards. As a result of this hashing mechanism, all data records with the
     * same partition key map to the same shard within the stream.</p>
     */
    inline const Aws::String& GetPartitionKey() const{ return m_partitionKey; }

    /**
     * <p>Determines which shard in the stream the data record is assigned to.
     * Partition keys are Unicode strings with a maximum length limit of 256
     * characters for each key. Amazon Kinesis Data Streams uses the partition key
     * as input to a hash function that maps the partition key and associated data
     * to a specific shard. Specifically, an MD5 hash function is used to map
     * partition keys to 128-bit integer values and to map associated data records
     * to shards. As a result of this hashing mechanism, all data records with the
     * same partition key map to the same shard within the stream.</p>
     */
    inline bool PartitionKeyHasBeenSet() const { return m_partitionKeyHasBeenSet; }

    /**
     * <p>Determines which shard in the stream the data record is assigned to.
     * Partition keys are Unicode strings with a maximum length limit of 256
     * characters for each key. Amazon Kinesis Data Streams uses the partition key
     * as input to a hash function that maps the partition key and associated data
     * to a specific shard. Specifically, an MD5 hash function is used to map
     * partition keys to 128-bit integer values and to map associated data records
     * to shards. As a result of this hashing mechanism, all data records with the
     * same partition key map to the same shard within the stream.</p>
     */
    inline void SetPartitionKey(const Aws::String& value) { m_partitionKeyHasBeenSet = true; m_partitionKey = value; }

    /**
     * <p>Determines which shard in the stream the data record is assigned to.
     * Partition keys are Unicode strings with a maximum length limit of 256
     * characters for each key. Amazon Kinesis Data Streams uses the partition key
     * as input to a hash function that maps the partition key and associated data
     * to a specific shard. Specifically, an MD5 hash function is used to map
     * partition keys to 128-bit integer values and to map associated data records
     * to shards. As a result of this hashing mechanism, all data records with the
     * same partition key map to the same shard within the stream.</p>
     */
    inline void SetPartitionKey(Aws::String&& value) { m_partitionKeyHasBeenSet = true; m_partitionKey = std::move(value); }

    /**
     * <p>Determines which shard in the stream the data record is assigned to.
     * Partition keys are Unicode strings with a maximum length limit of 256
     * characters for each key. Amazon Kinesis Data Streams uses the partition key
     * as input to a hash function that maps the partition key and associated data
     * to a specific shard. Specifically, an MD5 hash function is used to map
     * partition keys to 128-bit integer values and to map associated data records
     * to shards. As a result of this hashing mechanism, all data records with the
     * same partition key map to the same shard within the stream.</p>
     */
    inline void SetPartitionKey(const char* value) { m_partitionKeyHasBeenSet = true; m_partitionKey.assign(value); }

    /**
     * <p>Determines which shard in the stream the data record is assigned to.
     * Partition keys are Unicode strings with a maximum length limit of 256
     * characters for each key. Amazon Kinesis Data Streams uses the partition key
     * as input to a hash function that maps the partition key and associated data
     * to a specific shard. Specifically, an MD5 hash function is used to map
     * partition keys to 128-bit integer values and to map associated data records
     * to shards. As a result of this hashing mechanism, all data records with the
     * same partition key map to the same shard within the stream.</p>
     */
    inline PutRecordRequest& WithPartitionKey(const Aws::String& value) { SetPartitionKey(value); return *this;}

    /**
     * <p>Determines which shard in the stream the data record is assigned to.
     * Partition keys are Unicode strings with a maximum length limit of 256
     * characters for each key. Amazon Kinesis Data Streams uses the partition key
     * as input to a hash function that maps the partition key and associated data
     * to a specific shard. Specifically, an MD5 hash function is used to map
     * partition keys to 128-bit integer values and to map associated data records
     * to shards. As a result of this hashing mechanism, all data records with the
     * same partition key map to the same shard within the stream.</p>
     */
    inline PutRecordRequest& WithPartitionKey(Aws::String&& value) { SetPartitionKey(std::move(value)); return *this;}

    /**
     * <p>Determines which shard in the stream the data record is assigned to.
     * Partition keys are Unicode strings with a maximum length limit of 256
     * characters for each key. Amazon Kinesis Data Streams uses the partition key
     * as input to a hash function that maps the partition key and associated data
     * to a specific shard. Specifically, an MD5 hash function is used to map
     * partition keys to 128-bit integer values and to map associated data records
     * to shards. As a result of this hashing mechanism, all data records with the
     * same partition key map to the same shard within the stream.</p>
     */
    inline PutRecordRequest& WithPartitionKey(const char* value) { SetPartitionKey(value); return *this;}

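    /**
     * Partitioning sketch (illustrative): because records sharing a partition
     * key hash to the same shard, a per-entity key preserves per-entity order.
     * The stream name and key scheme below are assumptions for the example.
     *
     * @code
     * // All records for a given device land on the same shard, so a consumer
     * // reading that shard sees the device's records in put order.
     * Aws::Kinesis::Model::PutRecordRequest request;
     * request.WithStreamName("my-stream")
     *        .WithPartitionKey("device-42");
     * @endcode
     */
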
    /**
     * <p>The hash value used to explicitly determine the shard the data record is
     * assigned to by overriding the partition key hash.</p>
     */
    inline const Aws::String& GetExplicitHashKey() const{ return m_explicitHashKey; }

    /**
     * <p>The hash value used to explicitly determine the shard the data record is
     * assigned to by overriding the partition key hash.</p>
     */
    inline bool ExplicitHashKeyHasBeenSet() const { return m_explicitHashKeyHasBeenSet; }

    /**
     * <p>The hash value used to explicitly determine the shard the data record is
     * assigned to by overriding the partition key hash.</p>
     */
    inline void SetExplicitHashKey(const Aws::String& value) { m_explicitHashKeyHasBeenSet = true; m_explicitHashKey = value; }

    /**
     * <p>The hash value used to explicitly determine the shard the data record is
     * assigned to by overriding the partition key hash.</p>
     */
    inline void SetExplicitHashKey(Aws::String&& value) { m_explicitHashKeyHasBeenSet = true; m_explicitHashKey = std::move(value); }

    /**
     * <p>The hash value used to explicitly determine the shard the data record is
     * assigned to by overriding the partition key hash.</p>
     */
    inline void SetExplicitHashKey(const char* value) { m_explicitHashKeyHasBeenSet = true; m_explicitHashKey.assign(value); }

    /**
     * <p>The hash value used to explicitly determine the shard the data record is
     * assigned to by overriding the partition key hash.</p>
     */
    inline PutRecordRequest& WithExplicitHashKey(const Aws::String& value) { SetExplicitHashKey(value); return *this;}

    /**
     * <p>The hash value used to explicitly determine the shard the data record is
     * assigned to by overriding the partition key hash.</p>
     */
    inline PutRecordRequest& WithExplicitHashKey(Aws::String&& value) { SetExplicitHashKey(std::move(value)); return *this;}

    /**
     * <p>The hash value used to explicitly determine the shard the data record is
     * assigned to by overriding the partition key hash.</p>
     */
    inline PutRecordRequest& WithExplicitHashKey(const char* value) { SetExplicitHashKey(value); return *this;}

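    /**
     * Targeting sketch (illustrative): in the Kinesis API the explicit hash key
     * is the decimal string form of a 128-bit integer, and the record is routed
     * to whichever shard's hash key range contains that value, ignoring the
     * partition key hash. The specific value below (2^127, the midpoint of the
     * 128-bit key space) is an assumption for the example.
     *
     * @code
     * Aws::Kinesis::Model::PutRecordRequest request;
     * request.SetPartitionKey("not-used-for-routing");
     * // Route to the shard whose hash key range contains this value.
     * request.SetExplicitHashKey("170141183460469231731687303715884105728");
     * @endcode
     */
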
    /**
     * <p>Guarantees strictly increasing sequence numbers, for puts from the same
     * client and to the same partition key. Usage: set the
     * <code>SequenceNumberForOrdering</code> of record <i>n</i> to the sequence
     * number of record <i>n-1</i> (as returned in the result when putting record
     * <i>n-1</i>). If this parameter is not set, records are coarsely ordered based
     * on arrival time.</p>
     */
    inline const Aws::String& GetSequenceNumberForOrdering() const{ return m_sequenceNumberForOrdering; }

    /**
     * <p>Guarantees strictly increasing sequence numbers, for puts from the same
     * client and to the same partition key. Usage: set the
     * <code>SequenceNumberForOrdering</code> of record <i>n</i> to the sequence
     * number of record <i>n-1</i> (as returned in the result when putting record
     * <i>n-1</i>). If this parameter is not set, records are coarsely ordered based
     * on arrival time.</p>
     */
    inline bool SequenceNumberForOrderingHasBeenSet() const { return m_sequenceNumberForOrderingHasBeenSet; }

    /**
     * <p>Guarantees strictly increasing sequence numbers, for puts from the same
     * client and to the same partition key. Usage: set the
     * <code>SequenceNumberForOrdering</code> of record <i>n</i> to the sequence
     * number of record <i>n-1</i> (as returned in the result when putting record
     * <i>n-1</i>). If this parameter is not set, records are coarsely ordered based
     * on arrival time.</p>
     */
    inline void SetSequenceNumberForOrdering(const Aws::String& value) { m_sequenceNumberForOrderingHasBeenSet = true; m_sequenceNumberForOrdering = value; }

    /**
     * <p>Guarantees strictly increasing sequence numbers, for puts from the same
     * client and to the same partition key. Usage: set the
     * <code>SequenceNumberForOrdering</code> of record <i>n</i> to the sequence
     * number of record <i>n-1</i> (as returned in the result when putting record
     * <i>n-1</i>). If this parameter is not set, records are coarsely ordered based
     * on arrival time.</p>
     */
    inline void SetSequenceNumberForOrdering(Aws::String&& value) { m_sequenceNumberForOrderingHasBeenSet = true; m_sequenceNumberForOrdering = std::move(value); }

    /**
     * <p>Guarantees strictly increasing sequence numbers, for puts from the same
     * client and to the same partition key. Usage: set the
     * <code>SequenceNumberForOrdering</code> of record <i>n</i> to the sequence
     * number of record <i>n-1</i> (as returned in the result when putting record
     * <i>n-1</i>). If this parameter is not set, records are coarsely ordered based
     * on arrival time.</p>
     */
    inline void SetSequenceNumberForOrdering(const char* value) { m_sequenceNumberForOrderingHasBeenSet = true; m_sequenceNumberForOrdering.assign(value); }

    /**
     * <p>Guarantees strictly increasing sequence numbers, for puts from the same
     * client and to the same partition key. Usage: set the
     * <code>SequenceNumberForOrdering</code> of record <i>n</i> to the sequence
     * number of record <i>n-1</i> (as returned in the result when putting record
     * <i>n-1</i>). If this parameter is not set, records are coarsely ordered based
     * on arrival time.</p>
     */
    inline PutRecordRequest& WithSequenceNumberForOrdering(const Aws::String& value) { SetSequenceNumberForOrdering(value); return *this;}

    /**
     * <p>Guarantees strictly increasing sequence numbers, for puts from the same
     * client and to the same partition key. Usage: set the
     * <code>SequenceNumberForOrdering</code> of record <i>n</i> to the sequence
     * number of record <i>n-1</i> (as returned in the result when putting record
     * <i>n-1</i>). If this parameter is not set, records are coarsely ordered based
     * on arrival time.</p>
     */
    inline PutRecordRequest& WithSequenceNumberForOrdering(Aws::String&& value) { SetSequenceNumberForOrdering(std::move(value)); return *this;}

    /**
     * <p>Guarantees strictly increasing sequence numbers, for puts from the same
     * client and to the same partition key. Usage: set the
     * <code>SequenceNumberForOrdering</code> of record <i>n</i> to the sequence
     * number of record <i>n-1</i> (as returned in the result when putting record
     * <i>n-1</i>). If this parameter is not set, records are coarsely ordered based
     * on arrival time.</p>
     */
    inline PutRecordRequest& WithSequenceNumberForOrdering(const char* value) { SetSequenceNumberForOrdering(value); return *this;}

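    /**
     * Ordering sketch (illustrative): chain each put to the previous record's
     * sequence number so records for one partition key receive strictly
     * increasing sequence numbers. The client, stream name, partition key, and
     * payload collection below are assumptions for the example.
     *
     * @code
     * Aws::Kinesis::KinesisClient client;
     * Aws::Vector<Aws::Utils::ByteBuffer> payloads;  // records to send, in order
     * Aws::String previousSequenceNumber;            // empty for the first record
     * for (const auto& payload : payloads) {
     *   Aws::Kinesis::Model::PutRecordRequest request;
     *   request.WithStreamName("my-stream").WithPartitionKey("user-1234");
     *   request.SetData(payload);
     *   if (!previousSequenceNumber.empty()) {
     *     // Tie record n to record n-1's sequence number.
     *     request.SetSequenceNumberForOrdering(previousSequenceNumber);
     *   }
     *   auto outcome = client.PutRecord(request);
     *   if (outcome.IsSuccess()) {
     *     previousSequenceNumber = outcome.GetResult().GetSequenceNumber();
     *   }
     * }
     * @endcode
     */
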
  private:

    Aws::String m_streamName;
    bool m_streamNameHasBeenSet;

    Aws::Utils::ByteBuffer m_data;
    bool m_dataHasBeenSet;

    Aws::String m_partitionKey;
    bool m_partitionKeyHasBeenSet;

    Aws::String m_explicitHashKey;
    bool m_explicitHashKeyHasBeenSet;

    Aws::String m_sequenceNumberForOrdering;
    bool m_sequenceNumberForOrderingHasBeenSet;
  };

} // namespace Model
} // namespace Kinesis
} // namespace Aws