/*
 * Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

/*
 * Do not modify this file. This file is generated from the kinesis-2013-12-02.normal.json service model.
 */
using System;
using System.Collections.Generic;
using System.Xml.Serialization;
using System.Text;
using System.IO;

using Amazon.Runtime;
using Amazon.Runtime.Internal;

namespace Amazon.Kinesis.Model
{
    /// <summary>
    /// Container for the parameters to the PutRecords operation.
    /// Writes multiple data records into a Kinesis data stream in a single call (also referred
    /// to as a <code>PutRecords</code> request). Use this operation to send data into the
    /// stream for data ingestion and processing.
    ///
    /// <para>
    /// Each <code>PutRecords</code> request can support up to 500 records. Each record in
    /// the request can be as large as 1 MB, up to a limit of 5 MB for the entire request,
    /// including partition keys. Each shard can support writes of up to 1,000 records per
    /// second, up to a maximum data write total of 1 MB per second.
    /// </para>
    ///
    /// <para>
    /// You must specify the name of the stream that captures, stores, and transports the
    /// data, and an array of request <code>Records</code>, with each record in the array
    /// requiring a partition key and data blob. The record size limit applies to the total
    /// size of the partition key and data blob.
    /// </para>
    ///
    /// <para>
    /// The data blob can be any type of data; for example, a segment from a log file, geographic/location
    /// data, website clickstream data, and so on.
    /// </para>
    ///
    /// <para>
    /// The partition key is used by Kinesis Data Streams as input to a hash function that
    /// maps the partition key and associated data to a specific shard. An MD5 hash function
    /// is used to map partition keys to 128-bit integer values and to map associated data
    /// records to shards. As a result of this hashing mechanism, all data records with the
    /// same partition key map to the same shard within the stream. For more information,
    /// see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide.
    /// </para>
    ///
    /// <para>
    /// Each record in the <code>Records</code> array may include an optional parameter, <code>ExplicitHashKey</code>,
    /// which overrides the partition-key-to-shard mapping. This parameter allows a data producer
    /// to determine explicitly the shard where the record is stored. For more information,
    /// see Adding Multiple Records with PutRecords in the Amazon Kinesis Data Streams Developer
    /// Guide.
    /// </para>
    ///
    /// <para>
    /// The <code>PutRecords</code> response includes an array of response <code>Records</code>.
    /// Each record in the response array directly correlates with a record in the request
    /// array using natural ordering, from the top to the bottom of the request and response.
    /// The response <code>Records</code> array always includes the same number of records
    /// as the request array.
    /// </para>
    ///
    /// <para>
    /// The response <code>Records</code> array includes both successfully and unsuccessfully
    /// processed records. Kinesis Data Streams attempts to process all records in each <code>PutRecords</code>
    /// request. A single record failure does not stop the processing of subsequent records.
    /// </para>
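    ///
    /// <para>
    /// As an illustrative sketch (the <code>kinesisClient</code> variable, stream name, and
    /// payload below are hypothetical and not part of this generated model), a minimal
    /// request can be built and sent as follows; the response described next then reports
    /// the outcome of each record:
    /// </para>
    /// <code>
    /// var request = new PutRecordsRequest { StreamName = "example-stream" };
    /// request.Records.Add(new PutRecordsRequestEntry
    /// {
    ///     PartitionKey = "user-1234",
    ///     Data = new MemoryStream(Encoding.UTF8.GetBytes("example payload"))
    /// });
    /// var response = await kinesisClient.PutRecordsAsync(request);
    /// </code>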
    ///
    /// <para>
    /// A successfully processed record includes <code>ShardId</code> and <code>SequenceNumber</code>
    /// values. The <code>ShardId</code> parameter identifies the shard in the stream where
    /// the record is stored. The <code>SequenceNumber</code> parameter is an identifier assigned
    /// to the put record, unique to all records in the stream.
    /// </para>
    ///
    /// <para>
    /// An unsuccessfully processed record includes <code>ErrorCode</code> and <code>ErrorMessage</code>
    /// values. <code>ErrorCode</code> reflects the type of error and can be one of the following
    /// values: <code>ProvisionedThroughputExceededException</code> or <code>InternalFailure</code>.
    /// <code>ErrorMessage</code> provides more detailed information about the <code>ProvisionedThroughputExceededException</code>
    /// exception, including the account ID, stream name, and shard ID of the record that was
    /// throttled. For more information about partially successful responses, see Adding
    /// Multiple Records with PutRecords in the Amazon Kinesis Data Streams Developer Guide.
    /// </para>
    ///
    /// <para>
    /// By default, data records are accessible for 24 hours from the time that they are added
    /// to a stream. You can use <code>IncreaseStreamRetentionPeriod</code> or <code>DecreaseStreamRetentionPeriod</code>
    /// to modify this retention period.
    /// </para>
    /// </summary>
    public partial class PutRecordsRequest : AmazonKinesisRequest
    {
        private List<PutRecordsRequestEntry> _records = new List<PutRecordsRequestEntry>();
        private string _streamName;

        /// <summary>
        /// Gets and sets the property Records.
        /// <para>
        /// The records associated with the request.
        /// </para>
        /// </summary>
        [AWSProperty(Required=true, Min=1, Max=500)]
        public List<PutRecordsRequestEntry> Records
        {
            get { return this._records; }
            set { this._records = value; }
        }

        // Check to see if Records property is set
        internal bool IsSetRecords()
        {
            return this._records != null && this._records.Count > 0;
        }

        /// <summary>
        /// Gets and sets the property StreamName.
        /// <para>
        /// The stream name associated with the request.
        /// </para>
        /// </summary>
        [AWSProperty(Required=true, Min=1, Max=128)]
        public string StreamName
        {
            get { return this._streamName; }
            set { this._streamName = value; }
        }

        // Check to see if StreamName property is set
        internal bool IsSetStreamName()
        {
            return this._streamName != null;
        }
    }
}
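
// ---------------------------------------------------------------------------
// Illustrative usage sketch; not part of the generated service model above.
// It shows one way to send a batch with PutRecords and retry only the entries
// that come back with an ErrorCode, using FailedRecordCount on the response.
// The namespace, class, method name, and three-attempt retry policy are
// hypothetical choices made for this example.
// ---------------------------------------------------------------------------
namespace Amazon.Kinesis.Examples
{
    using System.Collections.Generic;
    using System.Threading.Tasks;
    using Amazon.Kinesis.Model;

    public static class PutRecordsRetryExample
    {
        public static async Task PutWithRetriesAsync(IAmazonKinesis client, string streamName,
            List<PutRecordsRequestEntry> entries)
        {
            var pending = entries;
            for (int attempt = 0; attempt < 3 && pending.Count > 0; attempt++)
            {
                var request = new PutRecordsRequest
                {
                    StreamName = streamName,
                    Records = pending
                };

                var response = await client.PutRecordsAsync(request);

                if (response.FailedRecordCount == 0)
                {
                    return; // every record in this batch was stored
                }

                // The response Records array lines up one-to-one with the request
                // array, so collect only the entries whose result carries an ErrorCode.
                var retry = new List<PutRecordsRequestEntry>();
                for (int i = 0; i < response.Records.Count; i++)
                {
                    if (!string.IsNullOrEmpty(response.Records[i].ErrorCode))
                    {
                        retry.Add(pending[i]);
                    }
                }
                pending = retry;
            }
            // Entries still pending after the final attempt are simply dropped here;
            // a fuller implementation would surface them to the caller.
        }
    }
}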