/* * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.kinesis; import java.util.*; import com.amazonaws.*; import com.amazonaws.auth.*; import com.amazonaws.handlers.*; import com.amazonaws.http.*; import com.amazonaws.internal.*; import com.amazonaws.metrics.*; import com.amazonaws.transform.*; import com.amazonaws.util.*; import com.amazonaws.util.AWSRequestMetrics.Field; import com.amazonaws.services.kinesis.model.*; import com.amazonaws.services.kinesis.model.transform.*; /** * Client for accessing Amazon Kinesis Streams. All service calls made using * this client are blocking, and will not return until the service call * completes. *
*
* Amazon Kinesis Data Streams is a managed service that scales elastically for * real-time processing of streaming big data. *
*/ public class AmazonKinesisClient extends AmazonWebServiceClient implements AmazonKinesis { /** Provider for AWS credentials. */ private AWSCredentialsProvider awsCredentialsProvider; /** * List of exception unmarshallers for all Amazon Kinesis Streams * exceptions. */ protected List* All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @see DefaultAWSCredentialsProviderChain */ @Deprecated public AmazonKinesisClient() { this(new DefaultAWSCredentialsProviderChain(), new ClientConfiguration()); } /** * Constructs a new client to invoke service methods on AmazonKinesis. A * credentials provider chain will be used that searches for credentials in * this order: *
* All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param clientConfiguration The client configuration options controlling * how this client connects to AmazonKinesis (ex: proxy settings, * retry counts, etc.). * @see DefaultAWSCredentialsProviderChain */ @Deprecated public AmazonKinesisClient(ClientConfiguration clientConfiguration) { this(new DefaultAWSCredentialsProviderChain(), clientConfiguration); } /** * Constructs a new client to invoke service methods on AmazonKinesis using * the specified AWS account credentials. *
* The client requests are authenticated using the {@link AWSCredentials} * provided in this constructor. Static AWSCredentials can be passed for * quick testing. However, it is strongly recommended to use Amazon Cognito * vended temporary credentials for use in production. This can be achieved * by using {@link AWSMobileClient}. Please see * https://aws-amplify.github.io/docs/android/authentication for * instructions on how to enable {@link AWSMobileClient}. * *
* AWSMobileClient.getInstance().initialize(getApplicationContext(), new Callback<UserStateDetails>() { * @Override * public void onResult(final UserStateDetails details) { * AmazonKinesisClient client = new AmazonKinesisClient(AWSMobileClient.getInstance()); * } * * @Override * public void onError(final Exception e) { * e.printStackTrace(); * } * }); **
* All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param awsCredentials The AWS credentials (access key ID and secret key) * to use when authenticating with AWS services. */ public AmazonKinesisClient(AWSCredentials awsCredentials) { this(awsCredentials, new ClientConfiguration()); } /** * Constructs a new client to invoke service methods on AmazonKinesis using * the specified AWS account credentials and client configuration options. *
* The client requests are authenticated using the {@link AWSCredentials} * provided in this constructor. Static AWSCredentials can be passed for * quick testing. However, it is strongly recommended to use Amazon Cognito * vended temporary credentials for use in production. This can be achieved * by using {@link AWSMobileClient}. Please see * https://aws-amplify.github.io/docs/android/authentication for * instructions on how to enable {@link AWSMobileClient}. * *
* AWSMobileClient.getInstance().initialize(getApplicationContext(), new Callback<UserStateDetails>() { * @Override * public void onResult(final UserStateDetails details) { * AmazonKinesisClient client = new AmazonKinesisClient(AWSMobileClient.getInstance()); * } * * @Override * public void onError(final Exception e) { * e.printStackTrace(); * } * }); **
* All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param awsCredentials The AWS credentials (access key ID and secret key) * to use when authenticating with AWS services. * @param clientConfiguration The client configuration options controlling * how this client connects to AmazonKinesis (ex: proxy settings, * retry counts, etc.). */ public AmazonKinesisClient(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) { this(new StaticCredentialsProvider(awsCredentials), clientConfiguration); } /** * Constructs a new client to invoke service methods on AmazonKinesis using * the specified AWS account credentials provider. *
* The client requests are authenticated using the {@link AWSCredentials} * provided in this constructor. Static AWSCredentials can be passed for * quick testing. However, it is strongly recommended to use Amazon Cognito * vended temporary credentials for use in production. This can be achieved * by using {@link AWSMobileClient}. Please see * https://aws-amplify.github.io/docs/android/authentication for * instructions on how to enable {@link AWSMobileClient}. * *
* AWSMobileClient.getInstance().initialize(getApplicationContext(), new Callback<UserStateDetails>() { * @Override * public void onResult(final UserStateDetails details) { * AmazonKinesisClient client = new AmazonKinesisClient(AWSMobileClient.getInstance()); * } * * @Override * public void onError(final Exception e) { * e.printStackTrace(); * } * }); **
* All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param awsCredentialsProvider The AWS credentials provider which will * provide credentials to authenticate requests with AWS * services. */ public AmazonKinesisClient(AWSCredentialsProvider awsCredentialsProvider) { this(awsCredentialsProvider, new ClientConfiguration()); } /** * Constructs a new client to invoke service methods on AmazonKinesis using * the specified AWS account credentials provider and client configuration * options. *
* The client requests are authenticated using the {@link AWSCredentials} * provided in this constructor. Static AWSCredentials can be passed for * quick testing. However, it is strongly recommended to use Amazon Cognito * vended temporary credentials for use in production. This can be achieved * by using {@link AWSMobileClient}. Please see * https://aws-amplify.github.io/docs/android/authentication for * instructions on how to enable {@link AWSMobileClient}. * *
* AWSMobileClient.getInstance().initialize(getApplicationContext(), new Callback<UserStateDetails>() { * @Override * public void onResult(final UserStateDetails details) { * AmazonKinesisClient client = new AmazonKinesisClient(AWSMobileClient.getInstance()); * } * * @Override * public void onError(final Exception e) { * e.printStackTrace(); * } * }); **
* All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param awsCredentialsProvider The AWS credentials provider which will * provide credentials to authenticate requests with AWS * services. * @param clientConfiguration The client configuration options controlling * how this client connects to AmazonKinesis (ex: proxy settings, * retry counts, etc.). */ public AmazonKinesisClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) { this(awsCredentialsProvider, clientConfiguration, new UrlHttpClient(clientConfiguration)); } /** * Constructs a new client to invoke service methods on AmazonKinesis using * the specified AWS account credentials provider, client configuration * options and request metric collector. *
* All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param awsCredentialsProvider The AWS credentials provider which will * provide credentials to authenticate requests with AWS * services. * @param clientConfiguration The client configuration options controlling * how this client connects to AmazonKinesis (ex: proxy settings, * retry counts, etc.). * @param requestMetricCollector optional request metric collector */ @Deprecated public AmazonKinesisClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration, RequestMetricCollector requestMetricCollector) { super(adjustClientConfiguration(clientConfiguration), requestMetricCollector); this.awsCredentialsProvider = awsCredentialsProvider; init(); } /** * Constructs a new client to invoke service methods on AmazonKinesis using * the specified AWS account credentials provider, client configuration * options and request metric collector. *
* All service calls made using this new client object are blocking, and
* will not return until the service call completes.
*
* @param awsCredentialsProvider The AWS credentials provider which will
* provide credentials to authenticate requests with AWS
* services.
* @param clientConfiguration The client configuration options controlling
* how this client connects to AmazonKinesis (ex: proxy settings,
* retry counts, etc.).
* @param httpClient A http client
*/
    public AmazonKinesisClient(AWSCredentialsProvider awsCredentialsProvider,
            ClientConfiguration clientConfiguration, HttpClient httpClient) {
        // All other credential-taking constructors in this class funnel into this one.
        // adjustClientConfiguration(...) is defined elsewhere in this class; presumably
        // it applies Kinesis-specific defaults to the supplied configuration — not
        // visible in this view, confirm before relying on its behavior.
        super(adjustClientConfiguration(clientConfiguration), httpClient);
        // Provider is stored (not a snapshot of credentials), so rotating/refreshing
        // providers such as Cognito-vended temporary credentials keep working.
        this.awsCredentialsProvider = awsCredentialsProvider;
        // Completes client setup; init() builds the jsonErrorUnmarshallers list
        // (and, per the base-class pattern, likely registers the service endpoint).
        init();
    }
private void init() {
jsonErrorUnmarshallers = new ArrayList
* Adds or updates tags for the specified Kinesis data stream. Each stream
* can have up to 10 tags.
*
* If tags have already been assigned to the stream,
*
* AddTagsToStream has a limit of five transactions per second per
* account.
*
* Represents the input for
* Creates a Kinesis data stream. A stream captures and transports data
* records that are continuously emitted from different data sources or
* producers. Scale-out within a stream is explicitly supported by
* means of shards, which are uniquely identified groups of data records in
* a stream.
*
* You specify and control the number of shards that a stream is composed
* of. Each shard can support reads up to five transactions per second, up
* to a maximum data read total of 2 MB per second. Each shard can support
* writes up to 1,000 records per second, up to a maximum data write total
* of 1 MB per second. If the amount of data input increases or decreases,
* you can add or remove shards.
*
* The stream name identifies the stream. The name is scoped to the AWS
* account used by the application. It is also scoped by AWS Region. That
* is, two streams in two different accounts can have the same name, and two
* streams in the same account, but in two different Regions, can have the
* same name.
*
*
* You receive a
* Have more than five streams in the
* Create more shards than are authorized for your account.
*
* For the default shard limit for an AWS account, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data
* Streams Developer Guide. To increase this limit, contact AWS Support.
*
* You can use
* CreateStream has a limit of five transactions per second per
* account.
*
* Represents the input for
* Decreases the Kinesis data stream's retention period, which is the length
* of time data records are accessible after they are added to the stream.
* The minimum value of a stream's retention period is 24 hours.
*
* This operation may result in lost data. For example, if the stream's
* retention period is 48 hours and is decreased to 24 hours, any data
* already in the stream that is older than 24 hours is inaccessible.
*
* Represents the input for DecreaseStreamRetentionPeriod.
*
* Deletes a Kinesis data stream and all its shards and data. You must shut
* down any applications that are operating on the stream before you delete
* the stream. If an application attempts to operate on a deleted stream, it
* receives the exception
* If the stream is in the
* Note: Kinesis Data Streams might continue to accept data read and
* write operations, such as PutRecord, PutRecords, and
* GetRecords, on a stream in the
* When you delete a stream, any shards in that stream are also deleted, and
* any tags are dissociated from the stream.
*
* You can use the DescribeStream operation to check the state of the
* stream, which is returned in
* DeleteStream has a limit of five transactions per second per
* account.
*
* Represents the input for DeleteStream.
*
* Describes the shard limits and usage for the account.
*
* If you update your account limits, the old limits might be returned for a
* few minutes.
*
* This operation has a limit of one transaction per second per account.
*
* Describes the specified Kinesis data stream.
*
* The information returned includes the stream name, Amazon Resource Name
* (ARN), creation time, enhanced metric configuration, and shard map. The
* shard map is an array of shard objects. For each shard object, there is
* the hash key and sequence number ranges that the shard spans, and the IDs
 * of any earlier shards that played a role in creating the shard. Every
* record ingested in the stream is identified by a sequence number, which
* is assigned when the record is put into the stream.
*
* You can limit the number of shards returned by each call. For more
* information, see Retrieving Shards from a Stream in the Amazon Kinesis Data
* Streams Developer Guide.
*
 * There are no guarantees about the chronological order of shards returned. To
* process shards in chronological order, use the ID of the parent shard to
* track the lineage to the oldest shard.
*
* This operation has a limit of 10 transactions per second per account.
*
* Represents the input for
* Provides a summarized description of the specified Kinesis data stream
* without the shard list.
*
* The information returned includes the stream name, Amazon Resource Name
* (ARN), status, record retention period, approximate creation time,
* monitoring, encryption details, and open shard count.
*
* Disables enhanced monitoring.
*
* Represents the input for DisableEnhancedMonitoring.
*
* Enables enhanced Kinesis data stream monitoring for shard-level metrics.
*
* Represents the input for EnableEnhancedMonitoring.
*
* Gets data records from a Kinesis data stream's shard.
*
* Specify a shard iterator using the
* You can scale by provisioning multiple shards per stream while
* considering service limits (for more information, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data
* Streams Developer Guide). Your application should have one thread per
* shard, each reading continuously from its stream. To read from a stream
* continually, call GetRecords in a loop. Use
* GetShardIterator to get the shard iterator to specify in the first
* GetRecords call. GetRecords returns a new shard iterator in
*
* Each data record can be up to 1 MB in size, and each shard can read up to
* 2 MB per second. You can ensure that your calls don't exceed the maximum
* supported size or throughput by using the
* The size of the data returned by GetRecords varies depending on
* the utilization of the shard. The maximum size of data that
* GetRecords can return is 10 MB. If a call returns this amount of
* data, subsequent calls made within the next five seconds throw
*
* To detect whether the application is falling behind in processing, you
* can use the
* Each Amazon Kinesis record includes a value,
*
* Represents the input for GetRecords.
*
* Gets an Amazon Kinesis shard iterator. A shard iterator expires five
* minutes after it is returned to the requester.
*
* A shard iterator specifies the shard position from which to start reading
* data records sequentially. The position is specified using the sequence
* number of a data record in a shard. A sequence number is the identifier
* associated with every record ingested in the stream, and is assigned when
* a record is put into the stream. Each stream has one or more shards.
*
* You must specify the shard iterator type. For example, you can set the
*
* When you read repeatedly from a stream, use a GetShardIterator
* request to get the first shard iterator for use in your first
* GetRecords request and for subsequent reads use the shard iterator
* returned by the GetRecords request in
*
* If a GetShardIterator request is made too often, you receive a
*
* If the shard is closed, GetShardIterator returns a valid iterator
* for the last sequence number of the shard. A shard can be closed as a
* result of using SplitShard or MergeShards.
*
* GetShardIterator has a limit of five transactions per second per
* account per open shard.
*
* Represents the input for
* Increases the Kinesis data stream's retention period, which is the length
* of time data records are accessible after they are added to the stream.
* The maximum value of a stream's retention period is 168 hours (7 days).
*
* If you choose a longer stream retention period, this operation increases
* the time period during which records that have not yet expired are
* accessible. However, it does not make previous, expired data (older than
* the stream's previous retention period) accessible after the operation
* has been called. For example, if a stream's retention period is set to 24
* hours and is increased to 168 hours, any data that is older than 24 hours
* remains inaccessible to consumer applications.
*
* Represents the input for IncreaseStreamRetentionPeriod.
*
* Lists the shards in a stream and provides information about each shard.
*
* This API is a new operation that is used by the Amazon Kinesis Client
* Library (KCL). If you have a fine-grained IAM policy that only allows
* specific operations, you must update your policy to allow calls to this
* API. For more information, see Controlling Access to Amazon Kinesis Data Streams Resources Using
* IAM.
*
* Lists your Kinesis data streams.
*
* The number of streams may be too large to return from a single call to
*
* You can detect if there are more streams available to list by using the
*
* ListStreams has a limit of five transactions per second per
* account.
*
* Represents the input for
* Lists the tags for the specified Kinesis data stream. This operation has
* a limit of five transactions per second per account.
*
* Represents the input for
* Merges two adjacent shards in a Kinesis data stream and combines them
* into a single shard to reduce the stream's capacity to ingest and
* transport data. Two shards are considered adjacent if the union of the
* hash key ranges for the two shards form a contiguous set with no gaps.
* For example, if you have two shards, one with a hash key range of
* 276...381 and the other with a hash key range of 382...454, then you
* could merge these two shards into a single shard that would have a hash
* key range of 276...454. After the merge, the single child shard receives
* data for all hash key values covered by the two parent shards.
*
*
* If the stream is in the
* You can use DescribeStream to check the state of the stream, which
* is returned in
*
* You use DescribeStream to determine the shard IDs that are
* specified in the
* If you try to operate on too many streams in parallel using
* CreateStream, DeleteStream,
*
* Represents the input for
* Writes a single data record into an Amazon Kinesis data stream. Call
*
* You must specify the name of the stream that captures, stores, and
* transports the data; a partition key; and the data blob itself.
*
* The data blob can be any type of data; for example, a segment from a log
* file, geographic/location data, website clickstream data, and so on.
*
* The partition key is used by Kinesis Data Streams to distribute data
* across shards. Kinesis Data Streams segregates the data records that
* belong to a stream into multiple shards, using the partition key
* associated with each data record to determine the shard to which a given
* data record belongs.
*
* Partition keys are Unicode strings, with a maximum length limit of 256
* characters for each key. An MD5 hash function is used to map partition
* keys to 128-bit integer values and to map associated data records to
* shards using the hash key ranges of the shards. You can override hashing
* the partition key to determine the shard by explicitly specifying a hash
* value using the
*
* Sequence numbers increase over time and are specific to a shard within a
* stream, not across all shards within a stream. To guarantee strictly
* increasing ordering, write serially to a shard and use the
*
* If a
* By default, data records are accessible for 24 hours from the time that
* they are added to a stream. You can use
* IncreaseStreamRetentionPeriod or
* DecreaseStreamRetentionPeriod to modify this retention period.
*
* Represents the input for
* Writes multiple data records into a Kinesis data stream in a single call
* (also referred to as a
* Each
* You must specify the name of the stream that captures, stores, and
* transports the data; and an array of request
* The data blob can be any type of data; for example, a segment from a log
* file, geographic/location data, website clickstream data, and so on.
*
* The partition key is used by Kinesis Data Streams as input to a hash
* function that maps the partition key and associated data to a specific
* shard. An MD5 hash function is used to map partition keys to 128-bit
* integer values and to map associated data records to shards. As a result
* of this hashing mechanism, all data records with the same partition key
* map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams
* Developer Guide.
*
* Each record in the
* The
* The response
* A successfully processed record includes
* An unsuccessfully processed record includes
* By default, data records are accessible for 24 hours from the time that
* they are added to a stream. You can use
* IncreaseStreamRetentionPeriod or
* DecreaseStreamRetentionPeriod to modify this retention period.
*
* A
* Removes tags from the specified Kinesis data stream. Removed tags are
* deleted and cannot be recovered after this operation successfully
* completes.
*
* If you specify a tag that does not exist, it is ignored.
*
* RemoveTagsFromStream has a limit of five transactions per second
* per account.
*
* Represents the input for
* Splits a shard into two new shards in the Kinesis data stream, to
* increase the stream's capacity to ingest and transport data.
*
* You can also use
* You must specify the shard to be split and the new hash key, which is the
* position in the shard where the shard gets split in two. In many cases,
* the new hash key might be the average of the beginning and ending hash
* key, but it can be any hash key value in the range being mapped into the
* shard. For more information, see Split a Shard in the Amazon Kinesis Data Streams Developer
* Guide.
*
* You can use DescribeStream to determine the shard ID and hash key
* values for the
*
* You can use
* If the specified stream does not exist,
* For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Data Streams Developer
* Guide. To increase this limit, contact AWS Support.
*
* If you try to operate on too many streams simultaneously using
* CreateStream, DeleteStream, MergeShards, and/or
* SplitShard, you receive a
*
* Represents the input for
* Enables or updates server-side encryption using an AWS KMS key for a
* specified stream.
*
* Starting encryption is an asynchronous operation. Upon receiving the
* request, Kinesis Data Streams returns immediately and sets the status of
* the stream to
* API Limits: You can successfully apply a new AWS KMS key for server-side
* encryption 25 times in a rolling 24-hour period.
*
* Note: It can take up to five seconds after the stream is in an
*
* Disables server-side encryption for a specified stream.
*
* Stopping encryption is an asynchronous operation. Upon receiving the
* request, Kinesis Data Streams returns immediately and sets the status of
* the stream to
* API Limits: You can successfully disable server-side encryption 25 times
* in a rolling 24-hour period.
*
* Note: It can take up to five seconds after the stream is in an
*
* Updates the shard count of the specified stream to the specified number
* of shards.
*
* Updating the shard count is an asynchronous operation. Upon receiving the
* request, Kinesis Data Streams returns immediately and sets the status of
* the stream to
* To update the shard count, Kinesis Data Streams performs splits or merges
* on individual shards. This can cause short-lived shards to be created, in
* addition to the final shards. We recommend that you double or halve the
* shard count, as this results in the fewest number of splits or merges.
*
* This operation has the following limits. You cannot do the following:
*
* Scale more than twice per rolling 24-hour period per stream
*
* Scale up to more than double your current shard count for a stream
*
* Scale down below half your current shard count for a stream
*
* Scale up to more than 500 shards in a stream
*
* Scale a stream with more than 500 shards down unless the result is less
* than 500 shards
*
* Scale up to more than the shard limit for your account
*
* For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Data Streams Developer
* Guide. To request an increase in the call rate limit, the shard limit
* for this API, or your overall shard limit, use the limits form.
*
* Creates a Kinesis data stream. A stream captures and transports data
* records that are continuously emitted from different data sources or
* producers. Scale-out within a stream is explicitly supported by
* means of shards, which are uniquely identified groups of data records in
* a stream.
*
* You specify and control the number of shards that a stream is composed
* of. Each shard can support reads up to five transactions per second, up
* to a maximum data read total of 2 MB per second. Each shard can support
* writes up to 1,000 records per second, up to a maximum data write total
* of 1 MB per second. If the amount of data input increases or decreases,
* you can add or remove shards.
*
* The stream name identifies the stream. The name is scoped to the AWS
* account used by the application. It is also scoped by AWS Region. That
* is, two streams in two different accounts can have the same name, and two
* streams in the same account, but in two different Regions, can have the
* same name.
*
*
* You receive a
* Have more than five streams in the
* Create more shards than are authorized for your account.
*
* For the default shard limit for an AWS account, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data
* Streams Developer Guide. To increase this limit, contact AWS Support.
*
* You can use
* CreateStream has a limit of five transactions per second per
* account.
*
* A name to identify the stream. The stream name is scoped to
* the AWS account used by the application that creates the
* stream. It is also scoped by AWS Region. That is, two streams
* in two different AWS accounts can have the same name. Two
* streams in the same AWS account but in two different Regions
* can also have the same name.
*
* The number of shards that the stream will use. The throughput
* of the stream is a function of the number of shards; more
* shards are required for greater provisioned throughput.
*
* DefaultShardLimit;
*
* Deletes a Kinesis data stream and all its shards and data. You must shut
* down any applications that are operating on the stream before you delete
* the stream. If an application attempts to operate on a deleted stream, it
* receives the exception
* If the stream is in the
* Note: Kinesis Data Streams might continue to accept data read and
* write operations, such as PutRecord, PutRecords, and
* GetRecords, on a stream in the
* When you delete a stream, any shards in that stream are also deleted, and
* any tags are dissociated from the stream.
*
* You can use the DescribeStream operation to check the state of the
* stream, which is returned in
* DeleteStream has a limit of five transactions per second per
* account.
*
* The name of the stream to delete.
*
* Describes the specified Kinesis data stream.
*
* The information returned includes the stream name, Amazon Resource Name
* (ARN), creation time, enhanced metric configuration, and shard map. The
* shard map is an array of shard objects. For each shard object, there is
* the hash key and sequence number ranges that the shard spans, and the IDs
 * of any earlier shards that played a role in creating the shard. Every
* record ingested in the stream is identified by a sequence number, which
* is assigned when the record is put into the stream.
*
* You can limit the number of shards returned by each call. For more
* information, see Retrieving Shards from a Stream in the Amazon Kinesis Data
* Streams Developer Guide.
*
 * There are no guarantees about the chronological order of shards returned. To
* process shards in chronological order, use the ID of the parent shard to
* track the lineage to the oldest shard.
*
* This operation has a limit of 10 transactions per second per account.
*
* The name of the stream to describe.
*
* Describes the specified Kinesis data stream.
*
* The information returned includes the stream name, Amazon Resource Name
* (ARN), creation time, enhanced metric configuration, and shard map. The
* shard map is an array of shard objects. For each shard object, there is
* the hash key and sequence number ranges that the shard spans, and the IDs
 * of any earlier shards that played a role in creating the shard. Every
* record ingested in the stream is identified by a sequence number, which
* is assigned when the record is put into the stream.
*
* You can limit the number of shards returned by each call. For more
* information, see Retrieving Shards from a Stream in the Amazon Kinesis Data
* Streams Developer Guide.
*
 * There are no guarantees about the chronological order of shards returned. To
* process shards in chronological order, use the ID of the parent shard to
* track the lineage to the oldest shard.
*
* This operation has a limit of 10 transactions per second per account.
*
* The name of the stream to describe.
*
* The shard ID of the shard to start with.
*
* Describes the specified Kinesis data stream.
*
* The information returned includes the stream name, Amazon Resource Name
* (ARN), creation time, enhanced metric configuration, and shard map. The
* shard map is an array of shard objects. For each shard object, there is
* the hash key and sequence number ranges that the shard spans, and the IDs
 * of any earlier shards that played a role in creating the shard. Every
* record ingested in the stream is identified by a sequence number, which
* is assigned when the record is put into the stream.
*
* You can limit the number of shards returned by each call. For more
* information, see Retrieving Shards from a Stream in the Amazon Kinesis Data
* Streams Developer Guide.
*
 * There are no guarantees about the chronological order of shards returned. To
* process shards in chronological order, use the ID of the parent shard to
* track the lineage to the oldest shard.
*
* This operation has a limit of 10 transactions per second per account.
*
* The name of the stream to describe.
*
* The maximum number of shards to return in a single call. The
* default value is 100. If you specify a value greater than 100,
* at most 100 shards are returned.
*
* The shard ID of the shard to start with.
*
* Lists your Kinesis data streams.
*
* The number of streams may be too large to return from a single call to
*
* You can detect if there are more streams available to list by using the
*
* ListStreams has a limit of five transactions per second per
* account.
*
* Lists your Kinesis data streams.
*
* The number of streams may be too large to return from a single call to
*
* You can detect if there are more streams available to list by using the
*
* ListStreams has a limit of five transactions per second per
* account.
*
* The name of the stream to start the list with.
*
* Lists your Kinesis data streams.
*
* The number of streams may be too large to return from a single call to
*
* You can detect if there are more streams available to list by using the
*
* ListStreams has a limit of five transactions per second per
* account.
*
* The maximum number of streams to list.
*
* The name of the stream to start the list with.
*
* Merges two adjacent shards in a Kinesis data stream and combines them
* into a single shard to reduce the stream's capacity to ingest and
* transport data. Two shards are considered adjacent if the union of the
* hash key ranges for the two shards form a contiguous set with no gaps.
* For example, if you have two shards, one with a hash key range of
* 276...381 and the other with a hash key range of 382...454, then you
* could merge these two shards into a single shard that would have a hash
* key range of 276...454. After the merge, the single child shard receives
* data for all hash key values covered by the two parent shards.
*
*
* If the stream is in the
* You can use DescribeStream to check the state of the stream, which
* is returned in
*
* You use DescribeStream to determine the shard IDs that are
* specified in the
* If you try to operate on too many streams in parallel using
* CreateStream, DeleteStream,
*
* The name of the stream for the merge.
*
* The shard ID of the shard to combine with the adjacent shard
* for the merge.
*
* The shard ID of the adjacent shard for the merge.
*
* Splits a shard into two new shards in the Kinesis data stream, to
* increase the stream's capacity to ingest and transport data.
*
* You can also use
* You must specify the shard to be split and the new hash key, which is the
* position in the shard where the shard gets split in two. In many cases,
* the new hash key might be the average of the beginning and ending hash
* key, but it can be any hash key value in the range being mapped into the
* shard. For more information, see Split a Shard in the Amazon Kinesis Data Streams Developer
* Guide.
*
* You can use DescribeStream to determine the shard ID and hash key
* values for the
*
* You can use
* If the specified stream does not exist,
* For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Data Streams Developer
* Guide. To increase this limit, contact AWS Support.
*
* If you try to operate on too many streams simultaneously using
* CreateStream, DeleteStream, MergeShards, and/or
* SplitShard, you receive a
*
* The name of the stream for the shard split.
*
* The shard ID of the shard to split.
*
* A hash key value for the starting hash key of one of the child
* shards created by the split. The hash key range for a given
* shard constitutes a set of ordered contiguous positive
* integers. The value for
* Writes a single data record into an Amazon Kinesis data stream. Call
*
* You must specify the name of the stream that captures, stores, and
* transports the data; a partition key; and the data blob itself.
*
* The data blob can be any type of data; for example, a segment from a log
* file, geographic/location data, website clickstream data, and so on.
*
* The partition key is used by Kinesis Data Streams to distribute data
* across shards. Kinesis Data Streams segregates the data records that
* belong to a stream into multiple shards, using the partition key
* associated with each data record to determine the shard to which a given
* data record belongs.
*
* Partition keys are Unicode strings, with a maximum length limit of 256
* characters for each key. An MD5 hash function is used to map partition
* keys to 128-bit integer values and to map associated data records to
* shards using the hash key ranges of the shards. You can override hashing
* the partition key to determine the shard by explicitly specifying a hash
* value using the
*
* Sequence numbers increase over time and are specific to a shard within a
* stream, not across all shards within a stream. To guarantee strictly
* increasing ordering, write serially to a shard and use the
*
* If a
* By default, data records are accessible for 24 hours from the time that
* they are added to a stream. You can use
* IncreaseStreamRetentionPeriod or
* DecreaseStreamRetentionPeriod to modify this retention period.
*
* The name of the stream to put the data record into.
*
* The data blob to put into the record, which is base64-encoded
* when the blob is serialized. When the data blob (the payload
* before base64-encoding) is added to the partition key size,
* the total size must not exceed the maximum record size (1 MB).
*
* Determines which shard in the stream the data record is
* assigned to. Partition keys are Unicode strings with a maximum
* length limit of 256 characters for each key. Amazon Kinesis
* Data Streams uses the partition key as input to a hash
* function that maps the partition key and associated data to a
* specific shard. Specifically, an MD5 hash function is used to
* map partition keys to 128-bit integer values and to map
* associated data records to shards. As a result of this hashing
* mechanism, all data records with the same partition key map to
* the same shard within the stream.
*
* Writes a single data record into an Amazon Kinesis data stream. Call
*
* You must specify the name of the stream that captures, stores, and
* transports the data; a partition key; and the data blob itself.
*
* The data blob can be any type of data; for example, a segment from a log
* file, geographic/location data, website clickstream data, and so on.
*
* The partition key is used by Kinesis Data Streams to distribute data
* across shards. Kinesis Data Streams segregates the data records that
* belong to a stream into multiple shards, using the partition key
* associated with each data record to determine the shard to which a given
* data record belongs.
*
* Partition keys are Unicode strings, with a maximum length limit of 256
* characters for each key. An MD5 hash function is used to map partition
* keys to 128-bit integer values and to map associated data records to
* shards using the hash key ranges of the shards. You can override hashing
* the partition key to determine the shard by explicitly specifying a hash
* value using the
*
* Sequence numbers increase over time and are specific to a shard within a
* stream, not across all shards within a stream. To guarantee strictly
* increasing ordering, write serially to a shard and use the
*
* If a
* By default, data records are accessible for 24 hours from the time that
* they are added to a stream. You can use
* IncreaseStreamRetentionPeriod or
* DecreaseStreamRetentionPeriod to modify this retention period.
*
* The name of the stream to put the data record into.
*
* The data blob to put into the record, which is base64-encoded
* when the blob is serialized. When the data blob (the payload
* before base64-encoding) is added to the partition key size,
* the total size must not exceed the maximum record size (1 MB).
*
* Determines which shard in the stream the data record is
* assigned to. Partition keys are Unicode strings with a maximum
* length limit of 256 characters for each key. Amazon Kinesis
* Data Streams uses the partition key as input to a hash
* function that maps the partition key and associated data to a
* specific shard. Specifically, an MD5 hash function is used to
* map partition keys to 128-bit integer values and to map
* associated data records to shards. As a result of this hashing
* mechanism, all data records with the same partition key map to
* the same shard within the stream.
*
* Guarantees strictly increasing sequence numbers, for puts from
* the same client and to the same partition key. Usage: set the
*
* Gets an Amazon Kinesis shard iterator. A shard iterator expires five
* minutes after it is returned to the requester.
*
* A shard iterator specifies the shard position from which to start reading
* data records sequentially. The position is specified using the sequence
* number of a data record in a shard. A sequence number is the identifier
* associated with every record ingested in the stream, and is assigned when
* a record is put into the stream. Each stream has one or more shards.
*
* You must specify the shard iterator type. For example, you can set the
*
* When you read repeatedly from a stream, use a GetShardIterator
* request to get the first shard iterator for use in your first
* GetRecords request and for subsequent reads use the shard iterator
* returned by the GetRecords request in
*
* If a GetShardIterator request is made too often, you receive a
*
* If the shard is closed, GetShardIterator returns a valid iterator
* for the last sequence number of the shard. A shard can be closed as a
* result of using SplitShard or MergeShards.
*
* GetShardIterator has a limit of five transactions per second per
* account per open shard.
*
* The name of the Amazon Kinesis data stream.
*
* The shard ID of the Kinesis Data Streams shard to get the
* iterator for.
*
* Determines how the shard iterator is used to start reading
* data records from the shard.
*
* The following are the valid Amazon Kinesis shard iterator
* types:
*
* AT_SEQUENCE_NUMBER - Start reading from the position denoted
* by a specific sequence number, provided in the value
*
* AFTER_SEQUENCE_NUMBER - Start reading right after the position
* denoted by a specific sequence number, provided in the value
*
* AT_TIMESTAMP - Start reading from the position denoted by a
* specific time stamp, provided in the value
*
* TRIM_HORIZON - Start reading at the last untrimmed record in
* the shard in the system, which is the oldest data record in
* the shard.
*
* LATEST - Start reading just after the most recent record in
* the shard, so that you always read the most recent data in the
* shard.
*
* Gets an Amazon Kinesis shard iterator. A shard iterator expires five
* minutes after it is returned to the requester.
*
* A shard iterator specifies the shard position from which to start reading
* data records sequentially. The position is specified using the sequence
* number of a data record in a shard. A sequence number is the identifier
* associated with every record ingested in the stream, and is assigned when
* a record is put into the stream. Each stream has one or more shards.
*
* You must specify the shard iterator type. For example, you can set the
*
* When you read repeatedly from a stream, use a GetShardIterator
* request to get the first shard iterator for use in your first
* GetRecords request and for subsequent reads use the shard iterator
* returned by the GetRecords request in
*
* If a GetShardIterator request is made too often, you receive a
*
* If the shard is closed, GetShardIterator returns a valid iterator
* for the last sequence number of the shard. A shard can be closed as a
* result of using SplitShard or MergeShards.
*
* GetShardIterator has a limit of five transactions per second per
* account per open shard.
*
* The name of the Amazon Kinesis data stream.
*
* The shard ID of the Kinesis Data Streams shard to get the
* iterator for.
*
* Determines how the shard iterator is used to start reading
* data records from the shard.
*
* The following are the valid Amazon Kinesis shard iterator
* types:
*
* AT_SEQUENCE_NUMBER - Start reading from the position denoted
* by a specific sequence number, provided in the value
*
* AFTER_SEQUENCE_NUMBER - Start reading right after the position
* denoted by a specific sequence number, provided in the value
*
* AT_TIMESTAMP - Start reading from the position denoted by a
* specific time stamp, provided in the value
*
* TRIM_HORIZON - Start reading at the last untrimmed record in
* the shard in the system, which is the oldest data record in
* the shard.
*
* LATEST - Start reading just after the most recent record in
* the shard, so that you always read the most recent data in the
* shard.
*
* The sequence number of the data record in the shard from which
* to start reading. Used with shard iterator type
* AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER.
*
* Response metadata is only cached for a limited period of time, so if you
* need to access this extra diagnostic information for an executed request,
* you should use this method to retrieve it as soon as possible after
* executing the request.
*
* @param request The originally executed request
* @return The response metadata for the specified request, or null if none
* is available.
* @deprecated ResponseMetadata cache can hold up to 50 requests and
* responses in memory and will cause memory issue. This method
* now always returns null.
*/
@Deprecated
public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
return client.getResponseMetadataForRequest(request);
}
private AddTagsToStream
overwrites any existing tags that correspond
* to the specified tag keys.
* AddTagsToStream
.
* CreateStream
is an asynchronous operation. Upon receiving a
* CreateStream
request, Kinesis Data Streams immediately
* returns and sets the stream status to CREATING
. After the
* stream is created, Kinesis Data Streams sets the stream status to
* ACTIVE
. You should perform read and write operations only on
* an ACTIVE
stream.
* LimitExceededException
when making a
* CreateStream
request when you try to do one of the
* following:
*
*
* CREATING
state at any
* point in time.
* DescribeStream
to check the stream status, which
* is returned in StreamStatus
.
* CreateStream
.
* ResourceNotFoundException
.
* ACTIVE
state, you can delete it.
* After a DeleteStream
request, the specified stream is in the
* DELETING
state until Kinesis Data Streams completes the
* deletion.
* DELETING
state until
* the stream deletion is complete.
* StreamStatus
.
* DescribeStream
.
* ShardIterator
parameter.
* The shard iterator specifies the position in the shard from which you
* want to start reading data records sequentially. If there are no records
* available in the portion of the shard that the iterator points to,
* GetRecords returns an empty list. It might take multiple calls to
* get to a portion of the shard that contains records.
* NextShardIterator
. Specify the shard iterator returned in
* NextShardIterator
in subsequent calls to GetRecords.
* If the shard has been closed, the shard iterator can't return more data
* and GetRecords returns null
in
* NextShardIterator
. You can terminate the loop when the shard
* is closed, or when the shard iterator reaches the record with the
* sequence number or other attribute that marks it as the last record to
* process.
* Limit
parameter to
* specify the maximum number of records that GetRecords can return.
* Consider your average record size when determining this limit.
* ProvisionedThroughputExceededException
. If there is
* insufficient provisioned throughput on the stream, subsequent calls made
* within the next one second throw
* ProvisionedThroughputExceededException
. GetRecords
* won't return any data when it throws an exception. For this reason, we
* recommend that you wait one second between calls to GetRecords;
* however, it's possible that the application will get exceptions for
* longer than 1 second.
* MillisBehindLatest
response attribute. You can
* also monitor the stream using CloudWatch metrics and other mechanisms
* (see Monitoring in the Amazon Kinesis Data Streams Developer
* Guide).
* ApproximateArrivalTimestamp
, that is set when a stream
* successfully receives and stores a record. This is commonly referred to
* as a server-side time stamp, whereas a client-side time stamp is set when
* a data producer creates or sends the record to a stream (a data producer
* is any data source putting data records into a stream, for example with
* PutRecords). The time stamp has millisecond precision. There are
* no guarantees about the time stamp accuracy, or that the time stamp is
* always increasing. For example, records in a shard or across a stream
* might have time stamps that are out of order.
* ShardIteratorType
parameter to read exactly from the
* position denoted by a specific sequence number by using the
* AT_SEQUENCE_NUMBER
shard iterator type. Alternatively, the
* parameter can read right after the sequence number by using the
* AFTER_SEQUENCE_NUMBER
shard iterator type, using sequence
* numbers returned by earlier calls to PutRecord, PutRecords,
* GetRecords, or DescribeStream. In the request, you can
* specify the shard iterator type AT_TIMESTAMP
to read records
* from an arbitrary point in time, TRIM_HORIZON
to cause
* ShardIterator
to point to the last untrimmed record in the
* shard in the system (the oldest data record in the shard), or
* LATEST
so that you always read the most recent data in the
* shard.
* NextShardIterator
. A new shard iterator is returned by every
* GetRecords request in NextShardIterator
, which you
* use in the ShardIterator
parameter of the next
* GetRecords request.
* ProvisionedThroughputExceededException
. For more information
* about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Data Streams Developer
* Guide.
* GetShardIterator
.
* ListStreams
. You can limit the number of returned streams
* using the Limit
parameter. If you do not specify a value for
* the Limit
parameter, Kinesis Data Streams uses the default
* limit, which is currently 10.
* HasMoreStreams
flag from the returned output. If there are
* more streams available, you can request more streams by using the name of
* the last stream returned by the ListStreams
request in the
* ExclusiveStartStreamName
parameter in a subsequent request
* to ListStreams
. The group of stream names returned by the
* subsequent request is then added to the list. You can continue this
* process until all the stream names have been collected in the list.
* ListStreams
.
* ListTagsForStream
.
* MergeShards
is called when there is a need to reduce the
* overall capacity of a stream because of excess capacity that is not being
* used. You must specify the shard to be merged and the adjacent shard for
* a stream. For more information about merging shards, see Merge Two Shards in the Amazon Kinesis Data Streams Developer
* Guide.
* ACTIVE
state, you can call
* MergeShards
. If a stream is in the CREATING
,
* UPDATING
, or DELETING
state,
* MergeShards
returns a ResourceInUseException
.
* If the specified stream does not exist, MergeShards
returns
* a ResourceNotFoundException
.
* StreamStatus
.
* MergeShards
is an asynchronous operation. Upon receiving a
* MergeShards
request, Amazon Kinesis Data Streams immediately
* returns a response and sets the StreamStatus
to
* UPDATING
. After the operation is completed, Kinesis Data
* Streams sets the StreamStatus
to ACTIVE
. Read
* and write operations continue to work while the stream is in the
* UPDATING
state.
* MergeShards
request.
* MergeShards
, or
* SplitShard, you receive a LimitExceededException
.
* MergeShards
has a limit of five transactions per second per
* account.
* MergeShards
.
* PutRecord
to send data into the stream for real-time
* ingestion and subsequent processing, one record at a time. Each shard can
* support writes up to 1,000 records per second, up to a maximum data write
* total of 1 MB per second.
* ExplicitHashKey
parameter. For more
* information, see Adding Data to a Stream in the Amazon Kinesis Data Streams
* Developer Guide.
* PutRecord
returns the shard ID of where the data record was
* placed and the sequence number that was assigned to the data record.
* SequenceNumberForOrdering
parameter. For more information,
* see Adding Data to a Stream in the Amazon Kinesis Data Streams
* Developer Guide.
* PutRecord
request cannot be processed because of
* insufficient provisioned throughput on the shard involved in the request,
* PutRecord
throws
* ProvisionedThroughputExceededException
.
* PutRecord
.
* PutRecords
request). Use this
* operation to send data into the stream for data ingestion and processing.
* PutRecords
request can support up to 500 records. Each
* record in the request can be as large as 1 MB, up to a limit of 5 MB for
* the entire request, including partition keys. Each shard can support
* writes up to 1,000 records per second, up to a maximum data write total
* of 1 MB per second.
* Records
, with
* each record in the array requiring a partition key and data blob. The
* record size limit applies to the total size of the partition key and data
* blob.
* Records
array may include an optional
* parameter, ExplicitHashKey
, which overrides the partition
* key to shard mapping. This parameter allows a data producer to determine
* explicitly the shard where the record is stored. For more information,
* see Adding Multiple Records with PutRecords in the Amazon Kinesis
* Data Streams Developer Guide.
* PutRecords
response includes an array of response
* Records
. Each record in the response array directly
* correlates with a record in the request array using natural ordering,
* from the top to the bottom of the request and response. The response
* Records
array always includes the same number of records as
* the request array.
* Records
array includes both successfully and
* unsuccessfully processed records. Kinesis Data Streams attempts to
* process all records in each PutRecords
request. A single
* record failure does not stop the processing of subsequent records.
* ShardId
and
* SequenceNumber
values. The ShardId
parameter
* identifies the shard in the stream where the record is stored. The
* SequenceNumber
parameter is an identifier assigned to the
* put record, unique to all records in the stream.
* ErrorCode
and
* ErrorMessage
values. ErrorCode
reflects the
* type of error and can be one of the following values:
* ProvisionedThroughputExceededException
or
* InternalFailure
. ErrorMessage
provides more
* detailed information about the
* ProvisionedThroughputExceededException
exception including
* the account ID, stream name, and shard ID of the record that was
* throttled. For more information about partially successful responses, see
* Adding Multiple Records with PutRecords in the Amazon Kinesis
* Data Streams Developer Guide.
* PutRecords
request.
* RemoveTagsFromStream
.
* SplitShard
is called when there is a need to increase the
* overall capacity of a stream because of an expected increase in the
* volume of data records being ingested.
* SplitShard
when a shard appears to be
* approaching its maximum utilization; for example, the producers sending
* data into the specific shard are suddenly sending more than previously
* anticipated. You can also call SplitShard
to increase stream
* capacity, so that more Kinesis Data Streams applications can
* simultaneously read data from the stream for real-time processing.
* ShardToSplit
and
* NewStartingHashKey
parameters that are specified in the
* SplitShard
request.
* SplitShard
is an asynchronous operation. Upon receiving a
* SplitShard
request, Kinesis Data Streams immediately returns
* a response and sets the stream status to UPDATING
. After the
* operation is completed, Kinesis Data Streams sets the stream status to
* ACTIVE
. Read and write operations continue to work while the
* stream is in the UPDATING
state.
* DescribeStream
to check the status of the
* stream, which is returned in StreamStatus
. If the stream is
* in the ACTIVE
state, you can call SplitShard
.
* If a stream is in CREATING
or UPDATING
or
* DELETING
states, DescribeStream
returns a
* ResourceInUseException
.
* DescribeStream
* returns a ResourceNotFoundException
. If you try to create
* more shards than are authorized for your account, you receive a
* LimitExceededException
.
* LimitExceededException
.
* SplitShard
has a limit of five transactions per second per
* account.
* SplitShard
.
* UPDATING
. After the update is complete,
* Kinesis Data Streams sets the status of the stream back to
* ACTIVE
. Updating or applying encryption normally takes a few
* seconds to complete, but it can take minutes. You can continue to read
* and write data to your stream while its status is UPDATING
.
* Once the status of the stream is ACTIVE
, encryption begins
* for records written to the stream.
* ACTIVE
status before all records written to the stream are
* encrypted. After you enable encryption, you can verify that encryption is
* applied by inspecting the API response from PutRecord
or
* PutRecords
.
* UPDATING
. After the update is complete,
* Kinesis Data Streams sets the status of the stream back to
* ACTIVE
. Stopping encryption normally takes a few seconds to
* complete, but it can take minutes. You can continue to read and write
* data to your stream while its status is UPDATING
. Once the
* status of the stream is ACTIVE
, records written to the
* stream are no longer encrypted by Kinesis Data Streams.
* ACTIVE
status before all records written to the stream are
 * no longer subject to encryption. After you disable encryption, you can
* verify that encryption is not applied by inspecting the API response from
* PutRecord
or PutRecords
.
* UPDATING
. After the update is complete,
* Kinesis Data Streams sets the status of the stream back to
* ACTIVE
. Depending on the size of the stream, the scaling
* action could take a few minutes to complete. You can continue to read and
* write data to your stream while its status is UPDATING
.
*
*
* CreateStream
is an asynchronous operation. Upon receiving a
* CreateStream
request, Kinesis Data Streams immediately
* returns and sets the stream status to CREATING
. After the
* stream is created, Kinesis Data Streams sets the stream status to
* ACTIVE
. You should perform read and write operations only on
* an ACTIVE
stream.
* LimitExceededException
when making a
* CreateStream
request when you try to do one of the
* following:
*
*
* CREATING
state at any
* point in time.
* DescribeStream
to check the stream status, which
* is returned in StreamStatus
.
* ResourceNotFoundException
.
* ACTIVE
state, you can delete it.
* After a DeleteStream
request, the specified stream is in the
* DELETING
state until Kinesis Data Streams completes the
* deletion.
* DELETING
state until
* the stream deletion is complete.
* StreamStatus
.
* ListStreams
. You can limit the number of returned streams
* using the Limit
parameter. If you do not specify a value for
* the Limit
parameter, Kinesis Data Streams uses the default
* limit, which is currently 10.
* HasMoreStreams
flag from the returned output. If there are
* more streams available, you can request more streams by using the name of
* the last stream returned by the ListStreams
request in the
* ExclusiveStartStreamName
parameter in a subsequent request
* to ListStreams
. The group of stream names returned by the
* subsequent request is then added to the list. You can continue this
* process until all the stream names have been collected in the list.
* ListStreams
. You can limit the number of returned streams
* using the Limit
parameter. If you do not specify a value for
* the Limit
parameter, Kinesis Data Streams uses the default
* limit, which is currently 10.
* HasMoreStreams
flag from the returned output. If there are
* more streams available, you can request more streams by using the name of
* the last stream returned by the ListStreams
request in the
* ExclusiveStartStreamName
parameter in a subsequent request
* to ListStreams
. The group of stream names returned by the
* subsequent request is then added to the list. You can continue this
* process until all the stream names have been collected in the list.
* ListStreams
. You can limit the number of returned streams
* using the Limit
parameter. If you do not specify a value for
* the Limit
parameter, Kinesis Data Streams uses the default
* limit, which is currently 10.
* HasMoreStreams
flag from the returned output. If there are
* more streams available, you can request more streams by using the name of
* the last stream returned by the ListStreams
request in the
* ExclusiveStartStreamName
parameter in a subsequent request
* to ListStreams
. The group of stream names returned by the
* subsequent request is then added to the list. You can continue this
* process until all the stream names have been collected in the list.
* MergeShards
is called when there is a need to reduce the
* overall capacity of a stream because of excess capacity that is not being
* used. You must specify the shard to be merged and the adjacent shard for
* a stream. For more information about merging shards, see Merge Two Shards in the Amazon Kinesis Data Streams Developer
* Guide.
* ACTIVE
state, you can call
* MergeShards
. If a stream is in the CREATING
,
* UPDATING
, or DELETING
state,
* MergeShards
returns a ResourceInUseException
.
* If the specified stream does not exist, MergeShards
returns
* a ResourceNotFoundException
.
* StreamStatus
.
* MergeShards
is an asynchronous operation. Upon receiving a
* MergeShards
request, Amazon Kinesis Data Streams immediately
* returns a response and sets the StreamStatus
to
* UPDATING
. After the operation is completed, Kinesis Data
* Streams sets the StreamStatus
to ACTIVE
. Read
* and write operations continue to work while the stream is in the
* UPDATING
state.
* MergeShards
request.
* MergeShards
, or
* SplitShard, you receive a LimitExceededException
.
* MergeShards
has a limit of five transactions per second per
* account.
* SplitShard
is called when there is a need to increase the
* overall capacity of a stream because of an expected increase in the
* volume of data records being ingested.
* SplitShard
when a shard appears to be
* approaching its maximum utilization; for example, the producers sending
* data into the specific shard are suddenly sending more than previously
* anticipated. You can also call SplitShard
to increase stream
* capacity, so that more Kinesis Data Streams applications can
* simultaneously read data from the stream for real-time processing.
* ShardToSplit
and
* NewStartingHashKey
parameters that are specified in the
* SplitShard
request.
* SplitShard
is an asynchronous operation. Upon receiving a
* SplitShard
request, Kinesis Data Streams immediately returns
* a response and sets the stream status to UPDATING
. After the
* operation is completed, Kinesis Data Streams sets the stream status to
* ACTIVE
. Read and write operations continue to work while the
* stream is in the UPDATING
state.
* DescribeStream
to check the status of the
* stream, which is returned in StreamStatus
. If the stream is
* in the ACTIVE
state, you can call SplitShard
.
* If a stream is in CREATING
or UPDATING
or
* DELETING
states, DescribeStream
returns a
* ResourceInUseException
.
* DescribeStream
* returns a ResourceNotFoundException
. If you try to create
* more shards than are authorized for your account, you receive a
* LimitExceededException
.
* LimitExceededException
.
* SplitShard
has a limit of five transactions per second per
* account.
* NewStartingHashKey
must
* be in the range of hash keys being mapped into the shard. The
* NewStartingHashKey
hash key value and all higher
* hash key values in hash key range are distributed to one of
* the child shards. All the lower hash key values in the range
* are distributed to the other child shard.
* PutRecord
to send data into the stream for real-time
* ingestion and subsequent processing, one record at a time. Each shard can
* support writes up to 1,000 records per second, up to a maximum data write
* total of 1 MB per second.
* ExplicitHashKey
parameter. For more
* information, see Adding Data to a Stream in the Amazon Kinesis Data Streams
* Developer Guide.
* PutRecord
returns the shard ID of where the data record was
* placed and the sequence number that was assigned to the data record.
* SequenceNumberForOrdering
parameter. For more information,
* see Adding Data to a Stream in the Amazon Kinesis Data Streams
* Developer Guide.
* PutRecord
request cannot be processed because of
* insufficient provisioned throughput on the shard involved in the request,
* PutRecord
throws
* ProvisionedThroughputExceededException
.
* PutRecord
to send data into the stream for real-time
* ingestion and subsequent processing, one record at a time. Each shard can
* support writes up to 1,000 records per second, up to a maximum data write
* total of 1 MB per second.
* ExplicitHashKey
parameter. For more
* information, see Adding Data to a Stream in the Amazon Kinesis Data Streams
* Developer Guide.
* PutRecord
returns the shard ID of where the data record was
* placed and the sequence number that was assigned to the data record.
* SequenceNumberForOrdering
parameter. For more information,
* see Adding Data to a Stream in the Amazon Kinesis Data Streams
* Developer Guide.
* PutRecord
request cannot be processed because of
* insufficient provisioned throughput on the shard involved in the request,
* PutRecord
throws
* ProvisionedThroughputExceededException
.
* SequenceNumberForOrdering
of record n to
* the sequence number of record n-1 (as returned in the
* result when putting record n-1). If this parameter is
* not set, records are coarsely ordered based on arrival time.
* ShardIteratorType
parameter to read exactly from the
* position denoted by a specific sequence number by using the
* AT_SEQUENCE_NUMBER
shard iterator type. Alternatively, the
* parameter can read right after the sequence number by using the
* AFTER_SEQUENCE_NUMBER
shard iterator type, using sequence
* numbers returned by earlier calls to PutRecord, PutRecords,
* GetRecords, or DescribeStream. In the request, you can
* specify the shard iterator type AT_TIMESTAMP
to read records
* from an arbitrary point in time, TRIM_HORIZON
to cause
* ShardIterator
to point to the last untrimmed record in the
* shard in the system (the oldest data record in the shard), or
* LATEST
so that you always read the most recent data in the
* shard.
* NextShardIterator
. A new shard iterator is returned by every
* GetRecords request in NextShardIterator
, which you
* use in the ShardIterator
parameter of the next
* GetRecords request.
* ProvisionedThroughputExceededException
. For more information
* about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Data Streams Developer
* Guide.
*
*
* @return getShardIteratorResult The response from the GetShardIterator
* service method, as returned by Amazon Kinesis Streams.
* @throws ResourceNotFoundException
* @throws InvalidArgumentException
* @throws ProvisionedThroughputExceededException
* @throws AmazonClientException If any internal errors are encountered
* inside the client while attempting to make the request or
* handle the response. For example if a network connection is
* not available.
* @throws AmazonServiceException If an error response is returned by Amazon
* Kinesis Streams indicating either a problem with the data in
* the request, or a server side issue.
*/
public GetShardIteratorResult getShardIterator(String streamName, String shardId,
String shardIteratorType)
throws AmazonServiceException, AmazonClientException {
GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest();
getShardIteratorRequest.setStreamName(streamName);
getShardIteratorRequest.setShardId(shardId);
getShardIteratorRequest.setShardIteratorType(shardIteratorType);
return getShardIterator(getShardIteratorRequest);
}
/**
* StartingSequenceNumber
.
* StartingSequenceNumber
.
* Timestamp
.
* ShardIteratorType
parameter to read exactly from the
* position denoted by a specific sequence number by using the
* AT_SEQUENCE_NUMBER
shard iterator type. Alternatively, the
* parameter can read right after the sequence number by using the
* AFTER_SEQUENCE_NUMBER
shard iterator type, using sequence
* numbers returned by earlier calls to PutRecord, PutRecords,
* GetRecords, or DescribeStream. In the request, you can
* specify the shard iterator type AT_TIMESTAMP
to read records
* from an arbitrary point in time, TRIM_HORIZON
to cause
* ShardIterator
to point to the last untrimmed record in the
* shard in the system (the oldest data record in the shard), or
* LATEST
so that you always read the most recent data in the
* shard.
* NextShardIterator
. A new shard iterator is returned by every
* GetRecords request in NextShardIterator
, which you
* use in the ShardIterator
parameter of the next
* GetRecords request.
* ProvisionedThroughputExceededException
. For more information
* about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Data Streams Developer
* Guide.
*
*
* @param startingSequenceNumber StartingSequenceNumber
.
* StartingSequenceNumber
.
* Timestamp
.
*