/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace Aws { namespace TranscribeService { namespace Model { /** */ class StartTranscriptionJobRequest : public TranscribeServiceRequest { public: AWS_TRANSCRIBESERVICE_API StartTranscriptionJobRequest(); // Service request name is the Operation name which will send this request out, // each operation should has unique request name, so that we can get operation's name from this request. // Note: this is not true for response, multiple operations may have the same response name, // so we can not get operation's name from response. inline virtual const char* GetServiceRequestName() const override { return "StartTranscriptionJob"; } AWS_TRANSCRIBESERVICE_API Aws::String SerializePayload() const override; AWS_TRANSCRIBESERVICE_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override; /** *

A unique name, chosen by you, for your transcription job. The name that you * specify is also used as the default name of your transcription output file. If * you want to specify a different name for your transcription output, use the * OutputKey parameter.

This name is case sensitive, cannot * contain spaces, and must be unique within an Amazon Web Services account. If you * try to create a new job with the same name as an existing job, you get a * ConflictException error.

*/ inline const Aws::String& GetTranscriptionJobName() const{ return m_transcriptionJobName; } /** *

A unique name, chosen by you, for your transcription job. The name that you * specify is also used as the default name of your transcription output file. If * you want to specify a different name for your transcription output, use the * OutputKey parameter.

This name is case sensitive, cannot * contain spaces, and must be unique within an Amazon Web Services account. If you * try to create a new job with the same name as an existing job, you get a * ConflictException error.

*/ inline bool TranscriptionJobNameHasBeenSet() const { return m_transcriptionJobNameHasBeenSet; } /** *

A unique name, chosen by you, for your transcription job. The name that you * specify is also used as the default name of your transcription output file. If * you want to specify a different name for your transcription output, use the * OutputKey parameter.

This name is case sensitive, cannot * contain spaces, and must be unique within an Amazon Web Services account. If you * try to create a new job with the same name as an existing job, you get a * ConflictException error.

*/ inline void SetTranscriptionJobName(const Aws::String& value) { m_transcriptionJobNameHasBeenSet = true; m_transcriptionJobName = value; } /** *

A unique name, chosen by you, for your transcription job. The name that you * specify is also used as the default name of your transcription output file. If * you want to specify a different name for your transcription output, use the * OutputKey parameter.

This name is case sensitive, cannot * contain spaces, and must be unique within an Amazon Web Services account. If you * try to create a new job with the same name as an existing job, you get a * ConflictException error.

*/ inline void SetTranscriptionJobName(Aws::String&& value) { m_transcriptionJobNameHasBeenSet = true; m_transcriptionJobName = std::move(value); } /** *

A unique name, chosen by you, for your transcription job. The name that you * specify is also used as the default name of your transcription output file. If * you want to specify a different name for your transcription output, use the * OutputKey parameter.

This name is case sensitive, cannot * contain spaces, and must be unique within an Amazon Web Services account. If you * try to create a new job with the same name as an existing job, you get a * ConflictException error.

*/ inline void SetTranscriptionJobName(const char* value) { m_transcriptionJobNameHasBeenSet = true; m_transcriptionJobName.assign(value); } /** *

A unique name, chosen by you, for your transcription job. The name that you * specify is also used as the default name of your transcription output file. If * you want to specify a different name for your transcription output, use the * OutputKey parameter.

This name is case sensitive, cannot * contain spaces, and must be unique within an Amazon Web Services account. If you * try to create a new job with the same name as an existing job, you get a * ConflictException error.

*/ inline StartTranscriptionJobRequest& WithTranscriptionJobName(const Aws::String& value) { SetTranscriptionJobName(value); return *this;} /** *

A unique name, chosen by you, for your transcription job. The name that you * specify is also used as the default name of your transcription output file. If * you want to specify a different name for your transcription output, use the * OutputKey parameter.

This name is case sensitive, cannot * contain spaces, and must be unique within an Amazon Web Services account. If you * try to create a new job with the same name as an existing job, you get a * ConflictException error.

*/ inline StartTranscriptionJobRequest& WithTranscriptionJobName(Aws::String&& value) { SetTranscriptionJobName(std::move(value)); return *this;} /** *

A unique name, chosen by you, for your transcription job. The name that you * specify is also used as the default name of your transcription output file. If * you want to specify a different name for your transcription output, use the * OutputKey parameter.

This name is case sensitive, cannot * contain spaces, and must be unique within an Amazon Web Services account. If you * try to create a new job with the same name as an existing job, you get a * ConflictException error.

*/ inline StartTranscriptionJobRequest& WithTranscriptionJobName(const char* value) { SetTranscriptionJobName(value); return *this;} /** *

The language code that represents the language spoken in the input media * file.

If you're unsure of the language spoken in your media file, * consider using IdentifyLanguage or * IdentifyMultipleLanguages to enable automatic language * identification.

Note that you must include one of * LanguageCode, IdentifyLanguage, or * IdentifyMultipleLanguages in your request. If you include more than * one of these parameters, your transcription job fails.

For a list of * supported languages and their associated language codes, refer to the Supported * languages table.

To transcribe speech in Modern Standard * Arabic (ar-SA), your media file must be encoded at a sample rate of * 16,000 Hz or higher.

*/ inline const LanguageCode& GetLanguageCode() const{ return m_languageCode; } /** *

The language code that represents the language spoken in the input media * file.

If you're unsure of the language spoken in your media file, * consider using IdentifyLanguage or * IdentifyMultipleLanguages to enable automatic language * identification.

Note that you must include one of * LanguageCode, IdentifyLanguage, or * IdentifyMultipleLanguages in your request. If you include more than * one of these parameters, your transcription job fails.

For a list of * supported languages and their associated language codes, refer to the Supported * languages table.

To transcribe speech in Modern Standard * Arabic (ar-SA), your media file must be encoded at a sample rate of * 16,000 Hz or higher.

*/ inline bool LanguageCodeHasBeenSet() const { return m_languageCodeHasBeenSet; } /** *

The language code that represents the language spoken in the input media * file.

If you're unsure of the language spoken in your media file, * consider using IdentifyLanguage or * IdentifyMultipleLanguages to enable automatic language * identification.

Note that you must include one of * LanguageCode, IdentifyLanguage, or * IdentifyMultipleLanguages in your request. If you include more than * one of these parameters, your transcription job fails.

For a list of * supported languages and their associated language codes, refer to the Supported * languages table.

To transcribe speech in Modern Standard * Arabic (ar-SA), your media file must be encoded at a sample rate of * 16,000 Hz or higher.

*/ inline void SetLanguageCode(const LanguageCode& value) { m_languageCodeHasBeenSet = true; m_languageCode = value; } /** *

The language code that represents the language spoken in the input media * file.

If you're unsure of the language spoken in your media file, * consider using IdentifyLanguage or * IdentifyMultipleLanguages to enable automatic language * identification.

Note that you must include one of * LanguageCode, IdentifyLanguage, or * IdentifyMultipleLanguages in your request. If you include more than * one of these parameters, your transcription job fails.

For a list of * supported languages and their associated language codes, refer to the Supported * languages table.

To transcribe speech in Modern Standard * Arabic (ar-SA), your media file must be encoded at a sample rate of * 16,000 Hz or higher.

*/ inline void SetLanguageCode(LanguageCode&& value) { m_languageCodeHasBeenSet = true; m_languageCode = std::move(value); } /** *

The language code that represents the language spoken in the input media * file.

If you're unsure of the language spoken in your media file, * consider using IdentifyLanguage or * IdentifyMultipleLanguages to enable automatic language * identification.

Note that you must include one of * LanguageCode, IdentifyLanguage, or * IdentifyMultipleLanguages in your request. If you include more than * one of these parameters, your transcription job fails.

For a list of * supported languages and their associated language codes, refer to the Supported * languages table.

To transcribe speech in Modern Standard * Arabic (ar-SA), your media file must be encoded at a sample rate of * 16,000 Hz or higher.

*/ inline StartTranscriptionJobRequest& WithLanguageCode(const LanguageCode& value) { SetLanguageCode(value); return *this;} /** *

The language code that represents the language spoken in the input media * file.

If you're unsure of the language spoken in your media file, * consider using IdentifyLanguage or * IdentifyMultipleLanguages to enable automatic language * identification.

Note that you must include one of * LanguageCode, IdentifyLanguage, or * IdentifyMultipleLanguages in your request. If you include more than * one of these parameters, your transcription job fails.

For a list of * supported languages and their associated language codes, refer to the Supported * languages table.

To transcribe speech in Modern Standard * Arabic (ar-SA), your media file must be encoded at a sample rate of * 16,000 Hz or higher.

*/ inline StartTranscriptionJobRequest& WithLanguageCode(LanguageCode&& value) { SetLanguageCode(std::move(value)); return *this;} /** *

The sample rate, in hertz, of the audio track in your input media file.

*

If you don't specify the media sample rate, Amazon Transcribe determines it * for you. If you specify the sample rate, it must match the rate detected by * Amazon Transcribe. If there's a mismatch between the value that you specify and * the value detected, your job fails. In most cases, you can omit * MediaSampleRateHertz and let Amazon Transcribe determine the sample * rate.

*/ inline int GetMediaSampleRateHertz() const{ return m_mediaSampleRateHertz; } /** *

The sample rate, in hertz, of the audio track in your input media file.

*

If you don't specify the media sample rate, Amazon Transcribe determines it * for you. If you specify the sample rate, it must match the rate detected by * Amazon Transcribe. If there's a mismatch between the value that you specify and * the value detected, your job fails. In most cases, you can omit * MediaSampleRateHertz and let Amazon Transcribe determine the sample * rate.

*/ inline bool MediaSampleRateHertzHasBeenSet() const { return m_mediaSampleRateHertzHasBeenSet; } /** *

The sample rate, in hertz, of the audio track in your input media file.

*

If you don't specify the media sample rate, Amazon Transcribe determines it * for you. If you specify the sample rate, it must match the rate detected by * Amazon Transcribe. If there's a mismatch between the value that you specify and * the value detected, your job fails. In most cases, you can omit * MediaSampleRateHertz and let Amazon Transcribe determine the sample * rate.

*/ inline void SetMediaSampleRateHertz(int value) { m_mediaSampleRateHertzHasBeenSet = true; m_mediaSampleRateHertz = value; } /** *

The sample rate, in hertz, of the audio track in your input media file.

*

If you don't specify the media sample rate, Amazon Transcribe determines it * for you. If you specify the sample rate, it must match the rate detected by * Amazon Transcribe. If there's a mismatch between the value that you specify and * the value detected, your job fails. In most cases, you can omit * MediaSampleRateHertz and let Amazon Transcribe determine the sample * rate.

*/ inline StartTranscriptionJobRequest& WithMediaSampleRateHertz(int value) { SetMediaSampleRateHertz(value); return *this;} /** *

Specify the format of your input media file.

*/ inline const MediaFormat& GetMediaFormat() const{ return m_mediaFormat; } /** *

Specify the format of your input media file.

*/ inline bool MediaFormatHasBeenSet() const { return m_mediaFormatHasBeenSet; } /** *

Specify the format of your input media file.

*/ inline void SetMediaFormat(const MediaFormat& value) { m_mediaFormatHasBeenSet = true; m_mediaFormat = value; } /** *

Specify the format of your input media file.

*/ inline void SetMediaFormat(MediaFormat&& value) { m_mediaFormatHasBeenSet = true; m_mediaFormat = std::move(value); } /** *

Specify the format of your input media file.

*/ inline StartTranscriptionJobRequest& WithMediaFormat(const MediaFormat& value) { SetMediaFormat(value); return *this;} /** *

Specify the format of your input media file.

*/ inline StartTranscriptionJobRequest& WithMediaFormat(MediaFormat&& value) { SetMediaFormat(std::move(value)); return *this;} /** *

Describes the Amazon S3 location of the media file you want to use in your * request.

*/ inline const Media& GetMedia() const{ return m_media; } /** *

Describes the Amazon S3 location of the media file you want to use in your * request.

*/ inline bool MediaHasBeenSet() const { return m_mediaHasBeenSet; } /** *

Describes the Amazon S3 location of the media file you want to use in your * request.

*/ inline void SetMedia(const Media& value) { m_mediaHasBeenSet = true; m_media = value; } /** *

Describes the Amazon S3 location of the media file you want to use in your * request.

*/ inline void SetMedia(Media&& value) { m_mediaHasBeenSet = true; m_media = std::move(value); } /** *

Describes the Amazon S3 location of the media file you want to use in your * request.

*/ inline StartTranscriptionJobRequest& WithMedia(const Media& value) { SetMedia(value); return *this;} /** *

Describes the Amazon S3 location of the media file you want to use in your * request.

*/ inline StartTranscriptionJobRequest& WithMedia(Media&& value) { SetMedia(std::move(value)); return *this;} /** *

The name of the Amazon S3 bucket where you want your transcription output * stored. Do not include the S3:// prefix of the specified * bucket.

If you want your output to go to a sub-folder of this bucket, * specify it using the OutputKey parameter; * OutputBucketName only accepts the name of a bucket.

For * example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET, * set OutputBucketName to DOC-EXAMPLE-BUCKET. However, * if you want your output stored in * S3://DOC-EXAMPLE-BUCKET/test-files/, set * OutputBucketName to DOC-EXAMPLE-BUCKET and * OutputKey to test-files/.

Note that Amazon * Transcribe must have permission to use the specified location. You can change * Amazon S3 permissions using the Amazon Web Services Management * Console. See also Permissions * Required for IAM User Roles.

If you don't specify * OutputBucketName, your transcript is placed in a service-managed * Amazon S3 bucket and you are provided with a URI to access your transcript.

*/ inline const Aws::String& GetOutputBucketName() const{ return m_outputBucketName; } /** *

The name of the Amazon S3 bucket where you want your transcription output * stored. Do not include the S3:// prefix of the specified * bucket.

If you want your output to go to a sub-folder of this bucket, * specify it using the OutputKey parameter; * OutputBucketName only accepts the name of a bucket.

For * example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET, * set OutputBucketName to DOC-EXAMPLE-BUCKET. However, * if you want your output stored in * S3://DOC-EXAMPLE-BUCKET/test-files/, set * OutputBucketName to DOC-EXAMPLE-BUCKET and * OutputKey to test-files/.

Note that Amazon * Transcribe must have permission to use the specified location. You can change * Amazon S3 permissions using the Amazon Web Services Management * Console. See also Permissions * Required for IAM User Roles.

If you don't specify * OutputBucketName, your transcript is placed in a service-managed * Amazon S3 bucket and you are provided with a URI to access your transcript.

*/ inline bool OutputBucketNameHasBeenSet() const { return m_outputBucketNameHasBeenSet; } /** *

The name of the Amazon S3 bucket where you want your transcription output * stored. Do not include the S3:// prefix of the specified * bucket.

If you want your output to go to a sub-folder of this bucket, * specify it using the OutputKey parameter; * OutputBucketName only accepts the name of a bucket.

For * example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET, * set OutputBucketName to DOC-EXAMPLE-BUCKET. However, * if you want your output stored in * S3://DOC-EXAMPLE-BUCKET/test-files/, set * OutputBucketName to DOC-EXAMPLE-BUCKET and * OutputKey to test-files/.

Note that Amazon * Transcribe must have permission to use the specified location. You can change * Amazon S3 permissions using the Amazon Web Services Management * Console. See also Permissions * Required for IAM User Roles.

If you don't specify * OutputBucketName, your transcript is placed in a service-managed * Amazon S3 bucket and you are provided with a URI to access your transcript.

*/ inline void SetOutputBucketName(const Aws::String& value) { m_outputBucketNameHasBeenSet = true; m_outputBucketName = value; } /** *

The name of the Amazon S3 bucket where you want your transcription output * stored. Do not include the S3:// prefix of the specified * bucket.

If you want your output to go to a sub-folder of this bucket, * specify it using the OutputKey parameter; * OutputBucketName only accepts the name of a bucket.

For * example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET, * set OutputBucketName to DOC-EXAMPLE-BUCKET. However, * if you want your output stored in * S3://DOC-EXAMPLE-BUCKET/test-files/, set * OutputBucketName to DOC-EXAMPLE-BUCKET and * OutputKey to test-files/.

Note that Amazon * Transcribe must have permission to use the specified location. You can change * Amazon S3 permissions using the Amazon Web Services Management * Console. See also Permissions * Required for IAM User Roles.

If you don't specify * OutputBucketName, your transcript is placed in a service-managed * Amazon S3 bucket and you are provided with a URI to access your transcript.

*/ inline void SetOutputBucketName(Aws::String&& value) { m_outputBucketNameHasBeenSet = true; m_outputBucketName = std::move(value); } /** *

The name of the Amazon S3 bucket where you want your transcription output * stored. Do not include the S3:// prefix of the specified * bucket.

If you want your output to go to a sub-folder of this bucket, * specify it using the OutputKey parameter; * OutputBucketName only accepts the name of a bucket.

For * example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET, * set OutputBucketName to DOC-EXAMPLE-BUCKET. However, * if you want your output stored in * S3://DOC-EXAMPLE-BUCKET/test-files/, set * OutputBucketName to DOC-EXAMPLE-BUCKET and * OutputKey to test-files/.

Note that Amazon * Transcribe must have permission to use the specified location. You can change * Amazon S3 permissions using the Amazon Web Services Management * Console. See also Permissions * Required for IAM User Roles.

If you don't specify * OutputBucketName, your transcript is placed in a service-managed * Amazon S3 bucket and you are provided with a URI to access your transcript.

*/ inline void SetOutputBucketName(const char* value) { m_outputBucketNameHasBeenSet = true; m_outputBucketName.assign(value); } /** *

The name of the Amazon S3 bucket where you want your transcription output * stored. Do not include the S3:// prefix of the specified * bucket.

If you want your output to go to a sub-folder of this bucket, * specify it using the OutputKey parameter; * OutputBucketName only accepts the name of a bucket.

For * example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET, * set OutputBucketName to DOC-EXAMPLE-BUCKET. However, * if you want your output stored in * S3://DOC-EXAMPLE-BUCKET/test-files/, set * OutputBucketName to DOC-EXAMPLE-BUCKET and * OutputKey to test-files/.

Note that Amazon * Transcribe must have permission to use the specified location. You can change * Amazon S3 permissions using the Amazon Web Services Management * Console. See also Permissions * Required for IAM User Roles.

If you don't specify * OutputBucketName, your transcript is placed in a service-managed * Amazon S3 bucket and you are provided with a URI to access your transcript.

*/ inline StartTranscriptionJobRequest& WithOutputBucketName(const Aws::String& value) { SetOutputBucketName(value); return *this;} /** *

The name of the Amazon S3 bucket where you want your transcription output * stored. Do not include the S3:// prefix of the specified * bucket.

If you want your output to go to a sub-folder of this bucket, * specify it using the OutputKey parameter; * OutputBucketName only accepts the name of a bucket.

For * example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET, * set OutputBucketName to DOC-EXAMPLE-BUCKET. However, * if you want your output stored in * S3://DOC-EXAMPLE-BUCKET/test-files/, set * OutputBucketName to DOC-EXAMPLE-BUCKET and * OutputKey to test-files/.

Note that Amazon * Transcribe must have permission to use the specified location. You can change * Amazon S3 permissions using the Amazon Web Services Management * Console. See also Permissions * Required for IAM User Roles.

If you don't specify * OutputBucketName, your transcript is placed in a service-managed * Amazon S3 bucket and you are provided with a URI to access your transcript.

*/ inline StartTranscriptionJobRequest& WithOutputBucketName(Aws::String&& value) { SetOutputBucketName(std::move(value)); return *this;} /** *

The name of the Amazon S3 bucket where you want your transcription output * stored. Do not include the S3:// prefix of the specified * bucket.

If you want your output to go to a sub-folder of this bucket, * specify it using the OutputKey parameter; * OutputBucketName only accepts the name of a bucket.

For * example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET, * set OutputBucketName to DOC-EXAMPLE-BUCKET. However, * if you want your output stored in * S3://DOC-EXAMPLE-BUCKET/test-files/, set * OutputBucketName to DOC-EXAMPLE-BUCKET and * OutputKey to test-files/.

Note that Amazon * Transcribe must have permission to use the specified location. You can change * Amazon S3 permissions using the Amazon Web Services Management * Console. See also Permissions * Required for IAM User Roles.

If you don't specify * OutputBucketName, your transcript is placed in a service-managed * Amazon S3 bucket and you are provided with a URI to access your transcript.

*/ inline StartTranscriptionJobRequest& WithOutputBucketName(const char* value) { SetOutputBucketName(value); return *this;} /** *

Use in combination with OutputBucketName to specify the output * location of your transcript and, optionally, a unique name for your output file. * The default name for your transcription output is the same as the name you * specified for your transcription job (TranscriptionJobName).

*

Here are some examples of how you can use OutputKey:

    *
  • If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName * and 'my-transcript.json' as the OutputKey, your transcription * output path is s3://DOC-EXAMPLE-BUCKET/my-transcript.json.

    *
  • If you specify 'my-first-transcription' as the * TranscriptionJobName, 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName, and 'my-transcript' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json.

    *
  • If you specify 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName and 'test-files/my-transcript.json' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json.

  • *
  • If you specify 'my-first-transcription' as the * TranscriptionJobName, 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName, and 'test-files/my-transcript' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json.

    *

If you specify the name of an Amazon S3 bucket sub-folder that * doesn't exist, one is created for you.

*/ inline const Aws::String& GetOutputKey() const{ return m_outputKey; } /** *

Use in combination with OutputBucketName to specify the output * location of your transcript and, optionally, a unique name for your output file. * The default name for your transcription output is the same as the name you * specified for your transcription job (TranscriptionJobName).

*

Here are some examples of how you can use OutputKey:

    *
  • If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName * and 'my-transcript.json' as the OutputKey, your transcription * output path is s3://DOC-EXAMPLE-BUCKET/my-transcript.json.

    *
  • If you specify 'my-first-transcription' as the * TranscriptionJobName, 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName, and 'my-transcript' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json.

    *
  • If you specify 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName and 'test-files/my-transcript.json' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json.

  • *
  • If you specify 'my-first-transcription' as the * TranscriptionJobName, 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName, and 'test-files/my-transcript' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json.

    *

If you specify the name of an Amazon S3 bucket sub-folder that * doesn't exist, one is created for you.

*/ inline bool OutputKeyHasBeenSet() const { return m_outputKeyHasBeenSet; } /** *

Use in combination with OutputBucketName to specify the output * location of your transcript and, optionally, a unique name for your output file. * The default name for your transcription output is the same as the name you * specified for your transcription job (TranscriptionJobName).

*

Here are some examples of how you can use OutputKey:

    *
  • If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName * and 'my-transcript.json' as the OutputKey, your transcription * output path is s3://DOC-EXAMPLE-BUCKET/my-transcript.json.

    *
  • If you specify 'my-first-transcription' as the * TranscriptionJobName, 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName, and 'my-transcript' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json.

    *
  • If you specify 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName and 'test-files/my-transcript.json' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json.

  • *
  • If you specify 'my-first-transcription' as the * TranscriptionJobName, 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName, and 'test-files/my-transcript' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json.

    *

If you specify the name of an Amazon S3 bucket sub-folder that * doesn't exist, one is created for you.

*/ inline void SetOutputKey(const Aws::String& value) { m_outputKeyHasBeenSet = true; m_outputKey = value; } /** *

Use in combination with OutputBucketName to specify the output * location of your transcript and, optionally, a unique name for your output file. * The default name for your transcription output is the same as the name you * specified for your transcription job (TranscriptionJobName).

*

Here are some examples of how you can use OutputKey:

    *
  • If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName * and 'my-transcript.json' as the OutputKey, your transcription * output path is s3://DOC-EXAMPLE-BUCKET/my-transcript.json.

    *
  • If you specify 'my-first-transcription' as the * TranscriptionJobName, 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName, and 'my-transcript' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json.

    *
  • If you specify 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName and 'test-files/my-transcript.json' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json.

  • *
  • If you specify 'my-first-transcription' as the * TranscriptionJobName, 'DOC-EXAMPLE-BUCKET' as the * OutputBucketName, and 'test-files/my-transcript' as the * OutputKey, your transcription output path is * s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json.

    *

If you specify the name of an Amazon S3 bucket sub-folder that
     * doesn't exist, one is created for you.</p>
     */
    inline void SetOutputKey(Aws::String&& value)
    {
      m_outputKeyHasBeenSet = true;
      m_outputKey = std::move(value);
    }

    /**
     * <p>Use in combination with <code>OutputBucketName</code> to specify the
     * output location of your transcript and, optionally, a unique name for your
     * output file. The default output name is the same as your transcription job
     * name (<code>TranscriptionJobName</code>). If you specify the name of an
     * Amazon S3 bucket sub-folder that doesn't exist, one is created for you.</p>
     */
    inline void SetOutputKey(const char* value)
    {
      m_outputKeyHasBeenSet = true;
      m_outputKey.assign(value);
    }

    /**
     * <p>Use in combination with <code>OutputBucketName</code> to specify the
     * output location of your transcript and, optionally, a unique name for your
     * output file. The default output name is the same as your transcription job
     * name (<code>TranscriptionJobName</code>). If you specify the name of an
     * Amazon S3 bucket sub-folder that doesn't exist, one is created for you.</p>
     */
    inline StartTranscriptionJobRequest& WithOutputKey(const Aws::String& value)
    {
      SetOutputKey(value);
      return *this;
    }

    /**
     * <p>Use in combination with <code>OutputBucketName</code> to specify the
     * output location of your transcript and, optionally, a unique name for your
     * output file. The default output name is the same as your transcription job
     * name (<code>TranscriptionJobName</code>). If you specify the name of an
     * Amazon S3 bucket sub-folder that doesn't exist, one is created for you.</p>
     */
    inline StartTranscriptionJobRequest& WithOutputKey(Aws::String&& value)
    {
      SetOutputKey(std::move(value));
      return *this;
    }

    /**
     * <p>Use in combination with <code>OutputBucketName</code> to specify the
     * output location of your transcript and, optionally, a unique name for your
     * output file. The default output name is the same as your transcription job
     * name (<code>TranscriptionJobName</code>). If you specify the name of an
     * Amazon S3 bucket sub-folder that doesn't exist, one is created for you.</p>
     */
    inline StartTranscriptionJobRequest& WithOutputKey(const char* value)
    {
      SetOutputKey(value);
      return *this;
    }

    /**

The KMS key you want to use to encrypt your transcription output.

If * using a key located in the current Amazon Web Services account, you can * specify your KMS key in one of four ways:

  1. Use the KMS key ID * itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use an alias for the KMS key ID. For example, * alias/ExampleAlias.

  3. Use the Amazon Resource Name * (ARN) for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  4. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If using a key located in a different Amazon Web Services account than * the current Amazon Web Services account, you can specify your KMS key in one of * two ways:

  1. Use the ARN for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If you don't specify an encryption key, your output is encrypted with the * default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your * output, you must also specify an output location using the * OutputLocation parameter.

Note that the role making the * request must have permission to use the specified KMS key.

*/ inline const Aws::String& GetOutputEncryptionKMSKeyId() const{ return m_outputEncryptionKMSKeyId; } /** *

The KMS key you want to use to encrypt your transcription output.

If * using a key located in the current Amazon Web Services account, you can * specify your KMS key in one of four ways:

  1. Use the KMS key ID * itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use an alias for the KMS key ID. For example, * alias/ExampleAlias.

  3. Use the Amazon Resource Name * (ARN) for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  4. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If using a key located in a different Amazon Web Services account than * the current Amazon Web Services account, you can specify your KMS key in one of * two ways:

  1. Use the ARN for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If you don't specify an encryption key, your output is encrypted with the * default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your * output, you must also specify an output location using the * OutputLocation parameter.

Note that the role making the * request must have permission to use the specified KMS key.

*/ inline bool OutputEncryptionKMSKeyIdHasBeenSet() const { return m_outputEncryptionKMSKeyIdHasBeenSet; } /** *

The KMS key you want to use to encrypt your transcription output.

If * using a key located in the current Amazon Web Services account, you can * specify your KMS key in one of four ways:

  1. Use the KMS key ID * itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use an alias for the KMS key ID. For example, * alias/ExampleAlias.

  3. Use the Amazon Resource Name * (ARN) for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  4. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If using a key located in a different Amazon Web Services account than * the current Amazon Web Services account, you can specify your KMS key in one of * two ways:

  1. Use the ARN for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If you don't specify an encryption key, your output is encrypted with the * default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your * output, you must also specify an output location using the * OutputLocation parameter.

Note that the role making the * request must have permission to use the specified KMS key.

*/ inline void SetOutputEncryptionKMSKeyId(const Aws::String& value) { m_outputEncryptionKMSKeyIdHasBeenSet = true; m_outputEncryptionKMSKeyId = value; } /** *

The KMS key you want to use to encrypt your transcription output.

If * using a key located in the current Amazon Web Services account, you can * specify your KMS key in one of four ways:

  1. Use the KMS key ID * itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use an alias for the KMS key ID. For example, * alias/ExampleAlias.

  3. Use the Amazon Resource Name * (ARN) for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  4. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If using a key located in a different Amazon Web Services account than * the current Amazon Web Services account, you can specify your KMS key in one of * two ways:

  1. Use the ARN for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If you don't specify an encryption key, your output is encrypted with the * default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your * output, you must also specify an output location using the * OutputLocation parameter.

Note that the role making the * request must have permission to use the specified KMS key.

*/ inline void SetOutputEncryptionKMSKeyId(Aws::String&& value) { m_outputEncryptionKMSKeyIdHasBeenSet = true; m_outputEncryptionKMSKeyId = std::move(value); } /** *

The KMS key you want to use to encrypt your transcription output.

If * using a key located in the current Amazon Web Services account, you can * specify your KMS key in one of four ways:

  1. Use the KMS key ID * itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use an alias for the KMS key ID. For example, * alias/ExampleAlias.

  3. Use the Amazon Resource Name * (ARN) for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  4. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If using a key located in a different Amazon Web Services account than * the current Amazon Web Services account, you can specify your KMS key in one of * two ways:

  1. Use the ARN for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If you don't specify an encryption key, your output is encrypted with the * default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your * output, you must also specify an output location using the * OutputLocation parameter.

Note that the role making the * request must have permission to use the specified KMS key.

*/ inline void SetOutputEncryptionKMSKeyId(const char* value) { m_outputEncryptionKMSKeyIdHasBeenSet = true; m_outputEncryptionKMSKeyId.assign(value); } /** *

The KMS key you want to use to encrypt your transcription output.

If * using a key located in the current Amazon Web Services account, you can * specify your KMS key in one of four ways:

  1. Use the KMS key ID * itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use an alias for the KMS key ID. For example, * alias/ExampleAlias.

  3. Use the Amazon Resource Name * (ARN) for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  4. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If using a key located in a different Amazon Web Services account than * the current Amazon Web Services account, you can specify your KMS key in one of * two ways:

  1. Use the ARN for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If you don't specify an encryption key, your output is encrypted with the * default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your * output, you must also specify an output location using the * OutputLocation parameter.

Note that the role making the * request must have permission to use the specified KMS key.

*/ inline StartTranscriptionJobRequest& WithOutputEncryptionKMSKeyId(const Aws::String& value) { SetOutputEncryptionKMSKeyId(value); return *this;} /** *

The KMS key you want to use to encrypt your transcription output.

If * using a key located in the current Amazon Web Services account, you can * specify your KMS key in one of four ways:

  1. Use the KMS key ID * itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use an alias for the KMS key ID. For example, * alias/ExampleAlias.

  3. Use the Amazon Resource Name * (ARN) for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  4. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If using a key located in a different Amazon Web Services account than * the current Amazon Web Services account, you can specify your KMS key in one of * two ways:

  1. Use the ARN for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If you don't specify an encryption key, your output is encrypted with the * default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your * output, you must also specify an output location using the * OutputLocation parameter.

Note that the role making the * request must have permission to use the specified KMS key.

*/ inline StartTranscriptionJobRequest& WithOutputEncryptionKMSKeyId(Aws::String&& value) { SetOutputEncryptionKMSKeyId(std::move(value)); return *this;} /** *

The KMS key you want to use to encrypt your transcription output.

If * using a key located in the current Amazon Web Services account, you can * specify your KMS key in one of four ways:

  1. Use the KMS key ID * itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use an alias for the KMS key ID. For example, * alias/ExampleAlias.

  3. Use the Amazon Resource Name * (ARN) for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  4. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If using a key located in a different Amazon Web Services account than * the current Amazon Web Services account, you can specify your KMS key in one of * two ways:

  1. Use the ARN for the KMS key ID. For example, * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    *
  2. Use the ARN for the KMS key alias. For example, * arn:aws:kms:region:account-ID:alias/ExampleAlias.

*

If you don't specify an encryption key, your output is encrypted with the * default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your * output, you must also specify an output location using the * OutputLocation parameter.

Note that the role making the * request must have permission to use the specified KMS key.

*/ inline StartTranscriptionJobRequest& WithOutputEncryptionKMSKeyId(const char* value) { SetOutputEncryptionKMSKeyId(value); return *this;} /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline const Aws::Map& GetKMSEncryptionContext() const{ return m_kMSEncryptionContext; } /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline bool KMSEncryptionContextHasBeenSet() const { return m_kMSEncryptionContextHasBeenSet; } /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline void SetKMSEncryptionContext(const Aws::Map& value) { m_kMSEncryptionContextHasBeenSet = true; m_kMSEncryptionContext = value; } /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline void SetKMSEncryptionContext(Aws::Map&& value) { m_kMSEncryptionContextHasBeenSet = true; m_kMSEncryptionContext = std::move(value); } /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline StartTranscriptionJobRequest& WithKMSEncryptionContext(const Aws::Map& value) { SetKMSEncryptionContext(value); return *this;} /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline StartTranscriptionJobRequest& WithKMSEncryptionContext(Aws::Map&& value) { SetKMSEncryptionContext(std::move(value)); return *this;} /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline StartTranscriptionJobRequest& AddKMSEncryptionContext(const Aws::String& key, const Aws::String& value) { m_kMSEncryptionContextHasBeenSet = true; m_kMSEncryptionContext.emplace(key, value); return *this; } /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline StartTranscriptionJobRequest& AddKMSEncryptionContext(Aws::String&& key, const Aws::String& value) { m_kMSEncryptionContextHasBeenSet = true; m_kMSEncryptionContext.emplace(std::move(key), value); return *this; } /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline StartTranscriptionJobRequest& AddKMSEncryptionContext(const Aws::String& key, Aws::String&& value) { m_kMSEncryptionContextHasBeenSet = true; m_kMSEncryptionContext.emplace(key, std::move(value)); return *this; } /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline StartTranscriptionJobRequest& AddKMSEncryptionContext(Aws::String&& key, Aws::String&& value) { m_kMSEncryptionContextHasBeenSet = true; m_kMSEncryptionContext.emplace(std::move(key), std::move(value)); return *this; } /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline StartTranscriptionJobRequest& AddKMSEncryptionContext(const char* key, Aws::String&& value) { m_kMSEncryptionContextHasBeenSet = true; m_kMSEncryptionContext.emplace(key, std::move(value)); return *this; } /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline StartTranscriptionJobRequest& AddKMSEncryptionContext(Aws::String&& key, const char* value) { m_kMSEncryptionContextHasBeenSet = true; m_kMSEncryptionContext.emplace(std::move(key), value); return *this; } /** *

A map of plain text, non-secret key:value pairs, known as encryption context * pairs, that provide an added layer of security for your data. For more * information, see KMS * encryption context and Asymmetric * keys in KMS.

*/ inline StartTranscriptionJobRequest& AddKMSEncryptionContext(const char* key, const char* value) { m_kMSEncryptionContextHasBeenSet = true; m_kMSEncryptionContext.emplace(key, value); return *this; } /** *

Specify additional optional settings in your request, including
     * channel identification, alternative transcriptions, and speaker
     * partitioning. You can use this parameter to apply custom vocabularies and
     * vocabulary filters via the <code>VocabularyName</code> or
     * <code>VocabularyFilterName</code> (or both) sub-parameters. If you're using
     * automatic language identification and want to include a custom language
     * model, a custom vocabulary, or a custom vocabulary filter, use the
     * language-identification settings parameter instead.</p>
     */
    inline const Settings& GetSettings() const
    {
      return m_settings;
    }

    /**
     * <p>Returns true if optional settings have been explicitly set on this
     * request.</p>
     */
    inline bool SettingsHasBeenSet() const
    {
      return m_settingsHasBeenSet;
    }

    /**
     * <p>Sets the optional request settings (copy overload). See
     * <code>GetSettings</code> for details.</p>
     */
    inline void SetSettings(const Settings& value)
    {
      m_settingsHasBeenSet = true;
      m_settings = value;
    }

    /**
     * <p>Sets the optional request settings (move overload). See
     * <code>GetSettings</code> for details.</p>
     */
    inline void SetSettings(Settings&& value)
    {
      m_settingsHasBeenSet = true;
      m_settings = std::move(value);
    }

    /**
     * <p>Fluent setter for the optional request settings (copy overload).</p>
     */
    inline StartTranscriptionJobRequest& WithSettings(const Settings& value)
    {
      m_settingsHasBeenSet = true;
      m_settings = value;
      return *this;
    }

    /**
     * <p>Fluent setter for the optional request settings (move overload).</p>
     */
    inline StartTranscriptionJobRequest& WithSettings(Settings&& value)
    {
      m_settingsHasBeenSet = true;
      m_settings = std::move(value);
      return *this;
    }

    /**

Specify the custom language model you want to include with your
     * transcription job. If you include <code>ModelSettings</code> in your
     * request, you must include the <code>LanguageModelName</code> sub-parameter.
     * For more information, see Custom language models.</p>
     */
    inline const ModelSettings& GetModelSettings() const
    {
      return m_modelSettings;
    }

    /**
     * <p>Returns true if custom language model settings have been explicitly set
     * on this request.</p>
     */
    inline bool ModelSettingsHasBeenSet() const
    {
      return m_modelSettingsHasBeenSet;
    }

    /**
     * <p>Sets the custom language model settings (copy overload). See
     * <code>GetModelSettings</code> for details.</p>
     */
    inline void SetModelSettings(const ModelSettings& value)
    {
      m_modelSettingsHasBeenSet = true;
      m_modelSettings = value;
    }

    /**
     * <p>Sets the custom language model settings (move overload). See
     * <code>GetModelSettings</code> for details.</p>
     */
    inline void SetModelSettings(ModelSettings&& value)
    {
      m_modelSettingsHasBeenSet = true;
      m_modelSettings = std::move(value);
    }

    /**
     * <p>Fluent setter for the custom language model settings (copy
     * overload).</p>
     */
    inline StartTranscriptionJobRequest& WithModelSettings(const ModelSettings& value)
    {
      m_modelSettingsHasBeenSet = true;
      m_modelSettings = value;
      return *this;
    }

    /**
     * <p>Fluent setter for the custom language model settings (move
     * overload).</p>
     */
    inline StartTranscriptionJobRequest& WithModelSettings(ModelSettings&& value)
    {
      m_modelSettingsHasBeenSet = true;
      m_modelSettings = std::move(value);
      return *this;
    }

    /**

Makes it possible to control how your transcription job is
     * processed. Currently, the only <code>JobExecutionSettings</code>
     * modification you can choose is enabling job queueing using the
     * <code>AllowDeferredExecution</code> sub-parameter. If you include
     * <code>JobExecutionSettings</code> in your request, you must also include
     * the sub-parameters: <code>AllowDeferredExecution</code> and
     * <code>DataAccessRoleArn</code>.</p>
     */
    inline const JobExecutionSettings& GetJobExecutionSettings() const
    {
      return m_jobExecutionSettings;
    }

    /**
     * <p>Returns true if job execution settings have been explicitly set on this
     * request.</p>
     */
    inline bool JobExecutionSettingsHasBeenSet() const
    {
      return m_jobExecutionSettingsHasBeenSet;
    }

    /**
     * <p>Sets the job execution settings (copy overload). See
     * <code>GetJobExecutionSettings</code> for details.</p>
     */
    inline void SetJobExecutionSettings(const JobExecutionSettings& value)
    {
      m_jobExecutionSettingsHasBeenSet = true;
      m_jobExecutionSettings = value;
    }

    /**
     * <p>Sets the job execution settings (move overload). See
     * <code>GetJobExecutionSettings</code> for details.</p>
     */
    inline void SetJobExecutionSettings(JobExecutionSettings&& value)
    {
      m_jobExecutionSettingsHasBeenSet = true;
      m_jobExecutionSettings = std::move(value);
    }

    /**
     * <p>Fluent setter for the job execution settings (copy overload).</p>
     */
    inline StartTranscriptionJobRequest& WithJobExecutionSettings(const JobExecutionSettings& value)
    {
      m_jobExecutionSettingsHasBeenSet = true;
      m_jobExecutionSettings = value;
      return *this;
    }

    /**
     * <p>Fluent setter for the job execution settings (move overload).</p>
     */
    inline StartTranscriptionJobRequest& WithJobExecutionSettings(JobExecutionSettings&& value)
    {
      m_jobExecutionSettingsHasBeenSet = true;
      m_jobExecutionSettings = std::move(value);
      return *this;
    }

    /**

Makes it possible to redact or flag specified personally identifiable
   * information (PII) in your transcript. If you use ContentRedaction, you
   * must also include the sub-parameters: PiiEntityTypes, RedactionOutput,
   * and RedactionType.
   */
  inline const ContentRedaction& GetContentRedaction() const{ return m_contentRedaction; }

  /**
   * Returns true if a ContentRedaction configuration has been set on this
   * request (i.e. it will be serialized into the payload).
   */
  inline bool ContentRedactionHasBeenSet() const { return m_contentRedactionHasBeenSet; }

  /**
   * Copies the given PII redaction/flagging configuration into the request.
   */
  inline void SetContentRedaction(const ContentRedaction& value) { m_contentRedaction = value; m_contentRedactionHasBeenSet = true; }

  /**
   * Moves the given PII redaction/flagging configuration into the request.
   */
  inline void SetContentRedaction(ContentRedaction&& value) { m_contentRedaction = std::move(value); m_contentRedactionHasBeenSet = true; }

  /**
   * Fluent setter (copy) for the content-redaction configuration; returns
   * *this for call chaining.
   */
  inline StartTranscriptionJobRequest& WithContentRedaction(const ContentRedaction& value) { SetContentRedaction(value); return *this; }

  /**
   * Fluent setter (move) for the content-redaction configuration; returns
   * *this for call chaining.
   */
  inline StartTranscriptionJobRequest& WithContentRedaction(ContentRedaction&& value) { SetContentRedaction(std::move(value)); return *this; }

  /** *

Enables automatic language identification in your transcription job request. * Use this parameter if your media file contains only one language. If your media * contains multiple languages, use IdentifyMultipleLanguages * instead.

If you include IdentifyLanguage, you can optionally * include a list of language codes, using LanguageOptions, that you * think may be present in your media file. Including LanguageOptions * restricts IdentifyLanguage to only the language options that you * specify, which can improve transcription accuracy.

If you want to apply a * custom language model, a custom vocabulary, or a custom vocabulary filter to * your automatic language identification request, include * LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). If you include * LanguageIdSettings, also include LanguageOptions.

*

Note that you must include one of LanguageCode, * IdentifyLanguage, or IdentifyMultipleLanguages in your * request. If you include more than one of these parameters, your transcription * job fails.

*/ inline bool GetIdentifyLanguage() const{ return m_identifyLanguage; } /** *

Enables automatic language identification in your transcription job request. * Use this parameter if your media file contains only one language. If your media * contains multiple languages, use IdentifyMultipleLanguages * instead.

If you include IdentifyLanguage, you can optionally * include a list of language codes, using LanguageOptions, that you * think may be present in your media file. Including LanguageOptions * restricts IdentifyLanguage to only the language options that you * specify, which can improve transcription accuracy.

If you want to apply a * custom language model, a custom vocabulary, or a custom vocabulary filter to * your automatic language identification request, include * LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). If you include * LanguageIdSettings, also include LanguageOptions.

*

Note that you must include one of LanguageCode, * IdentifyLanguage, or IdentifyMultipleLanguages in your * request. If you include more than one of these parameters, your transcription * job fails.

*/ inline bool IdentifyLanguageHasBeenSet() const { return m_identifyLanguageHasBeenSet; } /** *

Enables automatic language identification in your transcription job request. * Use this parameter if your media file contains only one language. If your media * contains multiple languages, use IdentifyMultipleLanguages * instead.

If you include IdentifyLanguage, you can optionally * include a list of language codes, using LanguageOptions, that you * think may be present in your media file. Including LanguageOptions * restricts IdentifyLanguage to only the language options that you * specify, which can improve transcription accuracy.

If you want to apply a * custom language model, a custom vocabulary, or a custom vocabulary filter to * your automatic language identification request, include * LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). If you include * LanguageIdSettings, also include LanguageOptions.

*

Note that you must include one of LanguageCode, * IdentifyLanguage, or IdentifyMultipleLanguages in your * request. If you include more than one of these parameters, your transcription * job fails.

*/ inline void SetIdentifyLanguage(bool value) { m_identifyLanguageHasBeenSet = true; m_identifyLanguage = value; } /** *

Enables automatic language identification in your transcription job request. * Use this parameter if your media file contains only one language. If your media * contains multiple languages, use IdentifyMultipleLanguages * instead.

If you include IdentifyLanguage, you can optionally * include a list of language codes, using LanguageOptions, that you * think may be present in your media file. Including LanguageOptions * restricts IdentifyLanguage to only the language options that you * specify, which can improve transcription accuracy.

If you want to apply a * custom language model, a custom vocabulary, or a custom vocabulary filter to * your automatic language identification request, include * LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). If you include * LanguageIdSettings, also include LanguageOptions.

*

Note that you must include one of LanguageCode, * IdentifyLanguage, or IdentifyMultipleLanguages in your * request. If you include more than one of these parameters, your transcription * job fails.

*/ inline StartTranscriptionJobRequest& WithIdentifyLanguage(bool value) { SetIdentifyLanguage(value); return *this;} /** *

Enables automatic multi-language identification in your transcription job * request. Use this parameter if your media file contains more than one language. * If your media contains only one language, use IdentifyLanguage * instead.

If you include IdentifyMultipleLanguages, you can * optionally include a list of language codes, using LanguageOptions, * that you think may be present in your media file. Including * LanguageOptions restricts IdentifyLanguage to only the * language options that you specify, which can improve transcription accuracy.

*

If you want to apply a custom vocabulary or a custom vocabulary filter to * your automatic language identification request, include * LanguageIdSettings with the relevant sub-parameters * (VocabularyName and VocabularyFilterName). If you * include LanguageIdSettings, also include * LanguageOptions.

Note that you must include one of * LanguageCode, IdentifyLanguage, or * IdentifyMultipleLanguages in your request. If you include more than * one of these parameters, your transcription job fails.

*/ inline bool GetIdentifyMultipleLanguages() const{ return m_identifyMultipleLanguages; } /** *

Enables automatic multi-language identification in your transcription job * request. Use this parameter if your media file contains more than one language. * If your media contains only one language, use IdentifyLanguage * instead.

If you include IdentifyMultipleLanguages, you can * optionally include a list of language codes, using LanguageOptions, * that you think may be present in your media file. Including * LanguageOptions restricts IdentifyLanguage to only the * language options that you specify, which can improve transcription accuracy.

*

If you want to apply a custom vocabulary or a custom vocabulary filter to * your automatic language identification request, include * LanguageIdSettings with the relevant sub-parameters * (VocabularyName and VocabularyFilterName). If you * include LanguageIdSettings, also include * LanguageOptions.

Note that you must include one of * LanguageCode, IdentifyLanguage, or * IdentifyMultipleLanguages in your request. If you include more than * one of these parameters, your transcription job fails.

*/ inline bool IdentifyMultipleLanguagesHasBeenSet() const { return m_identifyMultipleLanguagesHasBeenSet; } /** *

Enables automatic multi-language identification in your transcription job * request. Use this parameter if your media file contains more than one language. * If your media contains only one language, use IdentifyLanguage * instead.

If you include IdentifyMultipleLanguages, you can * optionally include a list of language codes, using LanguageOptions, * that you think may be present in your media file. Including * LanguageOptions restricts IdentifyLanguage to only the * language options that you specify, which can improve transcription accuracy.

*

If you want to apply a custom vocabulary or a custom vocabulary filter to * your automatic language identification request, include * LanguageIdSettings with the relevant sub-parameters * (VocabularyName and VocabularyFilterName). If you * include LanguageIdSettings, also include * LanguageOptions.

Note that you must include one of * LanguageCode, IdentifyLanguage, or * IdentifyMultipleLanguages in your request. If you include more than * one of these parameters, your transcription job fails.

*/ inline void SetIdentifyMultipleLanguages(bool value) { m_identifyMultipleLanguagesHasBeenSet = true; m_identifyMultipleLanguages = value; } /** *

Enables automatic multi-language identification in your transcription job * request. Use this parameter if your media file contains more than one language. * If your media contains only one language, use IdentifyLanguage * instead.

If you include IdentifyMultipleLanguages, you can * optionally include a list of language codes, using LanguageOptions, * that you think may be present in your media file. Including * LanguageOptions restricts IdentifyLanguage to only the * language options that you specify, which can improve transcription accuracy.

*

If you want to apply a custom vocabulary or a custom vocabulary filter to * your automatic language identification request, include * LanguageIdSettings with the relevant sub-parameters * (VocabularyName and VocabularyFilterName). If you * include LanguageIdSettings, also include * LanguageOptions.

Note that you must include one of * LanguageCode, IdentifyLanguage, or * IdentifyMultipleLanguages in your request. If you include more than * one of these parameters, your transcription job fails.

*/ inline StartTranscriptionJobRequest& WithIdentifyMultipleLanguages(bool value) { SetIdentifyMultipleLanguages(value); return *this;} /** *

You can specify two or more language codes that represent the languages you * think may be present in your media. Including more than five is not recommended. * If you're unsure what languages are present, do not include this parameter.

*

If you include LanguageOptions in your request, you must also * include IdentifyLanguage.

For more information, refer to Supported * languages.

To transcribe speech in Modern Standard Arabic * (ar-SA), your media file must be encoded at a sample rate of 16,000 * Hz or higher.

*/ inline const Aws::Vector& GetLanguageOptions() const{ return m_languageOptions; } /** *

You can specify two or more language codes that represent the languages you * think may be present in your media. Including more than five is not recommended. * If you're unsure what languages are present, do not include this parameter.

*

If you include LanguageOptions in your request, you must also * include IdentifyLanguage.

For more information, refer to Supported * languages.

To transcribe speech in Modern Standard Arabic * (ar-SA), your media file must be encoded at a sample rate of 16,000 * Hz or higher.

*/ inline bool LanguageOptionsHasBeenSet() const { return m_languageOptionsHasBeenSet; } /** *

You can specify two or more language codes that represent the languages you * think may be present in your media. Including more than five is not recommended. * If you're unsure what languages are present, do not include this parameter.

*

If you include LanguageOptions in your request, you must also * include IdentifyLanguage.

For more information, refer to Supported * languages.

To transcribe speech in Modern Standard Arabic * (ar-SA), your media file must be encoded at a sample rate of 16,000 * Hz or higher.

*/ inline void SetLanguageOptions(const Aws::Vector& value) { m_languageOptionsHasBeenSet = true; m_languageOptions = value; } /** *

You can specify two or more language codes that represent the languages you * think may be present in your media. Including more than five is not recommended. * If you're unsure what languages are present, do not include this parameter.

*

If you include LanguageOptions in your request, you must also * include IdentifyLanguage.

For more information, refer to Supported * languages.

To transcribe speech in Modern Standard Arabic * (ar-SA), your media file must be encoded at a sample rate of 16,000 * Hz or higher.

*/ inline void SetLanguageOptions(Aws::Vector&& value) { m_languageOptionsHasBeenSet = true; m_languageOptions = std::move(value); } /** *

You can specify two or more language codes that represent the languages you * think may be present in your media. Including more than five is not recommended. * If you're unsure what languages are present, do not include this parameter.

*

If you include LanguageOptions in your request, you must also * include IdentifyLanguage.

For more information, refer to Supported * languages.

To transcribe speech in Modern Standard Arabic * (ar-SA), your media file must be encoded at a sample rate of 16,000 * Hz or higher.

*/ inline StartTranscriptionJobRequest& WithLanguageOptions(const Aws::Vector& value) { SetLanguageOptions(value); return *this;} /** *

You can specify two or more language codes that represent the languages you * think may be present in your media. Including more than five is not recommended. * If you're unsure what languages are present, do not include this parameter.

*

If you include LanguageOptions in your request, you must also * include IdentifyLanguage.

For more information, refer to Supported * languages.

To transcribe speech in Modern Standard Arabic * (ar-SA), your media file must be encoded at a sample rate of 16,000 * Hz or higher.

*/ inline StartTranscriptionJobRequest& WithLanguageOptions(Aws::Vector&& value) { SetLanguageOptions(std::move(value)); return *this;} /** *

You can specify two or more language codes that represent the languages you * think may be present in your media. Including more than five is not recommended. * If you're unsure what languages are present, do not include this parameter.

*

If you include LanguageOptions in your request, you must also * include IdentifyLanguage.

For more information, refer to Supported * languages.

To transcribe speech in Modern Standard Arabic * (ar-SA), your media file must be encoded at a sample rate of 16,000 * Hz or higher.

*/ inline StartTranscriptionJobRequest& AddLanguageOptions(const LanguageCode& value) { m_languageOptionsHasBeenSet = true; m_languageOptions.push_back(value); return *this; } /** *

You can specify two or more language codes that represent the languages you * think may be present in your media. Including more than five is not recommended. * If you're unsure what languages are present, do not include this parameter.

*

If you include LanguageOptions in your request, you must also * include IdentifyLanguage.

For more information, refer to Supported * languages.

To transcribe speech in Modern Standard Arabic * (ar-SA), your media file must be encoded at a sample rate of 16,000 * Hz or higher.

*/ inline StartTranscriptionJobRequest& AddLanguageOptions(LanguageCode&& value) { m_languageOptionsHasBeenSet = true; m_languageOptions.push_back(std::move(value)); return *this; } /** *

Produces subtitle files for your input media. You can specify WebVTT
   * (*.vtt) and SubRip (*.srt) formats.
   */
  inline const Subtitles& GetSubtitles() const{ return m_subtitles; }

  /**
   * Returns true if a Subtitles configuration has been set on this request.
   */
  inline bool SubtitlesHasBeenSet() const { return m_subtitlesHasBeenSet; }

  /**
   * Copies the given subtitle-output configuration into the request.
   */
  inline void SetSubtitles(const Subtitles& value) { m_subtitles = value; m_subtitlesHasBeenSet = true; }

  /**
   * Moves the given subtitle-output configuration into the request.
   */
  inline void SetSubtitles(Subtitles&& value) { m_subtitles = std::move(value); m_subtitlesHasBeenSet = true; }

  /**
   * Fluent setter (copy) for the subtitle configuration; returns *this for
   * call chaining.
   */
  inline StartTranscriptionJobRequest& WithSubtitles(const Subtitles& value) { SetSubtitles(value); return *this; }

  /**
   * Fluent setter (move) for the subtitle configuration; returns *this for
   * call chaining.
   */
  inline StartTranscriptionJobRequest& WithSubtitles(Subtitles&& value) { SetSubtitles(std::move(value)); return *this; }

  /** *

Adds one or more custom tags, each in the form of a key:value pair, to a new * transcription job at the time you start this new job.

To learn more about * using tags with Amazon Transcribe, refer to Tagging * resources.

*/ inline const Aws::Vector& GetTags() const{ return m_tags; } /** *

Adds one or more custom tags, each in the form of a key:value pair, to a new * transcription job at the time you start this new job.

To learn more about * using tags with Amazon Transcribe, refer to Tagging * resources.

*/ inline bool TagsHasBeenSet() const { return m_tagsHasBeenSet; } /** *

Adds one or more custom tags, each in the form of a key:value pair, to a new * transcription job at the time you start this new job.

To learn more about * using tags with Amazon Transcribe, refer to Tagging * resources.

*/ inline void SetTags(const Aws::Vector& value) { m_tagsHasBeenSet = true; m_tags = value; } /** *

Adds one or more custom tags, each in the form of a key:value pair, to a new * transcription job at the time you start this new job.

To learn more about * using tags with Amazon Transcribe, refer to Tagging * resources.

*/ inline void SetTags(Aws::Vector&& value) { m_tagsHasBeenSet = true; m_tags = std::move(value); } /** *

Adds one or more custom tags, each in the form of a key:value pair, to a new * transcription job at the time you start this new job.

To learn more about * using tags with Amazon Transcribe, refer to Tagging * resources.

*/ inline StartTranscriptionJobRequest& WithTags(const Aws::Vector& value) { SetTags(value); return *this;} /** *

Adds one or more custom tags, each in the form of a key:value pair, to a new * transcription job at the time you start this new job.

To learn more about * using tags with Amazon Transcribe, refer to Tagging * resources.

*/ inline StartTranscriptionJobRequest& WithTags(Aws::Vector&& value) { SetTags(std::move(value)); return *this;} /** *

Adds one or more custom tags, each in the form of a key:value pair, to a new * transcription job at the time you start this new job.

To learn more about * using tags with Amazon Transcribe, refer to Tagging * resources.

*/ inline StartTranscriptionJobRequest& AddTags(const Tag& value) { m_tagsHasBeenSet = true; m_tags.push_back(value); return *this; } /** *

Adds one or more custom tags, each in the form of a key:value pair, to a new * transcription job at the time you start this new job.

To learn more about * using tags with Amazon Transcribe, refer to Tagging * resources.

*/ inline StartTranscriptionJobRequest& AddTags(Tag&& value) { m_tagsHasBeenSet = true; m_tags.push_back(std::move(value)); return *this; } /** *

If using automatic language identification in your request and you want to * apply a custom language model, a custom vocabulary, or a custom vocabulary * filter, include LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). Note that multi-language identification * (IdentifyMultipleLanguages) doesn't support custom language * models.

LanguageIdSettings supports two to five language * codes. Each language code you include can have an associated custom language * model, custom vocabulary, and custom vocabulary filter. The language codes that * you specify must match the languages of the associated custom language models, * custom vocabularies, and custom vocabulary filters.

It's recommended that * you include LanguageOptions when using * LanguageIdSettings to ensure that the correct language dialect is * identified. For example, if you specify a custom vocabulary that is in * en-US but Amazon Transcribe determines that the language spoken in * your media is en-AU, your custom vocabulary is not applied * to your transcription. If you include LanguageOptions and include * en-US as the only English language dialect, your custom vocabulary * is applied to your transcription.

If you want to include a custom * language model with your request but do not want to use automatic * language identification, use instead the parameter with the * LanguageModelName sub-parameter. If you want to include a custom * vocabulary or a custom vocabulary filter (or both) with your request but do * not want to use automatic language identification, use instead the * parameter with the VocabularyName or * VocabularyFilterName (or both) sub-parameter.

*/ inline const Aws::Map& GetLanguageIdSettings() const{ return m_languageIdSettings; } /** *

If using automatic language identification in your request and you want to * apply a custom language model, a custom vocabulary, or a custom vocabulary * filter, include LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). Note that multi-language identification * (IdentifyMultipleLanguages) doesn't support custom language * models.

LanguageIdSettings supports two to five language * codes. Each language code you include can have an associated custom language * model, custom vocabulary, and custom vocabulary filter. The language codes that * you specify must match the languages of the associated custom language models, * custom vocabularies, and custom vocabulary filters.

It's recommended that * you include LanguageOptions when using * LanguageIdSettings to ensure that the correct language dialect is * identified. For example, if you specify a custom vocabulary that is in * en-US but Amazon Transcribe determines that the language spoken in * your media is en-AU, your custom vocabulary is not applied * to your transcription. If you include LanguageOptions and include * en-US as the only English language dialect, your custom vocabulary * is applied to your transcription.

If you want to include a custom * language model with your request but do not want to use automatic * language identification, use instead the parameter with the * LanguageModelName sub-parameter. If you want to include a custom * vocabulary or a custom vocabulary filter (or both) with your request but do * not want to use automatic language identification, use instead the * parameter with the VocabularyName or * VocabularyFilterName (or both) sub-parameter.

*/ inline bool LanguageIdSettingsHasBeenSet() const { return m_languageIdSettingsHasBeenSet; } /** *

If using automatic language identification in your request and you want to * apply a custom language model, a custom vocabulary, or a custom vocabulary * filter, include LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). Note that multi-language identification * (IdentifyMultipleLanguages) doesn't support custom language * models.

LanguageIdSettings supports two to five language * codes. Each language code you include can have an associated custom language * model, custom vocabulary, and custom vocabulary filter. The language codes that * you specify must match the languages of the associated custom language models, * custom vocabularies, and custom vocabulary filters.

It's recommended that * you include LanguageOptions when using * LanguageIdSettings to ensure that the correct language dialect is * identified. For example, if you specify a custom vocabulary that is in * en-US but Amazon Transcribe determines that the language spoken in * your media is en-AU, your custom vocabulary is not applied * to your transcription. If you include LanguageOptions and include * en-US as the only English language dialect, your custom vocabulary * is applied to your transcription.

If you want to include a custom * language model with your request but do not want to use automatic * language identification, use instead the parameter with the * LanguageModelName sub-parameter. If you want to include a custom * vocabulary or a custom vocabulary filter (or both) with your request but do * not want to use automatic language identification, use instead the * parameter with the VocabularyName or * VocabularyFilterName (or both) sub-parameter.

*/ inline void SetLanguageIdSettings(const Aws::Map& value) { m_languageIdSettingsHasBeenSet = true; m_languageIdSettings = value; } /** *

If using automatic language identification in your request and you want to * apply a custom language model, a custom vocabulary, or a custom vocabulary * filter, include LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). Note that multi-language identification * (IdentifyMultipleLanguages) doesn't support custom language * models.

LanguageIdSettings supports two to five language * codes. Each language code you include can have an associated custom language * model, custom vocabulary, and custom vocabulary filter. The language codes that * you specify must match the languages of the associated custom language models, * custom vocabularies, and custom vocabulary filters.

It's recommended that * you include LanguageOptions when using * LanguageIdSettings to ensure that the correct language dialect is * identified. For example, if you specify a custom vocabulary that is in * en-US but Amazon Transcribe determines that the language spoken in * your media is en-AU, your custom vocabulary is not applied * to your transcription. If you include LanguageOptions and include * en-US as the only English language dialect, your custom vocabulary * is applied to your transcription.

If you want to include a custom * language model with your request but do not want to use automatic * language identification, use instead the parameter with the * LanguageModelName sub-parameter. If you want to include a custom * vocabulary or a custom vocabulary filter (or both) with your request but do * not want to use automatic language identification, use instead the * parameter with the VocabularyName or * VocabularyFilterName (or both) sub-parameter.

*/ inline void SetLanguageIdSettings(Aws::Map&& value) { m_languageIdSettingsHasBeenSet = true; m_languageIdSettings = std::move(value); } /** *

If using automatic language identification in your request and you want to * apply a custom language model, a custom vocabulary, or a custom vocabulary * filter, include LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). Note that multi-language identification * (IdentifyMultipleLanguages) doesn't support custom language * models.

LanguageIdSettings supports two to five language * codes. Each language code you include can have an associated custom language * model, custom vocabulary, and custom vocabulary filter. The language codes that * you specify must match the languages of the associated custom language models, * custom vocabularies, and custom vocabulary filters.

It's recommended that * you include LanguageOptions when using * LanguageIdSettings to ensure that the correct language dialect is * identified. For example, if you specify a custom vocabulary that is in * en-US but Amazon Transcribe determines that the language spoken in * your media is en-AU, your custom vocabulary is not applied * to your transcription. If you include LanguageOptions and include * en-US as the only English language dialect, your custom vocabulary * is applied to your transcription.

If you want to include a custom * language model with your request but do not want to use automatic * language identification, use instead the parameter with the * LanguageModelName sub-parameter. If you want to include a custom * vocabulary or a custom vocabulary filter (or both) with your request but do * not want to use automatic language identification, use instead the * parameter with the VocabularyName or * VocabularyFilterName (or both) sub-parameter.

*/ inline StartTranscriptionJobRequest& WithLanguageIdSettings(const Aws::Map& value) { SetLanguageIdSettings(value); return *this;} /** *

If using automatic language identification in your request and you want to * apply a custom language model, a custom vocabulary, or a custom vocabulary * filter, include LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). Note that multi-language identification * (IdentifyMultipleLanguages) doesn't support custom language * models.

LanguageIdSettings supports two to five language * codes. Each language code you include can have an associated custom language * model, custom vocabulary, and custom vocabulary filter. The language codes that * you specify must match the languages of the associated custom language models, * custom vocabularies, and custom vocabulary filters.

It's recommended that * you include LanguageOptions when using * LanguageIdSettings to ensure that the correct language dialect is * identified. For example, if you specify a custom vocabulary that is in * en-US but Amazon Transcribe determines that the language spoken in * your media is en-AU, your custom vocabulary is not applied * to your transcription. If you include LanguageOptions and include * en-US as the only English language dialect, your custom vocabulary * is applied to your transcription.

If you want to include a custom * language model with your request but do not want to use automatic * language identification, use instead the parameter with the * LanguageModelName sub-parameter. If you want to include a custom * vocabulary or a custom vocabulary filter (or both) with your request but do * not want to use automatic language identification, use instead the * parameter with the VocabularyName or * VocabularyFilterName (or both) sub-parameter.

*/ inline StartTranscriptionJobRequest& WithLanguageIdSettings(Aws::Map&& value) { SetLanguageIdSettings(std::move(value)); return *this;} /** *

If using automatic language identification in your request and you want to * apply a custom language model, a custom vocabulary, or a custom vocabulary * filter, include LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). Note that multi-language identification * (IdentifyMultipleLanguages) doesn't support custom language * models.

LanguageIdSettings supports two to five language * codes. Each language code you include can have an associated custom language * model, custom vocabulary, and custom vocabulary filter. The language codes that * you specify must match the languages of the associated custom language models, * custom vocabularies, and custom vocabulary filters.

It's recommended that * you include LanguageOptions when using * LanguageIdSettings to ensure that the correct language dialect is * identified. For example, if you specify a custom vocabulary that is in * en-US but Amazon Transcribe determines that the language spoken in * your media is en-AU, your custom vocabulary is not applied * to your transcription. If you include LanguageOptions and include * en-US as the only English language dialect, your custom vocabulary * is applied to your transcription.

If you want to include a custom language model with your request but do not want to use automatic language identification, use instead the ModelSettings parameter with the LanguageModelName sub-parameter. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use instead the Settings parameter with the VocabularyName or VocabularyFilterName (or both) sub-parameter.

*/
// Fluent insert (copy key, copy value): records the settings for one language code
// and flags the member as explicitly provided. No overwrite if the key already exists.
inline StartTranscriptionJobRequest& AddLanguageIdSettings(const LanguageCode& key, const LanguageIdSettings& value)
{
  m_languageIdSettingsHasBeenSet = true;
  m_languageIdSettings.emplace(key, value);
  return *this;
}
/** *

If using automatic language identification in your request and you want to * apply a custom language model, a custom vocabulary, or a custom vocabulary * filter, include LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). Note that multi-language identification * (IdentifyMultipleLanguages) doesn't support custom language * models.

LanguageIdSettings supports two to five language * codes. Each language code you include can have an associated custom language * model, custom vocabulary, and custom vocabulary filter. The language codes that * you specify must match the languages of the associated custom language models, * custom vocabularies, and custom vocabulary filters.

It's recommended that * you include LanguageOptions when using * LanguageIdSettings to ensure that the correct language dialect is * identified. For example, if you specify a custom vocabulary that is in * en-US but Amazon Transcribe determines that the language spoken in * your media is en-AU, your custom vocabulary is not applied * to your transcription. If you include LanguageOptions and include * en-US as the only English language dialect, your custom vocabulary * is applied to your transcription.

If you want to include a custom language model with your request but do not want to use automatic language identification, use instead the ModelSettings parameter with the LanguageModelName sub-parameter. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use instead the Settings parameter with the VocabularyName or VocabularyFilterName (or both) sub-parameter.

*/
// Fluent insert (move key, copy value): records the settings for one language code
// and flags the member as explicitly provided. No overwrite if the key already exists.
inline StartTranscriptionJobRequest& AddLanguageIdSettings(LanguageCode&& key, const LanguageIdSettings& value)
{
  m_languageIdSettingsHasBeenSet = true;
  m_languageIdSettings.emplace(std::move(key), value);
  return *this;
}
/** *

If using automatic language identification in your request and you want to * apply a custom language model, a custom vocabulary, or a custom vocabulary * filter, include LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). Note that multi-language identification * (IdentifyMultipleLanguages) doesn't support custom language * models.

LanguageIdSettings supports two to five language * codes. Each language code you include can have an associated custom language * model, custom vocabulary, and custom vocabulary filter. The language codes that * you specify must match the languages of the associated custom language models, * custom vocabularies, and custom vocabulary filters.

It's recommended that * you include LanguageOptions when using * LanguageIdSettings to ensure that the correct language dialect is * identified. For example, if you specify a custom vocabulary that is in * en-US but Amazon Transcribe determines that the language spoken in * your media is en-AU, your custom vocabulary is not applied * to your transcription. If you include LanguageOptions and include * en-US as the only English language dialect, your custom vocabulary * is applied to your transcription.

If you want to include a custom language model with your request but do not want to use automatic language identification, use instead the ModelSettings parameter with the LanguageModelName sub-parameter. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use instead the Settings parameter with the VocabularyName or VocabularyFilterName (or both) sub-parameter.

*/
// Fluent insert (copy key, move value): records the settings for one language code
// and flags the member as explicitly provided. No overwrite if the key already exists.
inline StartTranscriptionJobRequest& AddLanguageIdSettings(const LanguageCode& key, LanguageIdSettings&& value)
{
  m_languageIdSettingsHasBeenSet = true;
  m_languageIdSettings.emplace(key, std::move(value));
  return *this;
}
/** *

If using automatic language identification in your request and you want to * apply a custom language model, a custom vocabulary, or a custom vocabulary * filter, include LanguageIdSettings with the relevant sub-parameters * (VocabularyName, LanguageModelName, and * VocabularyFilterName). Note that multi-language identification * (IdentifyMultipleLanguages) doesn't support custom language * models.

LanguageIdSettings supports two to five language * codes. Each language code you include can have an associated custom language * model, custom vocabulary, and custom vocabulary filter. The language codes that * you specify must match the languages of the associated custom language models, * custom vocabularies, and custom vocabulary filters.

It's recommended that * you include LanguageOptions when using * LanguageIdSettings to ensure that the correct language dialect is * identified. For example, if you specify a custom vocabulary that is in * en-US but Amazon Transcribe determines that the language spoken in * your media is en-AU, your custom vocabulary is not applied * to your transcription. If you include LanguageOptions and include * en-US as the only English language dialect, your custom vocabulary * is applied to your transcription.

If you want to include a custom language model with your request but do not want to use automatic language identification, use instead the ModelSettings parameter with the LanguageModelName sub-parameter. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use instead the Settings parameter with the VocabularyName or VocabularyFilterName (or both) sub-parameter.

*/
// Fluent insert (move key, move value): records the settings for one language code
// and flags the member as explicitly provided. No overwrite if the key already exists.
inline StartTranscriptionJobRequest& AddLanguageIdSettings(LanguageCode&& key, LanguageIdSettings&& value)
{
  m_languageIdSettingsHasBeenSet = true;
  m_languageIdSettings.emplace(std::move(key), std::move(value));
  return *this;
}
/** *

Enables toxic speech detection in your transcript. If you include * ToxicityDetection in your request, you must also include * ToxicityCategories.

For information on the types of toxic speech Amazon Transcribe can detect, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/toxic-language.html">Detecting toxic speech</a>.

*/
// Accessor: template argument restored — the element type is ToxicityDetectionSettings,
// as the AddToxicityDetection overloads show.
inline const Aws::Vector<ToxicityDetectionSettings>& GetToxicityDetection() const { return m_toxicityDetection; }
/** *

Enables toxic speech detection in your transcript. If you include * ToxicityDetection in your request, you must also include * ToxicityCategories.

For information on the types of toxic speech Amazon Transcribe can detect, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/toxic-language.html">Detecting toxic speech</a>.

*/
// Returns true once ToxicityDetection has been explicitly assigned on this request.
inline bool ToxicityDetectionHasBeenSet() const
{
  return m_toxicityDetectionHasBeenSet;
}
/** *

Enables toxic speech detection in your transcript. If you include * ToxicityDetection in your request, you must also include * ToxicityCategories.

For information on the types of toxic speech Amazon Transcribe can detect, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/toxic-language.html">Detecting toxic speech</a>.

*/
// Setter (copy overload): template argument restored to Aws::Vector<ToxicityDetectionSettings>.
inline void SetToxicityDetection(const Aws::Vector<ToxicityDetectionSettings>& value) { m_toxicityDetectionHasBeenSet = true; m_toxicityDetection = value; }
/** *

Enables toxic speech detection in your transcript. If you include * ToxicityDetection in your request, you must also include * ToxicityCategories.

For information on the types of toxic speech Amazon Transcribe can detect, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/toxic-language.html">Detecting toxic speech</a>.

*/
// Setter (move overload): template argument restored to Aws::Vector<ToxicityDetectionSettings>.
inline void SetToxicityDetection(Aws::Vector<ToxicityDetectionSettings>&& value) { m_toxicityDetectionHasBeenSet = true; m_toxicityDetection = std::move(value); }
/** *

Enables toxic speech detection in your transcript. If you include * ToxicityDetection in your request, you must also include * ToxicityCategories.

For information on the types of toxic speech Amazon Transcribe can detect, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/toxic-language.html">Detecting toxic speech</a>.

*/
// Fluent setter (copy overload): template argument restored to Aws::Vector<ToxicityDetectionSettings>.
inline StartTranscriptionJobRequest& WithToxicityDetection(const Aws::Vector<ToxicityDetectionSettings>& value) { SetToxicityDetection(value); return *this; }
/** *

Enables toxic speech detection in your transcript. If you include * ToxicityDetection in your request, you must also include * ToxicityCategories.

For information on the types of toxic speech Amazon Transcribe can detect, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/toxic-language.html">Detecting toxic speech</a>.

*/
// Fluent setter (move overload): template argument restored to Aws::Vector<ToxicityDetectionSettings>.
inline StartTranscriptionJobRequest& WithToxicityDetection(Aws::Vector<ToxicityDetectionSettings>&& value) { SetToxicityDetection(std::move(value)); return *this; }
/** *

Enables toxic speech detection in your transcript. If you include * ToxicityDetection in your request, you must also include * ToxicityCategories.

For information on the types of toxic speech Amazon Transcribe can detect, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/toxic-language.html">Detecting toxic speech</a>.

*/
// Fluent append (copy overload): adds one ToxicityDetectionSettings entry and
// flags the member as explicitly provided.
inline StartTranscriptionJobRequest& AddToxicityDetection(const ToxicityDetectionSettings& value)
{
  m_toxicityDetectionHasBeenSet = true;
  m_toxicityDetection.push_back(value);
  return *this;
}
/** *

Enables toxic speech detection in your transcript. If you include * ToxicityDetection in your request, you must also include * ToxicityCategories.

For information on the types of toxic speech Amazon Transcribe can detect, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/toxic-language.html">Detecting toxic speech</a>.

*/ inline StartTranscriptionJobRequest& AddToxicityDetection(ToxicityDetectionSettings&& value) { m_toxicityDetectionHasBeenSet = true; m_toxicityDetection.push_back(std::move(value)); return *this; } private: Aws::String m_transcriptionJobName; bool m_transcriptionJobNameHasBeenSet = false; LanguageCode m_languageCode; bool m_languageCodeHasBeenSet = false; int m_mediaSampleRateHertz; bool m_mediaSampleRateHertzHasBeenSet = false; MediaFormat m_mediaFormat; bool m_mediaFormatHasBeenSet = false; Media m_media; bool m_mediaHasBeenSet = false; Aws::String m_outputBucketName; bool m_outputBucketNameHasBeenSet = false; Aws::String m_outputKey; bool m_outputKeyHasBeenSet = false; Aws::String m_outputEncryptionKMSKeyId; bool m_outputEncryptionKMSKeyIdHasBeenSet = false; Aws::Map m_kMSEncryptionContext; bool m_kMSEncryptionContextHasBeenSet = false; Settings m_settings; bool m_settingsHasBeenSet = false; ModelSettings m_modelSettings; bool m_modelSettingsHasBeenSet = false; JobExecutionSettings m_jobExecutionSettings; bool m_jobExecutionSettingsHasBeenSet = false; ContentRedaction m_contentRedaction; bool m_contentRedactionHasBeenSet = false; bool m_identifyLanguage; bool m_identifyLanguageHasBeenSet = false; bool m_identifyMultipleLanguages; bool m_identifyMultipleLanguagesHasBeenSet = false; Aws::Vector m_languageOptions; bool m_languageOptionsHasBeenSet = false; Subtitles m_subtitles; bool m_subtitlesHasBeenSet = false; Aws::Vector m_tags; bool m_tagsHasBeenSet = false; Aws::Map m_languageIdSettings; bool m_languageIdSettingsHasBeenSet = false; Aws::Vector m_toxicityDetection; bool m_toxicityDetectionHasBeenSet = false; }; } // namespace Model } // namespace TranscribeService } // namespace Aws