/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#pragma once
#include <aws/transcribestreaming/TranscribeStreamingService_EXPORTS.h>
#include <aws/transcribestreaming/TranscribeStreamingServiceRequest.h>
#include <aws/transcribestreaming/model/CallAnalyticsLanguageCode.h>
#include <aws/transcribestreaming/model/MediaEncoding.h>
#include <aws/transcribestreaming/model/AudioStream.h>
#include <aws/transcribestreaming/model/StartCallAnalyticsStreamTranscriptionHandler.h>
#include <aws/transcribestreaming/model/VocabularyFilterMethod.h>
#include <aws/transcribestreaming/model/PartialResultsStability.h>
#include <aws/transcribestreaming/model/ContentIdentificationType.h>
#include <aws/transcribestreaming/model/ContentRedactionType.h>
#include <aws/core/utils/event/EventStreamDecoder.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/http/HttpTypes.h>
#include <memory>
#include <utility>

namespace Aws
{
namespace TranscribeStreamingService
{
namespace Model
{

  /**
   */
  class StartCallAnalyticsStreamTranscriptionRequest : public TranscribeStreamingServiceRequest
  {
  public:
    AWS_TRANSCRIBESTREAMINGSERVICE_API StartCallAnalyticsStreamTranscriptionRequest();

    // The service request name is the operation name that sends this request out.
    // Each operation should have a unique request name, so that we can get the operation's
    // name from this request.
    // Note: this is not true for responses; multiple operations may share the same response
    // name, so we cannot get an operation's name from a response.
    inline virtual const char* GetServiceRequestName() const override { return "StartCallAnalyticsStreamTranscription"; }

    inline virtual bool IsEventStreamRequest() const override { return true; }

    // SerializePayload will not be invoked.
    // This request is sent by encoding its data in event streams, which are sent as an
    // IOStream via GetBody().
    AWS_TRANSCRIBESTREAMINGSERVICE_API Aws::String SerializePayload() const override { return {}; }

    AWS_TRANSCRIBESTREAMINGSERVICE_API std::shared_ptr<Aws::IOStream> GetBody() const override;

    AWS_TRANSCRIBESTREAMINGSERVICE_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;

    /**
     * Underlying Event Stream Decoder.
     */
    inline Aws::Utils::Event::EventStreamDecoder& GetEventStreamDecoder() { return m_decoder; }

    /**
     * Underlying Event Stream Handler which is used to define callback functions.
     */
    inline const StartCallAnalyticsStreamTranscriptionHandler& GetEventStreamHandler() const { return m_handler; }

    /**
     * Underlying Event Stream Handler which is used to define callback functions.
     */
    inline void SetEventStreamHandler(const StartCallAnalyticsStreamTranscriptionHandler& value) { m_handler = value; m_decoder.ResetEventStreamHandler(&m_handler); }

    /**
     * Underlying Event Stream Handler which is used to define callback functions.
     */
    inline StartCallAnalyticsStreamTranscriptionRequest& WithEventStreamHandler(const StartCallAnalyticsStreamTranscriptionHandler& value) { SetEventStreamHandler(value); return *this; }
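
    /**
     * Example (illustrative sketch, not generated documentation): wiring a handler
     * before starting the stream. The callback setter and event type shown here
     * (SetUtteranceEventCallback, UtteranceEvent) are assumptions; check
     * StartCallAnalyticsStreamTranscriptionHandler.h for the exact callbacks the
     * generated handler exposes.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionHandler handler;
     * // Assumed callback setter; the generated handler header is authoritative.
     * handler.SetUtteranceEventCallback([](const UtteranceEvent& event) {
     *   // Inspect partial or final Call Analytics results here.
     * });
     *
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.SetEventStreamHandler(handler);
     * @endcode
     */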

    /**
     * Specify the language code that represents the language spoken in your audio.
     *
     * If you're unsure of the language spoken in your audio, consider using
     * IdentifyLanguage to enable automatic language identification.
     *
     * For a list of languages supported with streaming Call Analytics, refer to
     * the Supported languages table.
     */
    inline const CallAnalyticsLanguageCode& GetLanguageCode() const { return m_languageCode; }
    inline bool LanguageCodeHasBeenSet() const { return m_languageCodeHasBeenSet; }
    inline void SetLanguageCode(const CallAnalyticsLanguageCode& value) { m_languageCodeHasBeenSet = true; m_languageCode = value; }
    inline void SetLanguageCode(CallAnalyticsLanguageCode&& value) { m_languageCodeHasBeenSet = true; m_languageCode = std::move(value); }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithLanguageCode(const CallAnalyticsLanguageCode& value) { SetLanguageCode(value); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithLanguageCode(CallAnalyticsLanguageCode&& value) { SetLanguageCode(std::move(value)); return *this; }
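
    /**
     * Example (illustrative sketch): selecting a language for the stream. The
     * enumerator en_US is assumed to exist in CallAnalyticsLanguageCode; consult
     * the generated CallAnalyticsLanguageCode.h for the supported values.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithLanguageCode(CallAnalyticsLanguageCode::en_US);
     * @endcode
     */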

    /**
     * The sample rate of the input audio (in hertz). Low-quality audio, such as
     * telephone audio, is typically around 8,000 Hz. High-quality audio typically
     * ranges from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify
     * must match that of your audio.
     */
    inline int GetMediaSampleRateHertz() const { return m_mediaSampleRateHertz; }
    inline bool MediaSampleRateHertzHasBeenSet() const { return m_mediaSampleRateHertzHasBeenSet; }
    inline void SetMediaSampleRateHertz(int value) { m_mediaSampleRateHertzHasBeenSet = true; m_mediaSampleRateHertz = value; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithMediaSampleRateHertz(int value) { SetMediaSampleRateHertz(value); return *this; }
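
    /**
     * Example (illustrative sketch): declaring an 8 kHz telephony stream. As a
     * sizing aid only: signed 16-bit mono PCM at 8,000 Hz produces
     * 8,000 samples/s * 2 bytes = 16,000 bytes of audio per second.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithMediaSampleRateHertz(8000);
     * @endcode
     */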

    /**
     * Specify the encoding of your input audio. Supported formats are:
     *   - FLAC
     *   - OPUS-encoded audio in an Ogg container
     *   - PCM (only signed 16-bit little-endian audio formats, which does not
     *     include WAV)
     *
     * For more information, see Media formats.
     */
    inline const MediaEncoding& GetMediaEncoding() const { return m_mediaEncoding; }
    inline bool MediaEncodingHasBeenSet() const { return m_mediaEncodingHasBeenSet; }
    inline void SetMediaEncoding(const MediaEncoding& value) { m_mediaEncodingHasBeenSet = true; m_mediaEncoding = value; }
    inline void SetMediaEncoding(MediaEncoding&& value) { m_mediaEncodingHasBeenSet = true; m_mediaEncoding = std::move(value); }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithMediaEncoding(const MediaEncoding& value) { SetMediaEncoding(value); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithMediaEncoding(MediaEncoding&& value) { SetMediaEncoding(std::move(value)); return *this; }
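
    /**
     * Example (illustrative sketch): chaining the three media settings that
     * describe the audio you will send. The enumerator pcm is assumed to exist in
     * MediaEncoding; see the generated MediaEncoding.h for the exact values.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithLanguageCode(CallAnalyticsLanguageCode::en_US)
     *        .WithMediaSampleRateHertz(8000)
     *        .WithMediaEncoding(MediaEncoding::pcm); // raw PCM frames, not a WAV container
     * @endcode
     */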

    /**
     * Specify the name of the custom vocabulary that you want to use when
     * processing your transcription. Note that vocabulary names are case
     * sensitive.
     *
     * If the language of the specified custom vocabulary doesn't match the
     * language identified in your media, the custom vocabulary is not applied to
     * your transcription.
     *
     * For more information, see Custom vocabularies.
     */
    inline const Aws::String& GetVocabularyName() const { return m_vocabularyName; }
    inline bool VocabularyNameHasBeenSet() const { return m_vocabularyNameHasBeenSet; }
    inline void SetVocabularyName(const Aws::String& value) { m_vocabularyNameHasBeenSet = true; m_vocabularyName = value; }
    inline void SetVocabularyName(Aws::String&& value) { m_vocabularyNameHasBeenSet = true; m_vocabularyName = std::move(value); }
    inline void SetVocabularyName(const char* value) { m_vocabularyNameHasBeenSet = true; m_vocabularyName.assign(value); }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithVocabularyName(const Aws::String& value) { SetVocabularyName(value); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithVocabularyName(Aws::String&& value) { SetVocabularyName(std::move(value)); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithVocabularyName(const char* value) { SetVocabularyName(value); return *this; }
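
    /**
     * Example (illustrative sketch): attaching a custom vocabulary. The name
     * "my-call-vocabulary" is hypothetical; it must match an existing vocabulary
     * exactly, including case.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithVocabularyName("my-call-vocabulary");
     * @endcode
     */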

    /**
     * Specify a name for your Call Analytics transcription session. If you don't
     * include this parameter in your request, Amazon Transcribe generates an ID
     * and returns it in the response.
     *
     * You can use a session ID to retry a streaming session.
     */
    inline const Aws::String& GetSessionId() const { return m_sessionId; }
    inline bool SessionIdHasBeenSet() const { return m_sessionIdHasBeenSet; }
    inline void SetSessionId(const Aws::String& value) { m_sessionIdHasBeenSet = true; m_sessionId = value; }
    inline void SetSessionId(Aws::String&& value) { m_sessionIdHasBeenSet = true; m_sessionId = std::move(value); }
    inline void SetSessionId(const char* value) { m_sessionIdHasBeenSet = true; m_sessionId.assign(value); }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithSessionId(const Aws::String& value) { SetSessionId(value); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithSessionId(Aws::String&& value) { SetSessionId(std::move(value)); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithSessionId(const char* value) { SetSessionId(value); return *this; }

    AWS_TRANSCRIBESTREAMINGSERVICE_API std::shared_ptr<AudioStream> GetAudioStream() const { return m_audioStream; }
    AWS_TRANSCRIBESTREAMINGSERVICE_API void SetAudioStream(const std::shared_ptr<AudioStream>& value) { m_audioStream = value; }
    AWS_TRANSCRIBESTREAMINGSERVICE_API StartCallAnalyticsStreamTranscriptionRequest& WithAudioStream(const std::shared_ptr<AudioStream>& value) { SetAudioStream(value); return *this; }
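
    /**
     * Example (illustrative sketch): tagging the session so it can be correlated
     * or retried later. The ID shown is a made-up placeholder. In typical use of
     * the streaming client, the AudioStream is handed to your code through the
     * client's stream-ready callback rather than constructed directly, so this
     * sketch only sets the session ID.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithSessionId("3e9d1f6b-example-session-id");
     * @endcode
     */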

    /**
     * Specify the name of the custom vocabulary filter that you want to use when
     * processing your transcription. Note that vocabulary filter names are case
     * sensitive.
     *
     * If the language of the specified custom vocabulary filter doesn't match the
     * language identified in your media, the vocabulary filter is not applied to
     * your transcription.
     *
     * For more information, see Using vocabulary filtering with unwanted words.
     */
    inline const Aws::String& GetVocabularyFilterName() const { return m_vocabularyFilterName; }
    inline bool VocabularyFilterNameHasBeenSet() const { return m_vocabularyFilterNameHasBeenSet; }
    inline void SetVocabularyFilterName(const Aws::String& value) { m_vocabularyFilterNameHasBeenSet = true; m_vocabularyFilterName = value; }
    inline void SetVocabularyFilterName(Aws::String&& value) { m_vocabularyFilterNameHasBeenSet = true; m_vocabularyFilterName = std::move(value); }
    inline void SetVocabularyFilterName(const char* value) { m_vocabularyFilterNameHasBeenSet = true; m_vocabularyFilterName.assign(value); }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithVocabularyFilterName(const Aws::String& value) { SetVocabularyFilterName(value); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithVocabularyFilterName(Aws::String&& value) { SetVocabularyFilterName(std::move(value)); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithVocabularyFilterName(const char* value) { SetVocabularyFilterName(value); return *this; }
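
    /**
     * Example (illustrative sketch): naming a vocabulary filter. The filter name
     * is hypothetical and, like vocabulary names, is matched case-sensitively.
     * Pair it with a filter method (see the next group of accessors).
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithVocabularyFilterName("my-profanity-filter");
     * @endcode
     */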

    /**
     * Specify how you want your vocabulary filter applied to your transcript.
     *   - To replace words with ***, choose mask.
     *   - To delete words, choose remove.
     *   - To flag words without changing them, choose tag.
     */
    inline const VocabularyFilterMethod& GetVocabularyFilterMethod() const { return m_vocabularyFilterMethod; }
    inline bool VocabularyFilterMethodHasBeenSet() const { return m_vocabularyFilterMethodHasBeenSet; }
    inline void SetVocabularyFilterMethod(const VocabularyFilterMethod& value) { m_vocabularyFilterMethodHasBeenSet = true; m_vocabularyFilterMethod = value; }
    inline void SetVocabularyFilterMethod(VocabularyFilterMethod&& value) { m_vocabularyFilterMethodHasBeenSet = true; m_vocabularyFilterMethod = std::move(value); }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithVocabularyFilterMethod(const VocabularyFilterMethod& value) { SetVocabularyFilterMethod(value); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithVocabularyFilterMethod(VocabularyFilterMethod&& value) { SetVocabularyFilterMethod(std::move(value)); return *this; }
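
    /**
     * Example (illustrative sketch): masking filtered words. The enumerator mask
     * is assumed to exist in VocabularyFilterMethod (alongside remove and tag);
     * the generated VocabularyFilterMethod.h is authoritative.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithVocabularyFilterName("my-profanity-filter") // hypothetical name
     *        .WithVocabularyFilterMethod(VocabularyFilterMethod::mask);
     * @endcode
     */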

    /**
     * Specify the name of the custom language model that you want to use when
     * processing your transcription. Note that language model names are case
     * sensitive.
     *
     * The language of the specified language model must match the language code
     * you specify in your transcription request. If the languages don't match,
     * the custom language model isn't applied. There are no errors or warnings
     * associated with a language mismatch.
     *
     * For more information, see Custom language models.
     */
    inline const Aws::String& GetLanguageModelName() const { return m_languageModelName; }
    inline bool LanguageModelNameHasBeenSet() const { return m_languageModelNameHasBeenSet; }
    inline void SetLanguageModelName(const Aws::String& value) { m_languageModelNameHasBeenSet = true; m_languageModelName = value; }
    inline void SetLanguageModelName(Aws::String&& value) { m_languageModelNameHasBeenSet = true; m_languageModelName = std::move(value); }
    inline void SetLanguageModelName(const char* value) { m_languageModelNameHasBeenSet = true; m_languageModelName.assign(value); }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithLanguageModelName(const Aws::String& value) { SetLanguageModelName(value); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithLanguageModelName(Aws::String&& value) { SetLanguageModelName(std::move(value)); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithLanguageModelName(const char* value) { SetLanguageModelName(value); return *this; }
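
    /**
     * Example (illustrative sketch): applying a custom language model. The model
     * name is hypothetical; remember that a language mismatch between the model
     * and the request's language code is silently ignored.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithLanguageCode(CallAnalyticsLanguageCode::en_US)
     *        .WithLanguageModelName("my-contact-center-clm");
     * @endcode
     */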

    /**
     * Enables partial result stabilization for your transcription. Partial result
     * stabilization can reduce latency in your output, but may impact accuracy.
     * For more information, see Partial-result stabilization.
     */
    inline bool GetEnablePartialResultsStabilization() const { return m_enablePartialResultsStabilization; }
    inline bool EnablePartialResultsStabilizationHasBeenSet() const { return m_enablePartialResultsStabilizationHasBeenSet; }
    inline void SetEnablePartialResultsStabilization(bool value) { m_enablePartialResultsStabilizationHasBeenSet = true; m_enablePartialResultsStabilization = value; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithEnablePartialResultsStabilization(bool value) { SetEnablePartialResultsStabilization(value); return *this; }
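
    /**
     * Example (illustrative sketch): turning on partial result stabilization to
     * trade a little accuracy for lower-latency partial results.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithEnablePartialResultsStabilization(true);
     * @endcode
     */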

    /**
     * Specify the level of stability to use when you enable partial results
     * stabilization (EnablePartialResultsStabilization).
     *
     * Low stability provides the highest accuracy. High stability transcribes
     * faster, but with slightly lower accuracy.
     *
     * For more information, see Partial-result stabilization.
     */
    inline const PartialResultsStability& GetPartialResultsStability() const { return m_partialResultsStability; }
    inline bool PartialResultsStabilityHasBeenSet() const { return m_partialResultsStabilityHasBeenSet; }
    inline void SetPartialResultsStability(const PartialResultsStability& value) { m_partialResultsStabilityHasBeenSet = true; m_partialResultsStability = value; }
    inline void SetPartialResultsStability(PartialResultsStability&& value) { m_partialResultsStabilityHasBeenSet = true; m_partialResultsStability = std::move(value); }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithPartialResultsStability(const PartialResultsStability& value) { SetPartialResultsStability(value); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithPartialResultsStability(PartialResultsStability&& value) { SetPartialResultsStability(std::move(value)); return *this; }
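
    /**
     * Example (illustrative sketch): requesting fast-stabilizing partial results.
     * The enumerator high is assumed to exist in PartialResultsStability
     * (alongside medium and low); the generated PartialResultsStability.h is
     * authoritative.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithEnablePartialResultsStabilization(true)
     *        .WithPartialResultsStability(PartialResultsStability::high);
     * @endcode
     */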

    /**
     * Labels all personally identifiable information (PII) identified in your
     * transcript.
     *
     * Content identification is performed at the segment level; PII specified in
     * PiiEntityTypes is flagged upon complete transcription of an audio segment.
     *
     * You can't set ContentIdentificationType and ContentRedactionType in the
     * same request. If you set both, your request returns a BadRequestException.
     *
     * For more information, see Redacting or identifying personally identifiable
     * information.
     */
    inline const ContentIdentificationType& GetContentIdentificationType() const { return m_contentIdentificationType; }
    inline bool ContentIdentificationTypeHasBeenSet() const { return m_contentIdentificationTypeHasBeenSet; }
    inline void SetContentIdentificationType(const ContentIdentificationType& value) { m_contentIdentificationTypeHasBeenSet = true; m_contentIdentificationType = value; }
    inline void SetContentIdentificationType(ContentIdentificationType&& value) { m_contentIdentificationTypeHasBeenSet = true; m_contentIdentificationType = std::move(value); }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithContentIdentificationType(const ContentIdentificationType& value) { SetContentIdentificationType(value); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithContentIdentificationType(ContentIdentificationType&& value) { SetContentIdentificationType(std::move(value)); return *this; }
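
    /**
     * Example (illustrative sketch): flagging PII without altering the transcript.
     * The enumerator PII is assumed to be the value exposed by
     * ContentIdentificationType; check the generated header. Do not combine this
     * with ContentRedactionType in the same request.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithContentIdentificationType(ContentIdentificationType::PII);
     * @endcode
     */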

    /**
     * Redacts all personally identifiable information (PII) identified in your
     * transcript.
     *
     * Content redaction is performed at the segment level; PII specified in
     * PiiEntityTypes is redacted upon complete transcription of an audio segment.
     *
     * You can't set ContentRedactionType and ContentIdentificationType in the
     * same request. If you set both, your request returns a BadRequestException.
     *
     * For more information, see Redacting or identifying personally identifiable
     * information.
     */
    inline const ContentRedactionType& GetContentRedactionType() const { return m_contentRedactionType; }
    inline bool ContentRedactionTypeHasBeenSet() const { return m_contentRedactionTypeHasBeenSet; }
    inline void SetContentRedactionType(const ContentRedactionType& value) { m_contentRedactionTypeHasBeenSet = true; m_contentRedactionType = value; }
    inline void SetContentRedactionType(ContentRedactionType&& value) { m_contentRedactionTypeHasBeenSet = true; m_contentRedactionType = std::move(value); }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithContentRedactionType(const ContentRedactionType& value) { SetContentRedactionType(value); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithContentRedactionType(ContentRedactionType&& value) { SetContentRedactionType(std::move(value)); return *this; }
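
    /**
     * Example (illustrative sketch): redacting selected PII categories. The
     * enumerator PII is assumed to be the value exposed by ContentRedactionType,
     * and the entity list uses values documented for PiiEntityTypes (see the
     * accessors that follow). Redaction and identification are mutually exclusive.
     *
     * @code
     * StartCallAnalyticsStreamTranscriptionRequest request;
     * request.WithContentRedactionType(ContentRedactionType::PII)
     *        .WithPiiEntityTypes("CREDIT_DEBIT_NUMBER,SSN,EMAIL");
     * @endcode
     */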

    /**
     * Specify which types of personally identifiable information (PII) you want
     * to redact in your transcript. You can include as many types as you'd like,
     * or you can select ALL.
     *
     * To include PiiEntityTypes in your Call Analytics request, you must also
     * include either ContentIdentificationType or ContentRedactionType.
     *
     * Values must be comma-separated and can include: BANK_ACCOUNT_NUMBER,
     * BANK_ROUTING, CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY,
     * PIN, EMAIL, ADDRESS, NAME, PHONE, SSN, or ALL.
     *
     * (See the example following the ContentRedactionType accessors above.)
     */
    inline const Aws::String& GetPiiEntityTypes() const { return m_piiEntityTypes; }
    inline bool PiiEntityTypesHasBeenSet() const { return m_piiEntityTypesHasBeenSet; }
    inline void SetPiiEntityTypes(const Aws::String& value) { m_piiEntityTypesHasBeenSet = true; m_piiEntityTypes = value; }
    inline void SetPiiEntityTypes(Aws::String&& value) { m_piiEntityTypesHasBeenSet = true; m_piiEntityTypes = std::move(value); }
    inline void SetPiiEntityTypes(const char* value) { m_piiEntityTypesHasBeenSet = true; m_piiEntityTypes.assign(value); }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithPiiEntityTypes(const Aws::String& value) { SetPiiEntityTypes(value); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithPiiEntityTypes(Aws::String&& value) { SetPiiEntityTypes(std::move(value)); return *this; }
    inline StartCallAnalyticsStreamTranscriptionRequest& WithPiiEntityTypes(const char* value) { SetPiiEntityTypes(value); return *this; }

  private:

    CallAnalyticsLanguageCode m_languageCode;
    bool m_languageCodeHasBeenSet = false;

    int m_mediaSampleRateHertz;
    bool m_mediaSampleRateHertzHasBeenSet = false;

    MediaEncoding m_mediaEncoding;
    bool m_mediaEncodingHasBeenSet = false;

    Aws::String m_vocabularyName;
    bool m_vocabularyNameHasBeenSet = false;

    Aws::String m_sessionId;
    bool m_sessionIdHasBeenSet = false;

    std::shared_ptr<AudioStream> m_audioStream;

    Aws::String m_vocabularyFilterName;
    bool m_vocabularyFilterNameHasBeenSet = false;

    VocabularyFilterMethod m_vocabularyFilterMethod;
    bool m_vocabularyFilterMethodHasBeenSet = false;

    Aws::String m_languageModelName;
    bool m_languageModelNameHasBeenSet = false;

    bool m_enablePartialResultsStabilization;
    bool m_enablePartialResultsStabilizationHasBeenSet = false;

    PartialResultsStability m_partialResultsStability;
    bool m_partialResultsStabilityHasBeenSet = false;

    ContentIdentificationType m_contentIdentificationType;
    bool m_contentIdentificationTypeHasBeenSet = false;

    ContentRedactionType m_contentRedactionType;
    bool m_contentRedactionTypeHasBeenSet = false;

    Aws::String m_piiEntityTypes;
    bool m_piiEntityTypesHasBeenSet = false;

    StartCallAnalyticsStreamTranscriptionHandler m_handler;
    Aws::Utils::Event::EventStreamDecoder m_decoder;
  };

} // namespace Model
} // namespace TranscribeStreamingService
} // namespace Aws