/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#pragma once
#include <aws/polly/Polly_EXPORTS.h>
#include <aws/polly/model/Engine.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/polly/model/TaskStatus.h>
#include <aws/core/utils/DateTime.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/polly/model/OutputFormat.h>
#include <aws/polly/model/SpeechMarkType.h>
#include <aws/polly/model/TextType.h>
#include <aws/polly/model/VoiceId.h>
#include <aws/polly/model/LanguageCode.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace Polly
{
namespace Model
{

  /**
   * SynthesisTask object that provides information about a speech synthesis
   * task.
   *
   * See Also: AWS API Reference
   */
  class SynthesisTask
  {
  public:
    AWS_POLLY_API SynthesisTask();
    AWS_POLLY_API SynthesisTask(Aws::Utils::Json::JsonView jsonValue);
    AWS_POLLY_API SynthesisTask& operator=(Aws::Utils::Json::JsonView jsonValue);
    AWS_POLLY_API Aws::Utils::Json::JsonValue Jsonize() const;
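
    // Illustrative usage sketch (not part of the generated SDK documentation):
    // a SynthesisTask is normally populated from a service response through the
    // JsonView constructor, but it can also be assembled with the fluent With*
    // setters declared below and serialized back with Jsonize(). The enum
    // values shown (Engine::neural, VoiceId::Joanna, OutputFormat::mp3) are
    // assumed to exist in the corresponding model headers.
    //
    //   SynthesisTask task = SynthesisTask()
    //       .WithEngine(Engine::neural)
    //       .WithVoiceId(VoiceId::Joanna)
    //       .WithOutputFormat(OutputFormat::mp3)
    //       .WithSampleRate("24000");
    //   Aws::Utils::Json::JsonValue payload = task.Jsonize();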

    /**
     * Specifies the engine (standard or neural) for Amazon Polly to use when
     * processing input text for speech synthesis. Using a voice that is not
     * supported for the engine selected will result in an error.
     */
    inline const Engine& GetEngine() const{ return m_engine; }
    inline bool EngineHasBeenSet() const { return m_engineHasBeenSet; }
    inline void SetEngine(const Engine& value) { m_engineHasBeenSet = true; m_engine = value; }
    inline void SetEngine(Engine&& value) { m_engineHasBeenSet = true; m_engine = std::move(value); }
    inline SynthesisTask& WithEngine(const Engine& value) { SetEngine(value); return *this;}
    inline SynthesisTask& WithEngine(Engine&& value) { SetEngine(std::move(value)); return *this;}

    /**
     * The Amazon Polly generated identifier for a speech synthesis task.
     */
    inline const Aws::String& GetTaskId() const{ return m_taskId; }
    inline bool TaskIdHasBeenSet() const { return m_taskIdHasBeenSet; }
    inline void SetTaskId(const Aws::String& value) { m_taskIdHasBeenSet = true; m_taskId = value; }
    inline void SetTaskId(Aws::String&& value) { m_taskIdHasBeenSet = true; m_taskId = std::move(value); }
    inline void SetTaskId(const char* value) { m_taskIdHasBeenSet = true; m_taskId.assign(value); }
    inline SynthesisTask& WithTaskId(const Aws::String& value) { SetTaskId(value); return *this;}
    inline SynthesisTask& WithTaskId(Aws::String&& value) { SetTaskId(std::move(value)); return *this;}
    inline SynthesisTask& WithTaskId(const char* value) { SetTaskId(value); return *this;}

    /**
     * Current status of the individual speech synthesis task.
     */
    inline const TaskStatus& GetTaskStatus() const{ return m_taskStatus; }
    inline bool TaskStatusHasBeenSet() const { return m_taskStatusHasBeenSet; }
    inline void SetTaskStatus(const TaskStatus& value) { m_taskStatusHasBeenSet = true; m_taskStatus = value; }
    inline void SetTaskStatus(TaskStatus&& value) { m_taskStatusHasBeenSet = true; m_taskStatus = std::move(value); }
    inline SynthesisTask& WithTaskStatus(const TaskStatus& value) { SetTaskStatus(value); return *this;}
    inline SynthesisTask& WithTaskStatus(TaskStatus&& value) { SetTaskStatus(std::move(value)); return *this;}

    /**
     * Reason for the current status of a specific speech synthesis task,
     * including errors if the task has failed.
     */
    inline const Aws::String& GetTaskStatusReason() const{ return m_taskStatusReason; }
    inline bool TaskStatusReasonHasBeenSet() const { return m_taskStatusReasonHasBeenSet; }
    inline void SetTaskStatusReason(const Aws::String& value) { m_taskStatusReasonHasBeenSet = true; m_taskStatusReason = value; }
    inline void SetTaskStatusReason(Aws::String&& value) { m_taskStatusReasonHasBeenSet = true; m_taskStatusReason = std::move(value); }
    inline void SetTaskStatusReason(const char* value) { m_taskStatusReasonHasBeenSet = true; m_taskStatusReason.assign(value); }
    inline SynthesisTask& WithTaskStatusReason(const Aws::String& value) { SetTaskStatusReason(value); return *this;}
    inline SynthesisTask& WithTaskStatusReason(Aws::String&& value) { SetTaskStatusReason(std::move(value)); return *this;}
    inline SynthesisTask& WithTaskStatusReason(const char* value) { SetTaskStatusReason(value); return *this;}

    /**
     * Pathway for the output speech file.
     */
    inline const Aws::String& GetOutputUri() const{ return m_outputUri; }
    inline bool OutputUriHasBeenSet() const { return m_outputUriHasBeenSet; }
    inline void SetOutputUri(const Aws::String& value) { m_outputUriHasBeenSet = true; m_outputUri = value; }
    inline void SetOutputUri(Aws::String&& value) { m_outputUriHasBeenSet = true; m_outputUri = std::move(value); }
    inline void SetOutputUri(const char* value) { m_outputUriHasBeenSet = true; m_outputUri.assign(value); }
    inline SynthesisTask& WithOutputUri(const Aws::String& value) { SetOutputUri(value); return *this;}
    inline SynthesisTask& WithOutputUri(Aws::String&& value) { SetOutputUri(std::move(value)); return *this;}
    inline SynthesisTask& WithOutputUri(const char* value) { SetOutputUri(value); return *this;}
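
    // Illustrative sketch (assumed usage, not generated documentation): given a
    // populated SynthesisTask named task, callers typically check the task
    // status and read the output location once synthesis has finished.
    // TaskStatus::completed and TaskStatus::failed are assumed to be values
    // defined in the TaskStatus model header.
    //
    //   if (task.GetTaskStatus() == TaskStatus::completed)
    //   {
    //       Aws::String location = task.GetOutputUri();
    //   }
    //   else if (task.GetTaskStatus() == TaskStatus::failed)
    //   {
    //       Aws::String reason = task.GetTaskStatusReason();
    //   }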

    /**
     * Timestamp for the time the synthesis task was started.
     */
    inline const Aws::Utils::DateTime& GetCreationTime() const{ return m_creationTime; }
    inline bool CreationTimeHasBeenSet() const { return m_creationTimeHasBeenSet; }
    inline void SetCreationTime(const Aws::Utils::DateTime& value) { m_creationTimeHasBeenSet = true; m_creationTime = value; }
    inline void SetCreationTime(Aws::Utils::DateTime&& value) { m_creationTimeHasBeenSet = true; m_creationTime = std::move(value); }
    inline SynthesisTask& WithCreationTime(const Aws::Utils::DateTime& value) { SetCreationTime(value); return *this;}
    inline SynthesisTask& WithCreationTime(Aws::Utils::DateTime&& value) { SetCreationTime(std::move(value)); return *this;}

    /**
     * Number of billable characters synthesized.
     */
    inline int GetRequestCharacters() const{ return m_requestCharacters; }
    inline bool RequestCharactersHasBeenSet() const { return m_requestCharactersHasBeenSet; }
    inline void SetRequestCharacters(int value) { m_requestCharactersHasBeenSet = true; m_requestCharacters = value; }
    inline SynthesisTask& WithRequestCharacters(int value) { SetRequestCharacters(value); return *this;}

    /**
     * ARN for the SNS topic optionally used for providing status notification
     * for a speech synthesis task.
     */
    inline const Aws::String& GetSnsTopicArn() const{ return m_snsTopicArn; }
    inline bool SnsTopicArnHasBeenSet() const { return m_snsTopicArnHasBeenSet; }
    inline void SetSnsTopicArn(const Aws::String& value) { m_snsTopicArnHasBeenSet = true; m_snsTopicArn = value; }
    inline void SetSnsTopicArn(Aws::String&& value) { m_snsTopicArnHasBeenSet = true; m_snsTopicArn = std::move(value); }
    inline void SetSnsTopicArn(const char* value) { m_snsTopicArnHasBeenSet = true; m_snsTopicArn.assign(value); }
    inline SynthesisTask& WithSnsTopicArn(const Aws::String& value) { SetSnsTopicArn(value); return *this;}
    inline SynthesisTask& WithSnsTopicArn(Aws::String&& value) { SetSnsTopicArn(std::move(value)); return *this;}
    inline SynthesisTask& WithSnsTopicArn(const char* value) { SetSnsTopicArn(value); return *this;}

    /**
     * List of one or more pronunciation lexicon names you want the service to
     * apply during synthesis. Lexicons are applied only if the language of the
     * lexicon is the same as the language of the voice.
     */
    inline const Aws::Vector<Aws::String>& GetLexiconNames() const{ return m_lexiconNames; }
    inline bool LexiconNamesHasBeenSet() const { return m_lexiconNamesHasBeenSet; }
    inline void SetLexiconNames(const Aws::Vector<Aws::String>& value) { m_lexiconNamesHasBeenSet = true; m_lexiconNames = value; }
    inline void SetLexiconNames(Aws::Vector<Aws::String>&& value) { m_lexiconNamesHasBeenSet = true; m_lexiconNames = std::move(value); }
    inline SynthesisTask& WithLexiconNames(const Aws::Vector<Aws::String>& value) { SetLexiconNames(value); return *this;}
    inline SynthesisTask& WithLexiconNames(Aws::Vector<Aws::String>&& value) { SetLexiconNames(std::move(value)); return *this;}
    inline SynthesisTask& AddLexiconNames(const Aws::String& value) { m_lexiconNamesHasBeenSet = true; m_lexiconNames.push_back(value); return *this; }
    inline SynthesisTask& AddLexiconNames(Aws::String&& value) { m_lexiconNamesHasBeenSet = true; m_lexiconNames.push_back(std::move(value)); return *this; }
    inline SynthesisTask& AddLexiconNames(const char* value) { m_lexiconNamesHasBeenSet = true; m_lexiconNames.push_back(value); return *this; }
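
    // Illustrative sketch (assumed usage): lexicon names can be appended one at
    // a time with AddLexiconNames or supplied all at once through
    // SetLexiconNames/WithLexiconNames; "myLexicon" is a hypothetical name.
    //
    //   SynthesisTask task;
    //   task.AddLexiconNames("myLexicon");
    //   // or: task.WithLexiconNames(Aws::Vector<Aws::String>{"myLexicon"});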

    /**
     * The format in which the returned output will be encoded. For an audio
     * stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will
     * be json.
     */
    inline const OutputFormat& GetOutputFormat() const{ return m_outputFormat; }
    inline bool OutputFormatHasBeenSet() const { return m_outputFormatHasBeenSet; }
    inline void SetOutputFormat(const OutputFormat& value) { m_outputFormatHasBeenSet = true; m_outputFormat = value; }
    inline void SetOutputFormat(OutputFormat&& value) { m_outputFormatHasBeenSet = true; m_outputFormat = std::move(value); }
    inline SynthesisTask& WithOutputFormat(const OutputFormat& value) { SetOutputFormat(value); return *this;}
    inline SynthesisTask& WithOutputFormat(OutputFormat&& value) { SetOutputFormat(std::move(value)); return *this;}

    /**
     * The audio frequency specified in Hz.
     *
     * The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and
     * "24000". The default value for standard voices is "22050". The default
     * value for neural voices is "24000".
     *
     * Valid values for pcm are "8000" and "16000". The default value is "16000".
     */
    inline const Aws::String& GetSampleRate() const{ return m_sampleRate; }
    inline bool SampleRateHasBeenSet() const { return m_sampleRateHasBeenSet; }
    inline void SetSampleRate(const Aws::String& value) { m_sampleRateHasBeenSet = true; m_sampleRate = value; }
    inline void SetSampleRate(Aws::String&& value) { m_sampleRateHasBeenSet = true; m_sampleRate = std::move(value); }
    inline void SetSampleRate(const char* value) { m_sampleRateHasBeenSet = true; m_sampleRate.assign(value); }
    inline SynthesisTask& WithSampleRate(const Aws::String& value) { SetSampleRate(value); return *this;}
    inline SynthesisTask& WithSampleRate(Aws::String&& value) { SetSampleRate(std::move(value)); return *this;}
    inline SynthesisTask& WithSampleRate(const char* value) { SetSampleRate(value); return *this;}
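
    // Illustrative sketch (assumed behavior, based only on the defaults
    // documented above): given a populated SynthesisTask named task with no
    // stored sample rate, a caller might fall back to the engine-specific
    // default for mp3/ogg_vorbis output.
    //
    //   const char* fallback = (task.GetEngine() == Engine::neural) ? "24000"
    //                                                               : "22050";
    //   Aws::String rate = task.SampleRateHasBeenSet() ? task.GetSampleRate()
    //                                                  : Aws::String(fallback);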

    /**
     * The type of speech marks returned for the input text.
     */
    inline const Aws::Vector<SpeechMarkType>& GetSpeechMarkTypes() const{ return m_speechMarkTypes; }
    inline bool SpeechMarkTypesHasBeenSet() const { return m_speechMarkTypesHasBeenSet; }
    inline void SetSpeechMarkTypes(const Aws::Vector<SpeechMarkType>& value) { m_speechMarkTypesHasBeenSet = true; m_speechMarkTypes = value; }
    inline void SetSpeechMarkTypes(Aws::Vector<SpeechMarkType>&& value) { m_speechMarkTypesHasBeenSet = true; m_speechMarkTypes = std::move(value); }
    inline SynthesisTask& WithSpeechMarkTypes(const Aws::Vector<SpeechMarkType>& value) { SetSpeechMarkTypes(value); return *this;}
    inline SynthesisTask& WithSpeechMarkTypes(Aws::Vector<SpeechMarkType>&& value) { SetSpeechMarkTypes(std::move(value)); return *this;}
    inline SynthesisTask& AddSpeechMarkTypes(const SpeechMarkType& value) { m_speechMarkTypesHasBeenSet = true; m_speechMarkTypes.push_back(value); return *this; }
    inline SynthesisTask& AddSpeechMarkTypes(SpeechMarkType&& value) { m_speechMarkTypesHasBeenSet = true; m_speechMarkTypes.push_back(std::move(value)); return *this; }

    /**
     * Specifies whether the input text is plain text or SSML. The default value
     * is plain text.
     */
    inline const TextType& GetTextType() const{ return m_textType; }
    inline bool TextTypeHasBeenSet() const { return m_textTypeHasBeenSet; }
    inline void SetTextType(const TextType& value) { m_textTypeHasBeenSet = true; m_textType = value; }
    inline void SetTextType(TextType&& value) { m_textTypeHasBeenSet = true; m_textType = std::move(value); }
    inline SynthesisTask& WithTextType(const TextType& value) { SetTextType(value); return *this;}
    inline SynthesisTask& WithTextType(TextType&& value) { SetTextType(std::move(value)); return *this;}

    /**
     * Voice ID to use for the synthesis.
     */
    inline const VoiceId& GetVoiceId() const{ return m_voiceId; }
    inline bool VoiceIdHasBeenSet() const { return m_voiceIdHasBeenSet; }
    inline void SetVoiceId(const VoiceId& value) { m_voiceIdHasBeenSet = true; m_voiceId = value; }
    inline void SetVoiceId(VoiceId&& value) { m_voiceIdHasBeenSet = true; m_voiceId = std::move(value); }
    inline SynthesisTask& WithVoiceId(const VoiceId& value) { SetVoiceId(value); return *this;}
    inline SynthesisTask& WithVoiceId(VoiceId&& value) { SetVoiceId(std::move(value)); return *this;}
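
    // Illustrative sketch (assumed usage): for a bilingual voice such as Aditi,
    // an explicit language code (see the LanguageCode accessors below) selects
    // Hindi instead of the default Indian English; task denotes a SynthesisTask
    // instance. VoiceId::Aditi and LanguageCode::hi_IN are assumed to be values
    // defined in the corresponding model headers.
    //
    //   task.WithVoiceId(VoiceId::Aditi).WithLanguageCode(LanguageCode::hi_IN);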

    /**
     * Optional language code for a synthesis task. This is only necessary if
     * using a bilingual voice, such as Aditi, which can be used for either
     * Indian English (en-IN) or Hindi (hi-IN).
     *
     * If a bilingual voice is used and no language code is specified, Amazon
     * Polly uses the default language of the bilingual voice. The default
     * language for any voice is the one returned by the DescribeVoices
     * operation for the LanguageCode parameter. For example, if no language
     * code is specified, Aditi will use Indian English rather than Hindi.
     */
    inline const LanguageCode& GetLanguageCode() const{ return m_languageCode; }
    inline bool LanguageCodeHasBeenSet() const { return m_languageCodeHasBeenSet; }
    inline void SetLanguageCode(const LanguageCode& value) { m_languageCodeHasBeenSet = true; m_languageCode = value; }
    inline void SetLanguageCode(LanguageCode&& value) { m_languageCodeHasBeenSet = true; m_languageCode = std::move(value); }
    inline SynthesisTask& WithLanguageCode(const LanguageCode& value) { SetLanguageCode(value); return *this;}
    inline SynthesisTask& WithLanguageCode(LanguageCode&& value) { SetLanguageCode(std::move(value)); return *this;}

  private:

    Engine m_engine;
    bool m_engineHasBeenSet = false;

    Aws::String m_taskId;
    bool m_taskIdHasBeenSet = false;

    TaskStatus m_taskStatus;
    bool m_taskStatusHasBeenSet = false;

    Aws::String m_taskStatusReason;
    bool m_taskStatusReasonHasBeenSet = false;

    Aws::String m_outputUri;
    bool m_outputUriHasBeenSet = false;

    Aws::Utils::DateTime m_creationTime;
    bool m_creationTimeHasBeenSet = false;

    int m_requestCharacters;
    bool m_requestCharactersHasBeenSet = false;

    Aws::String m_snsTopicArn;
    bool m_snsTopicArnHasBeenSet = false;

    Aws::Vector<Aws::String> m_lexiconNames;
    bool m_lexiconNamesHasBeenSet = false;

    OutputFormat m_outputFormat;
    bool m_outputFormatHasBeenSet = false;

    Aws::String m_sampleRate;
    bool m_sampleRateHasBeenSet = false;

    Aws::Vector<SpeechMarkType> m_speechMarkTypes;
    bool m_speechMarkTypesHasBeenSet = false;

    TextType m_textType;
    bool m_textTypeHasBeenSet = false;

    VoiceId m_voiceId;
    bool m_voiceIdHasBeenSet = false;

    LanguageCode m_languageCode;
    bool m_languageCodeHasBeenSet = false;
  };

} // namespace Model
} // namespace Polly
} // namespace Aws