/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#pragma once
#include <aws/polly/Polly_EXPORTS.h>
#include <aws/polly/PollyRequest.h>
#include <aws/polly/model/Engine.h>
#include <aws/polly/model/LanguageCode.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/polly/model/OutputFormat.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/polly/model/SpeechMarkType.h>
#include <aws/polly/model/TextType.h>
#include <aws/polly/model/VoiceId.h>
#include <utility>

namespace Aws
{
namespace Polly
{
namespace Model
{

  /**
   */
  class SynthesizeSpeechRequest : public PollyRequest
  {
  public:
    AWS_POLLY_API SynthesizeSpeechRequest();

    // Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
    // Note: this is not true for the response; multiple operations may have the same response name,
    // so we can not get the operation's name from the response.
    inline virtual const char* GetServiceRequestName() const override { return "SynthesizeSpeech"; }

    AWS_POLLY_API Aws::String SerializePayload() const override;

  protected:
    AWS_POLLY_API void DumpBodyToUrl(Aws::Http::URI& uri) const override;

  public:

    ///@{
    /**
     * <p>Specifies the engine (<code>standard</code> or <code>neural</code>) for
     * Amazon Polly to use when processing input text for speech synthesis. For
     * information on Amazon Polly voices and which voices are available in
     * standard-only, NTTS-only, and both standard and NTTS formats, see Available
     * Voices in the Amazon Polly Developer Guide.</p> <p> <b>NTTS-only voices</b>
     * </p> <p>When using NTTS-only voices such as Kevin (en-US), this parameter is
     * required and must be set to <code>neural</code>. If the engine is not
     * specified, or is set to <code>standard</code>, this will result in an
     * error.</p> <p>Type: String</p> <p>Valid Values: <code>standard</code> |
     * <code>neural</code></p> <p>Required: Yes</p> <p> <b>Standard voices</b> </p>
     * <p>For standard voices, this is not required; the engine parameter defaults to
     * <code>standard</code>. If the engine is not specified, or is set to
     * <code>standard</code> and an NTTS-only voice is selected, this will result in
     * an error.</p>
     */
    inline const Engine& GetEngine() const { return m_engine; }
    inline bool EngineHasBeenSet() const { return m_engineHasBeenSet; }
    inline void SetEngine(const Engine& value) { m_engineHasBeenSet = true; m_engine = value; }
    inline void SetEngine(Engine&& value) { m_engineHasBeenSet = true; m_engine = std::move(value); }
    inline SynthesizeSpeechRequest& WithEngine(const Engine& value) { SetEngine(value); return *this; }
    inline SynthesizeSpeechRequest& WithEngine(Engine&& value) { SetEngine(std::move(value)); return *this; }
    ///@}
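
    /*
     * Illustrative sketch (not part of the generated header): selecting the engine.
     * NTTS-only voices such as Kevin require Engine::neural; standard voices may
     * omit the engine and default to standard. The voice, format, and text below
     * are examples only.
     *
     *   Aws::Polly::Model::SynthesizeSpeechRequest request;
     *   request.WithEngine(Aws::Polly::Model::Engine::neural)      // required for NTTS-only voices
     *          .WithVoiceId(Aws::Polly::Model::VoiceId::Kevin)
     *          .WithOutputFormat(Aws::Polly::Model::OutputFormat::mp3)
     *          .WithText("Hello from the neural engine.");
     */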

    ///@{
    /**
     * <p>Optional language code for the Synthesize Speech request. This is only
     * necessary if using a bilingual voice, such as Aditi, which can be used for
     * either Indian English (en-IN) or Hindi (hi-IN).</p> <p>If a bilingual voice is
     * used and no language code is specified, Amazon Polly uses the default language
     * of the bilingual voice. The default language for any voice is the one returned
     * by the DescribeVoices operation for the <code>LanguageCode</code> parameter.
     * For example, if no language code is specified, Aditi will use Indian English
     * rather than Hindi.</p>
     */
    inline const LanguageCode& GetLanguageCode() const { return m_languageCode; }
    inline bool LanguageCodeHasBeenSet() const { return m_languageCodeHasBeenSet; }
    inline void SetLanguageCode(const LanguageCode& value) { m_languageCodeHasBeenSet = true; m_languageCode = value; }
    inline void SetLanguageCode(LanguageCode&& value) { m_languageCodeHasBeenSet = true; m_languageCode = std::move(value); }
    inline SynthesizeSpeechRequest& WithLanguageCode(const LanguageCode& value) { SetLanguageCode(value); return *this; }
    inline SynthesizeSpeechRequest& WithLanguageCode(LanguageCode&& value) { SetLanguageCode(std::move(value)); return *this; }
    ///@}
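
    /*
     * Illustrative sketch (not part of the generated header): pinning a bilingual
     * voice to a specific language. Without a language code, Aditi defaults to
     * Indian English (en-IN). The enum spelling hi_IN is assumed to follow the
     * SDK's usual hyphen-to-underscore mapping.
     *
     *   Aws::Polly::Model::SynthesizeSpeechRequest request;
     *   request.WithVoiceId(Aws::Polly::Model::VoiceId::Aditi)
     *          .WithLanguageCode(Aws::Polly::Model::LanguageCode::hi_IN)  // synthesize as Hindi
     *          .WithOutputFormat(Aws::Polly::Model::OutputFormat::mp3)
     *          .WithText("Namaste.");
     */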

    ///@{
    /**
     * <p>List of one or more pronunciation lexicon names you want the service to
     * apply during synthesis. Lexicons are applied only if the language of the
     * lexicon is the same as the language of the voice. For information about
     * storing lexicons, see PutLexicon.</p>
     */
    inline const Aws::Vector<Aws::String>& GetLexiconNames() const { return m_lexiconNames; }
    inline bool LexiconNamesHasBeenSet() const { return m_lexiconNamesHasBeenSet; }
    inline void SetLexiconNames(const Aws::Vector<Aws::String>& value) { m_lexiconNamesHasBeenSet = true; m_lexiconNames = value; }
    inline void SetLexiconNames(Aws::Vector<Aws::String>&& value) { m_lexiconNamesHasBeenSet = true; m_lexiconNames = std::move(value); }
    inline SynthesizeSpeechRequest& WithLexiconNames(const Aws::Vector<Aws::String>& value) { SetLexiconNames(value); return *this; }
    inline SynthesizeSpeechRequest& WithLexiconNames(Aws::Vector<Aws::String>&& value) { SetLexiconNames(std::move(value)); return *this; }
    inline SynthesizeSpeechRequest& AddLexiconNames(const Aws::String& value) { m_lexiconNamesHasBeenSet = true; m_lexiconNames.push_back(value); return *this; }
    inline SynthesizeSpeechRequest& AddLexiconNames(Aws::String&& value) { m_lexiconNamesHasBeenSet = true; m_lexiconNames.push_back(std::move(value)); return *this; }
    inline SynthesizeSpeechRequest& AddLexiconNames(const char* value) { m_lexiconNamesHasBeenSet = true; m_lexiconNames.push_back(value); return *this; }
    ///@}
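
    /*
     * Illustrative sketch (not part of the generated header): applying previously
     * stored pronunciation lexicons during synthesis. The lexicon name "acronyms"
     * is hypothetical; it must have been uploaded with PutLexicon and match the
     * language of the selected voice.
     *
     *   Aws::Polly::Model::SynthesizeSpeechRequest request;
     *   request.AddLexiconNames("acronyms")
     *          .WithVoiceId(Aws::Polly::Model::VoiceId::Joanna)
     *          .WithOutputFormat(Aws::Polly::Model::OutputFormat::mp3)
     *          .WithText("W3C publishes the SSML specification.");
     */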

    ///@{
    /**
     * <p>The format in which the returned output will be encoded. For audio stream,
     * this will be <code>mp3</code>, <code>ogg_vorbis</code>, or <code>pcm</code>.
     * For speech marks, this will be <code>json</code>.</p> <p>When
     * <code>pcm</code> is used, the content returned is audio/pcm in a signed
     * 16-bit, 1 channel (mono), little-endian format.</p>
     */
    inline const OutputFormat& GetOutputFormat() const { return m_outputFormat; }
    inline bool OutputFormatHasBeenSet() const { return m_outputFormatHasBeenSet; }
    inline void SetOutputFormat(const OutputFormat& value) { m_outputFormatHasBeenSet = true; m_outputFormat = value; }
    inline void SetOutputFormat(OutputFormat&& value) { m_outputFormatHasBeenSet = true; m_outputFormat = std::move(value); }
    inline SynthesizeSpeechRequest& WithOutputFormat(const OutputFormat& value) { SetOutputFormat(value); return *this; }
    inline SynthesizeSpeechRequest& WithOutputFormat(OutputFormat&& value) { SetOutputFormat(std::move(value)); return *this; }
    ///@}
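
    /*
     * Illustrative sketch (not part of the generated header): requesting raw PCM
     * audio at a specific sample rate. PCM output is signed 16-bit mono,
     * little-endian, so the caller can feed it to an audio device or wrap it in a
     * WAV container.
     *
     *   Aws::Polly::Model::SynthesizeSpeechRequest request;
     *   request.WithOutputFormat(Aws::Polly::Model::OutputFormat::pcm)
     *          .WithSampleRate("16000")                   // valid pcm rates: "8000", "16000"
     *          .WithVoiceId(Aws::Polly::Model::VoiceId::Matthew)
     *          .WithText("Sixteen kilohertz mono PCM.");
     */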

    ///@{
    /**
     * <p>The audio frequency specified in Hz.</p> <p>The valid values for mp3 and
     * ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for
     * standard voices is "22050". The default value for neural voices is
     * "24000".</p> <p>Valid values for pcm are "8000" and "16000". The default
     * value is "16000".</p>
     */
    inline const Aws::String& GetSampleRate() const { return m_sampleRate; }
    inline bool SampleRateHasBeenSet() const { return m_sampleRateHasBeenSet; }
    inline void SetSampleRate(const Aws::String& value) { m_sampleRateHasBeenSet = true; m_sampleRate = value; }
    inline void SetSampleRate(Aws::String&& value) { m_sampleRateHasBeenSet = true; m_sampleRate = std::move(value); }
    inline void SetSampleRate(const char* value) { m_sampleRateHasBeenSet = true; m_sampleRate.assign(value); }
    inline SynthesizeSpeechRequest& WithSampleRate(const Aws::String& value) { SetSampleRate(value); return *this; }
    inline SynthesizeSpeechRequest& WithSampleRate(Aws::String&& value) { SetSampleRate(std::move(value)); return *this; }
    inline SynthesizeSpeechRequest& WithSampleRate(const char* value) { SetSampleRate(value); return *this; }
    ///@}

    ///@{
    /**
     * <p>The type of speech marks returned for the input text.</p>
     */
    inline const Aws::Vector<SpeechMarkType>& GetSpeechMarkTypes() const { return m_speechMarkTypes; }
    inline bool SpeechMarkTypesHasBeenSet() const { return m_speechMarkTypesHasBeenSet; }
    inline void SetSpeechMarkTypes(const Aws::Vector<SpeechMarkType>& value) { m_speechMarkTypesHasBeenSet = true; m_speechMarkTypes = value; }
    inline void SetSpeechMarkTypes(Aws::Vector<SpeechMarkType>&& value) { m_speechMarkTypesHasBeenSet = true; m_speechMarkTypes = std::move(value); }
    inline SynthesizeSpeechRequest& WithSpeechMarkTypes(const Aws::Vector<SpeechMarkType>& value) { SetSpeechMarkTypes(value); return *this; }
    inline SynthesizeSpeechRequest& WithSpeechMarkTypes(Aws::Vector<SpeechMarkType>&& value) { SetSpeechMarkTypes(std::move(value)); return *this; }
    inline SynthesizeSpeechRequest& AddSpeechMarkTypes(const SpeechMarkType& value) { m_speechMarkTypesHasBeenSet = true; m_speechMarkTypes.push_back(value); return *this; }
    inline SynthesizeSpeechRequest& AddSpeechMarkTypes(SpeechMarkType&& value) { m_speechMarkTypesHasBeenSet = true; m_speechMarkTypes.push_back(std::move(value)); return *this; }
    ///@}
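
    /*
     * Illustrative sketch (not part of the generated header): requesting word and
     * sentence speech marks. Speech marks are returned as JSON metadata rather than
     * audio, so the output format must be json.
     *
     *   Aws::Polly::Model::SynthesizeSpeechRequest request;
     *   request.AddSpeechMarkTypes(Aws::Polly::Model::SpeechMarkType::word)
     *          .AddSpeechMarkTypes(Aws::Polly::Model::SpeechMarkType::sentence)
     *          .WithOutputFormat(Aws::Polly::Model::OutputFormat::json)
     *          .WithVoiceId(Aws::Polly::Model::VoiceId::Joanna)
     *          .WithText("Speech marks describe word timing.");
     */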

    ///@{
    /**
     * <p>Input text to synthesize. If you specify <code>ssml</code> as the
     * <code>TextType</code>, follow the SSML format for the input text.</p>
     */
    inline const Aws::String& GetText() const { return m_text; }
    inline bool TextHasBeenSet() const { return m_textHasBeenSet; }
    inline void SetText(const Aws::String& value) { m_textHasBeenSet = true; m_text = value; }
    inline void SetText(Aws::String&& value) { m_textHasBeenSet = true; m_text = std::move(value); }
    inline void SetText(const char* value) { m_textHasBeenSet = true; m_text.assign(value); }
    inline SynthesizeSpeechRequest& WithText(const Aws::String& value) { SetText(value); return *this; }
    inline SynthesizeSpeechRequest& WithText(Aws::String&& value) { SetText(std::move(value)); return *this; }
    inline SynthesizeSpeechRequest& WithText(const char* value) { SetText(value); return *this; }
    ///@}
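
    /*
     * Illustrative sketch (not part of the generated header): passing SSML instead
     * of plain text. The text type must be set to ssml and the text must be a
     * well-formed <speak> document.
     *
     *   Aws::Polly::Model::SynthesizeSpeechRequest request;
     *   request.WithTextType(Aws::Polly::Model::TextType::ssml)
     *          .WithText("<speak>Hello <break time=\"500ms\"/> world.</speak>")
     *          .WithVoiceId(Aws::Polly::Model::VoiceId::Joanna)
     *          .WithOutputFormat(Aws::Polly::Model::OutputFormat::ogg_vorbis);
     */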

    ///@{
    /**
     * <p>Specifies whether the input text is plain text or SSML. The default value
     * is plain text. For more information, see Using SSML.</p>
     */
    inline const TextType& GetTextType() const { return m_textType; }
    inline bool TextTypeHasBeenSet() const { return m_textTypeHasBeenSet; }
    inline void SetTextType(const TextType& value) { m_textTypeHasBeenSet = true; m_textType = value; }
    inline void SetTextType(TextType&& value) { m_textTypeHasBeenSet = true; m_textType = std::move(value); }
    inline SynthesizeSpeechRequest& WithTextType(const TextType& value) { SetTextType(value); return *this; }
    inline SynthesizeSpeechRequest& WithTextType(TextType&& value) { SetTextType(std::move(value)); return *this; }
    ///@}

    ///@{
    /**
     * <p>Voice ID to use for the synthesis. You can get a list of available voice
     * IDs by calling the DescribeVoices operation.</p>
     */
    inline const VoiceId& GetVoiceId() const { return m_voiceId; }
    inline bool VoiceIdHasBeenSet() const { return m_voiceIdHasBeenSet; }
    inline void SetVoiceId(const VoiceId& value) { m_voiceIdHasBeenSet = true; m_voiceId = value; }
    inline void SetVoiceId(VoiceId&& value) { m_voiceIdHasBeenSet = true; m_voiceId = std::move(value); }
    inline SynthesizeSpeechRequest& WithVoiceId(const VoiceId& value) { SetVoiceId(value); return *this; }
    inline SynthesizeSpeechRequest& WithVoiceId(VoiceId&& value) { SetVoiceId(std::move(value)); return *this; }
    ///@}

  private:

    Engine m_engine;
    bool m_engineHasBeenSet = false;

    LanguageCode m_languageCode;
    bool m_languageCodeHasBeenSet = false;

    Aws::Vector<Aws::String> m_lexiconNames;
    bool m_lexiconNamesHasBeenSet = false;

    OutputFormat m_outputFormat;
    bool m_outputFormatHasBeenSet = false;

    Aws::String m_sampleRate;
    bool m_sampleRateHasBeenSet = false;

    Aws::Vector<SpeechMarkType> m_speechMarkTypes;
    bool m_speechMarkTypesHasBeenSet = false;

    Aws::String m_text;
    bool m_textHasBeenSet = false;

    TextType m_textType;
    bool m_textTypeHasBeenSet = false;

    VoiceId m_voiceId;
    bool m_voiceIdHasBeenSet = false;
  };

} // namespace Model
} // namespace Polly
} // namespace Aws
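
/*
 * Illustrative end-to-end sketch (not part of the generated header), assuming the
 * SDK has been initialized and default credentials/region are configured: build a
 * request, call SynthesizeSpeech, and write the returned audio stream to a file.
 * The output path "hello.mp3" is hypothetical.
 *
 *   #include <aws/core/Aws.h>
 *   #include <aws/polly/PollyClient.h>
 *   #include <aws/polly/model/SynthesizeSpeechRequest.h>
 *   #include <fstream>
 *
 *   int main()
 *   {
 *       Aws::SDKOptions options;
 *       Aws::InitAPI(options);
 *       {
 *           Aws::Polly::PollyClient client;
 *
 *           Aws::Polly::Model::SynthesizeSpeechRequest request;
 *           request.WithVoiceId(Aws::Polly::Model::VoiceId::Joanna)
 *                  .WithOutputFormat(Aws::Polly::Model::OutputFormat::mp3)
 *                  .WithText("Hello from Amazon Polly.");
 *
 *           auto outcome = client.SynthesizeSpeech(request);
 *           if (outcome.IsSuccess())
 *           {
 *               // The result exposes an Aws::IOStream with the synthesized audio.
 *               std::ofstream out("hello.mp3", std::ios::binary);
 *               out << outcome.GetResult().GetAudioStream().rdbuf();
 *           }
 *       }
 *       Aws::ShutdownAPI(options);
 *       return 0;
 *   }
 */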