/*
* Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.lexruntimev2.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
 * <p>
 * Contains the response of the <code>RecognizeUtterance</code> operation.
 * </p>
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/runtime.lex.v2-2020-08-07/RecognizeUtterance"
 *      target="_top">AWS API Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class RecognizeUtteranceResult extends com.amazonaws.AmazonWebServiceResult
* Indicates whether the input mode to the operation was text or speech.
*
* Content type as specified in the
* A list of messages that were last sent to the user. The messages are ordered based on the order that you returned
* the messages from your Lambda function or the order that the messages are defined in the bot.
*
* The
* A list of intents that Amazon Lex V2 determined might satisfy the user's utterance.
*
* Each interpretation includes the intent, a score that indicates how confident Amazon Lex V2 is that the
* interpretation is the correct one, and an optional sentiment response that indicates the sentiment expressed in
* the utterance.
*
* The
* Represents the current state of the dialog between the user and the bot.
*
* Use this to determine the progress of the conversation and what the next action might be.
*
* The
* The attributes sent in the request.
*
* The
* The identifier of the session in use.
*
* The text used to process the request.
*
* If the input was an audio stream, the
* The
* The prompt or statement to send to the user. This is based on the bot configuration and context. For example, if
* Amazon Lex V2 did not understand the user intent, it sends the
* The bot member that recognized the utterance.
*
* Indicates whether the input mode to the operation was text or speech.
*
* Indicates whether the input mode to the operation was text or speech.
*
* Indicates whether the input mode to the operation was text or speech.
*
* Content type as specified in the
* Content type as specified in the
* Content type as specified in the
* A list of messages that were last sent to the user. The messages are ordered based on the order that you returned
* the messages from your Lambda function or the order that the messages are defined in the bot.
*
* The responseContentType
in the request.
* messages
field is compressed with gzip and then base64 encoded. Before you can use the contents
* of the field, you must decode and decompress the contents. See the example for a simple function to decode and
* decompress the contents.
* interpretations
field is compressed with gzip and then base64 encoded. Before you can use the
* contents of the field, you must decode and decompress the contents. See the example for a simple function to
* decode and decompress the contents.
* sessionState
field is compressed with gzip and then base64 encoded. Before you can use the
* contents of the field, you must decode and decompress the contents. See the example for a simple function to
* decode and decompress the contents.
* requestAttributes
field is compressed with gzip and then base64 encoded. Before you can use the
* contents of the field, you must decode and decompress the contents.
* inputTranscript
field contains the text extracted from the
* audio stream. This is the text that is actually processed to recognize intents and slot values. You can use this
* information to determine if Amazon Lex V2 is correctly processing the audio that you send.
* inputTranscript
field is compressed with gzip and then base64 encoded. Before you can use the
* contents of the field, you must decode and decompress the contents. See the example for a simple function to
* decode and decompress the contents.
* clarificationPrompt
configured for
* the bot. If the intent requires confirmation before taking the fulfillment action, it sends the
* confirmationPrompt
. Another example: Suppose that the Lambda function successfully fulfilled the
* intent, and sent a message to convey to the user. Then Amazon Lex V2 sends that message in the response.
* responseContentType
in the request.
* responseContentType
in the request.
*/
public void setContentType(String contentType) {
this.contentType = contentType;
}
/**
 * <p>
 * Content type as specified in the <code>responseContentType</code> in the request.
 * </p>
 *
 * @return Content type as specified in the <code>responseContentType</code> in the request.
 */
public String getContentType() {
return this.contentType;
}
/**
 * <p>
 * Content type as specified in the <code>responseContentType</code> in the request.
 * </p>
 *
 * @param contentType
 *        Content type as specified in the <code>responseContentType</code> in the request.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public RecognizeUtteranceResult withContentType(String contentType) {
setContentType(contentType);
return this;
}
/**
 * <p>
 * A list of messages that were last sent to the user. The messages are ordered based on the order that you returned
 * the messages from your Lambda function or the order that the messages are defined in the bot.
 * </p>
 * <p>
 * The <code>messages</code> field is compressed with gzip and then base64 encoded. Before you can use the contents
 * of the field, you must decode and decompress the contents. See the example for a simple function to decode and
 * decompress the contents.
 * </p>
 *
 * @param messages
 *        A list of messages that were last sent to the user, compressed with gzip and then base64 encoded.
 */
public void setMessages(String messages) {
this.messages = messages;
}
/**
 * <p>
 * A list of messages that were last sent to the user. The messages are ordered based on the order that you returned
 * the messages from your Lambda function or the order that the messages are defined in the bot.
 * </p>
 * <p>
 * The <code>messages</code> field is compressed with gzip and then base64 encoded. Before you can use the contents
 * of the field, you must decode and decompress the contents. See the example for a simple function to decode and
 * decompress the contents.
 * </p>
 *
 * @return A list of messages that were last sent to the user, compressed with gzip and then base64 encoded.
 */
public String getMessages() {
return this.messages;
}
/**
 * <p>
 * A list of messages that were last sent to the user. The messages are ordered based on the order that you returned
 * the messages from your Lambda function or the order that the messages are defined in the bot.
 * </p>
 * <p>
 * The <code>messages</code> field is compressed with gzip and then base64 encoded. Before you can use the contents
 * of the field, you must decode and decompress the contents. See the example for a simple function to decode and
 * decompress the contents.
 * </p>
 *
 * @param messages
 *        A list of messages that were last sent to the user, compressed with gzip and then base64 encoded.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public RecognizeUtteranceResult withMessages(String messages) {
setMessages(messages);
return this;
}
/**
 * <p>
 * A list of intents that Amazon Lex V2 determined might satisfy the user's utterance.
 * </p>
 * <p>
 * Each interpretation includes the intent, a score that indicates how confident Amazon Lex V2 is that the
 * interpretation is the correct one, and an optional sentiment response that indicates the sentiment expressed in
 * the utterance.
 * </p>
 * <p>
 * The <code>interpretations</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents. See the example for a simple function to
 * decode and decompress the contents.
 * </p>
 *
 * @param interpretations
 *        A list of intents that Amazon Lex V2 determined might satisfy the user's utterance, compressed with gzip
 *        and then base64 encoded.
 */
public void setInterpretations(String interpretations) {
this.interpretations = interpretations;
}
/**
 * <p>
 * A list of intents that Amazon Lex V2 determined might satisfy the user's utterance.
 * </p>
 * <p>
 * Each interpretation includes the intent, a score that indicates how confident Amazon Lex V2 is that the
 * interpretation is the correct one, and an optional sentiment response that indicates the sentiment expressed in
 * the utterance.
 * </p>
 * <p>
 * The <code>interpretations</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents. See the example for a simple function to
 * decode and decompress the contents.
 * </p>
 *
 * @return A list of intents that Amazon Lex V2 determined might satisfy the user's utterance, compressed with gzip
 *         and then base64 encoded.
 */
public String getInterpretations() {
return this.interpretations;
}
/**
 * <p>
 * A list of intents that Amazon Lex V2 determined might satisfy the user's utterance.
 * </p>
 * <p>
 * Each interpretation includes the intent, a score that indicates how confident Amazon Lex V2 is that the
 * interpretation is the correct one, and an optional sentiment response that indicates the sentiment expressed in
 * the utterance.
 * </p>
 * <p>
 * The <code>interpretations</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents. See the example for a simple function to
 * decode and decompress the contents.
 * </p>
 *
 * @param interpretations
 *        A list of intents that Amazon Lex V2 determined might satisfy the user's utterance, compressed with gzip
 *        and then base64 encoded.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public RecognizeUtteranceResult withInterpretations(String interpretations) {
setInterpretations(interpretations);
return this;
}
/**
 * <p>
 * Represents the current state of the dialog between the user and the bot.
 * </p>
 * <p>
 * Use this to determine the progress of the conversation and what the next action might be.
 * </p>
 * <p>
 * The <code>sessionState</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents. See the example for a simple function to
 * decode and decompress the contents.
 * </p>
 *
 * @param sessionState
 *        Represents the current state of the dialog between the user and the bot, compressed with gzip and then
 *        base64 encoded.
 */
public void setSessionState(String sessionState) {
this.sessionState = sessionState;
}
/**
 * <p>
 * Represents the current state of the dialog between the user and the bot.
 * </p>
 * <p>
 * Use this to determine the progress of the conversation and what the next action might be.
 * </p>
 * <p>
 * The <code>sessionState</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents. See the example for a simple function to
 * decode and decompress the contents.
 * </p>
 *
 * @return Represents the current state of the dialog between the user and the bot, compressed with gzip and then
 *         base64 encoded.
 */
public String getSessionState() {
return this.sessionState;
}
/**
 * <p>
 * Represents the current state of the dialog between the user and the bot.
 * </p>
 * <p>
 * Use this to determine the progress of the conversation and what the next action might be.
 * </p>
 * <p>
 * The <code>sessionState</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents. See the example for a simple function to
 * decode and decompress the contents.
 * </p>
 *
 * @param sessionState
 *        Represents the current state of the dialog between the user and the bot, compressed with gzip and then
 *        base64 encoded.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public RecognizeUtteranceResult withSessionState(String sessionState) {
setSessionState(sessionState);
return this;
}
/**
 * <p>
 * The attributes sent in the request.
 * </p>
 * <p>
 * The <code>requestAttributes</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents.
 * </p>
 *
 * @param requestAttributes
 *        The attributes sent in the request, compressed with gzip and then base64 encoded.
 */
public void setRequestAttributes(String requestAttributes) {
this.requestAttributes = requestAttributes;
}
/**
 * <p>
 * The attributes sent in the request.
 * </p>
 * <p>
 * The <code>requestAttributes</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents.
 * </p>
 *
 * @return The attributes sent in the request, compressed with gzip and then base64 encoded.
 */
public String getRequestAttributes() {
return this.requestAttributes;
}
/**
 * <p>
 * The attributes sent in the request.
 * </p>
 * <p>
 * The <code>requestAttributes</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents.
 * </p>
 *
 * @param requestAttributes
 *        The attributes sent in the request, compressed with gzip and then base64 encoded.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public RecognizeUtteranceResult withRequestAttributes(String requestAttributes) {
setRequestAttributes(requestAttributes);
return this;
}
/**
 * <p>
 * The identifier of the session in use.
 * </p>
 *
 * @param sessionId
 *        The identifier of the session in use.
 */
public void setSessionId(String sessionId) {
    this.sessionId = sessionId;
}

/**
 * <p>
 * The identifier of the session in use.
 * </p>
 *
 * @return The identifier of the session in use.
 */
public String getSessionId() {
    return this.sessionId;
}

/**
 * <p>
 * The identifier of the session in use.
 * </p>
 *
 * @param sessionId
 *        The identifier of the session in use.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public RecognizeUtteranceResult withSessionId(String sessionId) {
    setSessionId(sessionId);
    return this;
}

/**
 * <p>
 * The text used to process the request.
 * </p>
 * <p>
 * If the input was an audio stream, the <code>inputTranscript</code> field contains the text extracted from the
 * audio stream. This is the text that is actually processed to recognize intents and slot values. You can use this
 * information to determine if Amazon Lex V2 is correctly processing the audio that you send.
 * </p>
 * <p>
 * The <code>inputTranscript</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents. See the example for a simple function to
 * decode and decompress the contents.
 * </p>
 *
 * @param inputTranscript
 *        The text used to process the request, compressed with gzip and then base64 encoded.
 */
public void setInputTranscript(String inputTranscript) {
    this.inputTranscript = inputTranscript;
}
/**
 * <p>
 * The text used to process the request.
 * </p>
 * <p>
 * If the input was an audio stream, the <code>inputTranscript</code> field contains the text extracted from the
 * audio stream. This is the text that is actually processed to recognize intents and slot values. You can use this
 * information to determine if Amazon Lex V2 is correctly processing the audio that you send.
 * </p>
 * <p>
 * The <code>inputTranscript</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents. See the example for a simple function to
 * decode and decompress the contents.
 * </p>
 *
 * @return The text used to process the request, compressed with gzip and then base64 encoded.
 */
public String getInputTranscript() {
return this.inputTranscript;
}
/**
 * <p>
 * The text used to process the request.
 * </p>
 * <p>
 * If the input was an audio stream, the <code>inputTranscript</code> field contains the text extracted from the
 * audio stream. This is the text that is actually processed to recognize intents and slot values. You can use this
 * information to determine if Amazon Lex V2 is correctly processing the audio that you send.
 * </p>
 * <p>
 * The <code>inputTranscript</code> field is compressed with gzip and then base64 encoded. Before you can use the
 * contents of the field, you must decode and decompress the contents. See the example for a simple function to
 * decode and decompress the contents.
 * </p>
 *
 * @param inputTranscript
 *        The text used to process the request, compressed with gzip and then base64 encoded.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public RecognizeUtteranceResult withInputTranscript(String inputTranscript) {
setInputTranscript(inputTranscript);
return this;
}
/**
 * <p>
 * The prompt or statement to send to the user. This is based on the bot configuration and context. For example, if
 * Amazon Lex V2 did not understand the user intent, it sends the <code>clarificationPrompt</code> configured for
 * the bot. If the intent requires confirmation before taking the fulfillment action, it sends the
 * <code>confirmationPrompt</code>. Another example: Suppose that the Lambda function successfully fulfilled the
 * intent, and sent a message to convey to the user. Then Amazon Lex V2 sends that message in the response.
 * </p>
 *
 * @param audioStream
 *        The prompt or statement to send to the user, based on the bot configuration and context.
 */
public void setAudioStream(java.io.InputStream audioStream) {
this.audioStream = audioStream;
}
/**
 * <p>
 * The prompt or statement to send to the user. This is based on the bot configuration and context. For example, if
 * Amazon Lex V2 did not understand the user intent, it sends the <code>clarificationPrompt</code> configured for
 * the bot. If the intent requires confirmation before taking the fulfillment action, it sends the
 * <code>confirmationPrompt</code>. Another example: Suppose that the Lambda function successfully fulfilled the
 * intent, and sent a message to convey to the user. Then Amazon Lex V2 sends that message in the response.
 * </p>
 *
 * @return The prompt or statement to send to the user, based on the bot configuration and context.
 */
public java.io.InputStream getAudioStream() {
return this.audioStream;
}
/**
 * <p>
 * The prompt or statement to send to the user. This is based on the bot configuration and context. For example, if
 * Amazon Lex V2 did not understand the user intent, it sends the <code>clarificationPrompt</code> configured for
 * the bot. If the intent requires confirmation before taking the fulfillment action, it sends the
 * <code>confirmationPrompt</code>. Another example: Suppose that the Lambda function successfully fulfilled the
 * intent, and sent a message to convey to the user. Then Amazon Lex V2 sends that message in the response.
 * </p>
 *
 * @param audioStream
 *        The prompt or statement to send to the user, based on the bot configuration and context.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public RecognizeUtteranceResult withAudioStream(java.io.InputStream audioStream) {
setAudioStream(audioStream);
return this;
}
/**
 * <p>
 * The bot member that recognized the utterance.
 * </p>
 *
 * @param recognizedBotMember
 *        The bot member that recognized the utterance.
 */
public void setRecognizedBotMember(String recognizedBotMember) {
    this.recognizedBotMember = recognizedBotMember;
}

/**
 * <p>
 * The bot member that recognized the utterance.
 * </p>
 *
 * @return The bot member that recognized the utterance.
 */
public String getRecognizedBotMember() {
    return this.recognizedBotMember;
}

/**
 * <p>
 * The bot member that recognized the utterance.
 * </p>
 *
 * @param recognizedBotMember
 *        The bot member that recognized the utterance.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public RecognizeUtteranceResult withRecognizedBotMember(String recognizedBotMember) {
    setRecognizedBotMember(recognizedBotMember);
    return this;
}

/**
 * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
 * redacted from this string using a placeholder value.
 *
 * @return A string representation of this object.
 *
 * @see java.lang.Object#toString()
 */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("{");
    if (getInputMode() != null)
        sb.append("InputMode: ").append(getInputMode()).append(",");
    if (getContentType() != null)
        sb.append("ContentType: ").append(getContentType()).append(",");
    if (getMessages() != null)
        sb.append("Messages: ").append(getMessages()).append(",");
    if (getInterpretations() != null)
        sb.append("Interpretations: ").append(getInterpretations()).append(",");
    if (getSessionState() != null)
        sb.append("SessionState: ").append(getSessionState()).append(",");
    if (getRequestAttributes() != null)
        sb.append("RequestAttributes: ").append(getRequestAttributes()).append(",");
    if (getSessionId() != null)
        sb.append("SessionId: ").append(getSessionId()).append(",");
    if (getInputTranscript() != null)
        sb.append("InputTranscript: ").append(getInputTranscript()).append(",");
    if (getAudioStream() != null)
        sb.append("AudioStream: ").append(getAudioStream()).append(",");
    if (getRecognizedBotMember() != null)
        sb.append("RecognizedBotMember: ").append(getRecognizedBotMember());
    sb.append("}");
    return sb.toString();
}

@Override
public boolean equals(Object obj) {
    if (this == obj)
        return true;
    if (obj == null)
        return false;
    if (obj instanceof RecognizeUtteranceResult == false)
        return false;
    RecognizeUtteranceResult other = (RecognizeUtteranceResult) obj;
    if (other.getInputMode() == null ^ this.getInputMode() == null)
        return false;
    if (other.getInputMode() != null && other.getInputMode().equals(this.getInputMode()) == false)
        return false;
    if (other.getContentType() == null ^ this.getContentType() == null)
        return false;
    if (other.getContentType() != null && other.getContentType().equals(this.getContentType()) == false)
        return false;
    if (other.getMessages() == null ^ this.getMessages() == null)
        return false;
    if (other.getMessages() != null && other.getMessages().equals(this.getMessages()) == false)
        return false;
    if (other.getInterpretations() == null ^ this.getInterpretations() == null)
        return false;
    if (other.getInterpretations() != null && other.getInterpretations().equals(this.getInterpretations()) == false)
        return false;
    if (other.getSessionState() == null ^ this.getSessionState() == null)
        return false;
    if (other.getSessionState() != null && other.getSessionState().equals(this.getSessionState()) == false)
        return false;
    if (other.getRequestAttributes() == null ^ this.getRequestAttributes() == null)
        return false;
    if (other.getRequestAttributes() != null && other.getRequestAttributes().equals(this.getRequestAttributes()) == false)
        return false;
    if (other.getSessionId() == null ^ this.getSessionId() == null)
        return false;
    if (other.getSessionId() != null && other.getSessionId().equals(this.getSessionId()) == false)
        return false;
    if (other.getInputTranscript() == null ^ this.getInputTranscript() == null)
        return false;
    if (other.getInputTranscript() != null && other.getInputTranscript().equals(this.getInputTranscript()) == false)
        return false;
    if (other.getAudioStream() == null ^ this.getAudioStream() == null)
        return false;
    if (other.getAudioStream() != null && other.getAudioStream().equals(this.getAudioStream()) == false)
        return false;
    if (other.getRecognizedBotMember() == null ^ this.getRecognizedBotMember() == null)
        return false;
    if (other.getRecognizedBotMember() != null && other.getRecognizedBotMember().equals(this.getRecognizedBotMember()) == false)
        return false;
    return true;
}

@Override
public int hashCode() {
    final int prime = 31;
    int hashCode = 1;
    hashCode = prime * hashCode + ((getInputMode() == null) ? 0 : getInputMode().hashCode());
    hashCode = prime * hashCode + ((getContentType() == null) ? 0 : getContentType().hashCode());
    hashCode = prime * hashCode + ((getMessages() == null) ? 0 : getMessages().hashCode());
    hashCode = prime * hashCode + ((getInterpretations() == null) ? 0 : getInterpretations().hashCode());
    hashCode = prime * hashCode + ((getSessionState() == null) ? 0 : getSessionState().hashCode());
    hashCode = prime * hashCode + ((getRequestAttributes() == null) ? 0 : getRequestAttributes().hashCode());
    hashCode = prime * hashCode + ((getSessionId() == null) ? 0 : getSessionId().hashCode());
    hashCode = prime * hashCode + ((getInputTranscript() == null) ? 0 : getInputTranscript().hashCode());
    hashCode = prime * hashCode + ((getAudioStream() == null) ? 0 : getAudioStream().hashCode());
    hashCode = prime * hashCode + ((getRecognizedBotMember() == null) ? 0 : getRecognizedBotMember().hashCode());
    return hashCode;
}

@Override
public RecognizeUtteranceResult clone() {
    try {
        return (RecognizeUtteranceResult) super.clone();
    } catch (CloneNotSupportedException e) {
        // Cloneable is declared on this class, so this should be unreachable.
        throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
    }
}
}