// Code generated by smithy-go-codegen DO NOT EDIT.

package types

import (
	smithydocument "github.com/aws/smithy-go/document"
)

// A list of possible alternative transcriptions for the input audio. Each
// alternative may contain one or more of Items, Entities, or Transcript.
type Alternative struct {

	// Contains entities identified as personally identifiable information (PII) in
	// your transcription output.
	Entities []Entity

	// Contains words, phrases, or punctuation marks in your transcription output.
	Items []Item

	// Contains transcribed text.
	Transcript *string

	noSmithyDocumentSerde
}

// A wrapper for your audio chunks. Your audio stream consists of one or more
// audio events, which consist of one or more audio chunks. For more
// information, see Event stream encoding
// (https://docs.aws.amazon.com/transcribe/latest/dg/event-stream.html).
type AudioEvent struct {

	// An audio blob that contains the next part of the audio that you want to
	// transcribe. The maximum audio chunk size is 32 KB.
	AudioChunk []byte

	noSmithyDocumentSerde
}

// An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2
// or WebSocket data frames. For more information, see Transcribing streaming
// audio (https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html).
//
// The following types satisfy this interface:
//
//	AudioStreamMemberAudioEvent
//	AudioStreamMemberConfigurationEvent
type AudioStream interface {
	isAudioStream()
}

// A blob of audio from your application. Your audio stream consists of one or
// more audio events. For more information, see Event stream encoding
// (https://docs.aws.amazon.com/transcribe/latest/dg/event-stream.html).
type AudioStreamMemberAudioEvent struct {
	Value AudioEvent

	noSmithyDocumentSerde
}

func (*AudioStreamMemberAudioEvent) isAudioStream() {}

// Contains audio channel definitions and post-call analytics settings.
type AudioStreamMemberConfigurationEvent struct {
	Value ConfigurationEvent

	noSmithyDocumentSerde
}

func (*AudioStreamMemberConfigurationEvent) isAudioStream() {}
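// exampleChunkAudio is an illustrative sketch, not part of the generated API:
// it shows one way to split a raw audio buffer into AudioStream union values
// for the request stream. The 8 KB chunk size is an arbitrary assumption; the
// only documented constraint above is the 32 KB per-chunk maximum.
func exampleChunkAudio(audio []byte) []AudioStream {
	const chunkSize = 8 * 1024 // assumption; must stay at or below 32 KB
	var events []AudioStream
	for start := 0; start < len(audio); start += chunkSize {
		end := start + chunkSize
		if end > len(audio) {
			end = len(audio)
		}
		events = append(events, &AudioStreamMemberAudioEvent{
			Value: AudioEvent{AudioChunk: audio[start:end]},
		})
	}
	return events
}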
// Contains entities identified as personally identifiable information (PII) in
// your transcription output, along with various associated attributes.
// Examples include category, confidence score, content, type, and start and
// end times.
type CallAnalyticsEntity struct {

	// The time, in milliseconds, from the beginning of the audio stream to the
	// start of the identified entity.
	BeginOffsetMillis *int64

	// The category of information identified. For example, PII.
	Category *string

	// The confidence score associated with the identification of an entity in
	// your transcript. Confidence scores are values between 0 and 1. A larger
	// value indicates a higher probability that the identified entity correctly
	// matches the entity spoken in your media.
	Confidence *float64

	// The word or words that represent the identified entity.
	Content *string

	// The time, in milliseconds, from the beginning of the audio stream to the
	// end of the identified entity.
	EndOffsetMillis *int64

	// The type of PII identified. For example, NAME or CREDIT_DEBIT_NUMBER.
	Type *string

	noSmithyDocumentSerde
}

// A word, phrase, or punctuation mark in your Call Analytics transcription
// output, along with various associated attributes, such as confidence score,
// type, and start and end times.
type CallAnalyticsItem struct {

	// The time, in milliseconds, from the beginning of the audio stream to the
	// start of the identified item.
	BeginOffsetMillis *int64

	// The confidence score associated with a word or phrase in your transcript.
	// Confidence scores are values between 0 and 1. A larger value indicates a
	// higher probability that the identified item correctly matches the item
	// spoken in your media.
	Confidence *float64

	// The word or punctuation that was transcribed.
	Content *string

	// The time, in milliseconds, from the beginning of the audio stream to the
	// end of the identified item.
	EndOffsetMillis *int64

	// If partial result stabilization is enabled, Stable indicates whether the
	// specified item is stable (true) or if it may change when the segment is
	// complete (false).
	Stable *bool

	// The type of item identified. Options are: PRONUNCIATION (spoken words) and
	// PUNCTUATION.
	Type ItemType

	// Indicates whether the specified item matches a word in the vocabulary
	// filter included in your Call Analytics request. If true, there is a
	// vocabulary filter match.
	VocabularyFilterMatch bool

	noSmithyDocumentSerde
}

// Contains detailed information about your Call Analytics streaming session.
// These details are provided in the UtteranceEvent and CategoryEvent objects.
//
// The following types satisfy this interface:
//
//	CallAnalyticsTranscriptResultStreamMemberCategoryEvent
//	CallAnalyticsTranscriptResultStreamMemberUtteranceEvent
type CallAnalyticsTranscriptResultStream interface {
	isCallAnalyticsTranscriptResultStream()
}

// Provides information on matched categories that were used to generate
// real-time supervisor alerts.
type CallAnalyticsTranscriptResultStreamMemberCategoryEvent struct {
	Value CategoryEvent

	noSmithyDocumentSerde
}

func (*CallAnalyticsTranscriptResultStreamMemberCategoryEvent) isCallAnalyticsTranscriptResultStream() {
}

// Contains a set of transcription results from one or more audio segments,
// along with additional information per your request parameters. This can
// include information relating to channel definitions, partial result
// stabilization, sentiment, issue detection, and other transcription-related
// data.
type CallAnalyticsTranscriptResultStreamMemberUtteranceEvent struct {
	Value UtteranceEvent

	noSmithyDocumentSerde
}

func (*CallAnalyticsTranscriptResultStreamMemberUtteranceEvent) isCallAnalyticsTranscriptResultStream() {
}

// Provides information on any TranscriptFilterType categories that matched
// your transcription output. Matches are identified for each segment upon
// completion of that segment.
type CategoryEvent struct {

	// Lists the categories that were matched in your audio segment.
	MatchedCategories []string

	// Contains information about the matched categories, including category names
	// and timestamps.
	MatchedDetails map[string]PointsOfInterest

	noSmithyDocumentSerde
}

// Makes it possible to specify which speaker is on which audio channel. For
// example, if your agent is the first participant to speak, you would set
// ChannelId to 0 (to indicate the first channel) and ParticipantRole to AGENT
// (to indicate that it's the agent speaking).
type ChannelDefinition struct {

	// Specify the audio channel you want to define.
	//
	// This member is required.
	ChannelId int32

	// Specify the speaker you want to define. Omitting this parameter is
	// equivalent to specifying both participants.
	//
	// This member is required.
	ParticipantRole ParticipantRole

	noSmithyDocumentSerde
}
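// exampleChannelDefinitions is an illustrative sketch, not generated code: it
// builds the two-channel mapping described in the ChannelDefinition docs, with
// the agent on channel 0 and the customer on channel 1. The role constants are
// assumed to be the ParticipantRole enum values generated elsewhere in this
// package.
func exampleChannelDefinitions() []ChannelDefinition {
	return []ChannelDefinition{
		{ChannelId: 0, ParticipantRole: ParticipantRoleAgent},
		{ChannelId: 1, ParticipantRole: ParticipantRoleCustomer},
	}
}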
// Provides the location, using character count, in your transcript where a
// match is identified. For example, the location of an issue or a category
// match within a segment.
type CharacterOffsets struct {

	// Provides the character count of the first character where a match is
	// identified. For example, the first character associated with an issue or a
	// category match in a segment transcript.
	Begin *int32

	// Provides the character count of the last character where a match is
	// identified. For example, the last character associated with an issue or a
	// category match in a segment transcript.
	End *int32

	noSmithyDocumentSerde
}

// Allows you to set audio channel definitions and post-call analytics
// settings.
type ConfigurationEvent struct {

	// Indicates which speaker is on which audio channel.
	ChannelDefinitions []ChannelDefinition

	// Provides additional optional settings for your Call Analytics post-call
	// request, including encryption and output locations for your redacted and
	// unredacted transcript.
	PostCallAnalyticsSettings *PostCallAnalyticsSettings

	noSmithyDocumentSerde
}
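// exampleConfigurationEvent is an illustrative sketch, not generated code: it
// wraps channel definitions and optional post-call settings in a
// ConfigurationEvent, then in the AudioStream union member that carries it on
// the request stream. For Call Analytics sessions a configuration event is
// typically sent before any audio events; consult the service documentation
// for the exact ordering requirements.
func exampleConfigurationEvent(defs []ChannelDefinition, settings *PostCallAnalyticsSettings) AudioStream {
	return &AudioStreamMemberConfigurationEvent{
		Value: ConfigurationEvent{
			ChannelDefinitions:        defs,
			PostCallAnalyticsSettings: settings,
		},
	}
}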
// Contains entities identified as personally identifiable information (PII) in
// your transcription output, along with various associated attributes.
// Examples include category, confidence score, type, stability score, and
// start and end times.
type Entity struct {

	// The category of information identified. The only category is PII.
	Category *string

	// The confidence score associated with the identified PII entity in your
	// audio. Confidence scores are values between 0 and 1. A larger value
	// indicates a higher probability that the identified entity correctly matches
	// the entity spoken in your media.
	Confidence *float64

	// The word or words identified as PII.
	Content *string

	// The end time, in milliseconds, of the utterance that was identified as PII.
	EndTime float64

	// The start time, in milliseconds, of the utterance that was identified as
	// PII.
	StartTime float64

	// The type of PII identified. For example, NAME or CREDIT_DEBIT_NUMBER.
	Type *string

	noSmithyDocumentSerde
}

// Lists the issues that were identified in your audio segment.
type IssueDetected struct {

	// Provides the timestamps that identify when in an audio segment the
	// specified issue occurs.
	CharacterOffsets *CharacterOffsets

	noSmithyDocumentSerde
}

// A word, phrase, or punctuation mark in your transcription output, along with
// various associated attributes, such as confidence score, type, and start and
// end times.
type Item struct {

	// The confidence score associated with a word or phrase in your transcript.
	// Confidence scores are values between 0 and 1. A larger value indicates a
	// higher probability that the identified item correctly matches the item
	// spoken in your media.
	Confidence *float64

	// The word or punctuation that was transcribed.
	Content *string

	// The end time, in milliseconds, of the transcribed item.
	EndTime float64

	// If speaker partitioning is enabled, Speaker labels the speaker of the
	// specified item.
	Speaker *string

	// If partial result stabilization is enabled, Stable indicates whether the
	// specified item is stable (true) or if it may change when the segment is
	// complete (false).
	Stable *bool

	// The start time, in milliseconds, of the transcribed item.
	StartTime float64

	// The type of item identified. Options are: PRONUNCIATION (spoken words) and
	// PUNCTUATION.
	Type ItemType

	// Indicates whether the specified item matches a word in the vocabulary
	// filter included in your request. If true, there is a vocabulary filter
	// match.
	VocabularyFilterMatch bool

	noSmithyDocumentSerde
}

// The language code that represents the language identified in your audio,
// including the associated confidence score. If you enabled channel
// identification in your request and each channel contained a different
// language, you will have more than one LanguageWithScore result.
type LanguageWithScore struct {

	// The language code of the identified language.
	LanguageCode LanguageCode

	// The confidence score associated with the identified language code.
	// Confidence scores are values between zero and one; larger values indicate a
	// higher confidence in the identified language.
	Score float64

	noSmithyDocumentSerde
}

// A list of possible alternative transcriptions for the input audio. Each
// alternative may contain one or more of Items, Entities, or Transcript.
type MedicalAlternative struct {

	// Contains entities identified as personal health information (PHI) in your
	// transcription output.
	Entities []MedicalEntity

	// Contains words, phrases, or punctuation marks in your transcription output.
	Items []MedicalItem

	// Contains transcribed text.
	Transcript *string

	noSmithyDocumentSerde
}

// Contains entities identified as personal health information (PHI) in your
// transcription output, along with various associated attributes. Examples
// include category, confidence score, type, stability score, and start and end
// times.
type MedicalEntity struct {

	// The category of information identified. The only category is PHI.
	Category *string

	// The confidence score associated with the identified PHI entity in your
	// audio. Confidence scores are values between 0 and 1. A larger value
	// indicates a higher probability that the identified entity correctly matches
	// the entity spoken in your media.
	Confidence *float64

	// The word or words identified as PHI.
	Content *string

	// The end time, in milliseconds, of the utterance that was identified as PHI.
	EndTime float64

	// The start time, in milliseconds, of the utterance that was identified as
	// PHI.
	StartTime float64

	noSmithyDocumentSerde
}

// A word, phrase, or punctuation mark in your transcription output, along with
// various associated attributes, such as confidence score, type, and start and
// end times.
type MedicalItem struct {

	// The confidence score associated with a word or phrase in your transcript.
	// Confidence scores are values between 0 and 1. A larger value indicates a
	// higher probability that the identified item correctly matches the item
	// spoken in your media.
	Confidence *float64

	// The word or punctuation that was transcribed.
	Content *string

	// The end time, in milliseconds, of the transcribed item.
	EndTime float64

	// If speaker partitioning is enabled, Speaker labels the speaker of the
	// specified item.
	Speaker *string

	// The start time, in milliseconds, of the transcribed item.
	StartTime float64

	// The type of item identified. Options are: PRONUNCIATION (spoken words) and
	// PUNCTUATION.
	Type ItemType

	noSmithyDocumentSerde
}
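// exampleStableWords is an illustrative sketch, not generated code: it shows
// how the Stable flag on Item can be used with partial result stabilization,
// collecting only spoken words that are already stable and that did not match
// the vocabulary filter. The ItemType constant is assumed to be the generated
// enum value for PRONUNCIATION.
func exampleStableWords(items []Item) []string {
	var words []string
	for _, item := range items {
		if item.Type != ItemTypePronunciation {
			continue // skip punctuation
		}
		if item.Stable != nil && !*item.Stable {
			continue // still subject to change in a later partial result
		}
		if item.VocabularyFilterMatch || item.Content == nil {
			continue
		}
		words = append(words, *item.Content)
	}
	return words
}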
// The Result associated with a MedicalTranscriptEvent. Contains a set of
// transcription results from one or more audio segments, along with additional
// information per your request parameters. This can include information
// relating to alternative transcriptions, channel identification, partial
// result stabilization, language identification, and other
// transcription-related data.
type MedicalResult struct {

	// A list of possible alternative transcriptions for the input audio. Each
	// alternative may contain one or more of Items, Entities, or Transcript.
	Alternatives []MedicalAlternative

	// Indicates the channel identified for the Result.
	ChannelId *string

	// The end time, in milliseconds, of the Result.
	EndTime float64

	// Indicates if the segment is complete. If IsPartial is true, the segment is
	// not complete. If IsPartial is false, the segment is complete.
	IsPartial bool

	// Provides a unique identifier for the Result.
	ResultId *string

	// The start time, in milliseconds, of the Result.
	StartTime float64

	noSmithyDocumentSerde
}

// The MedicalTranscript associated with a MedicalTranscriptEvent.
// MedicalTranscript contains Results, which contains a set of transcription
// results from one or more audio segments, along with additional information
// per your request parameters.
type MedicalTranscript struct {

	// Contains a set of transcription results from one or more audio segments,
	// along with additional information per your request parameters. This can
	// include information relating to alternative transcriptions, channel
	// identification, partial result stabilization, language identification, and
	// other transcription-related data.
	Results []MedicalResult

	noSmithyDocumentSerde
}

// The MedicalTranscriptEvent associated with a MedicalTranscriptResultStream.
// Contains a set of transcription results from one or more audio segments,
// along with additional information per your request parameters.
type MedicalTranscriptEvent struct {

	// Contains Results, which contains a set of transcription results from one or
	// more audio segments, along with additional information per your request
	// parameters. This can include information relating to alternative
	// transcriptions, channel identification, partial result stabilization,
	// language identification, and other transcription-related data.
	Transcript *MedicalTranscript

	noSmithyDocumentSerde
}

// Contains detailed information about your streaming session.
//
// The following types satisfy this interface:
//
//	MedicalTranscriptResultStreamMemberTranscriptEvent
type MedicalTranscriptResultStream interface {
	isMedicalTranscriptResultStream()
}

// The MedicalTranscriptEvent associated with a MedicalTranscriptResultStream.
// Contains a set of transcription results from one or more audio segments,
// along with additional information per your request parameters. This can
// include information relating to alternative transcriptions, channel
// identification, partial result stabilization, language identification, and
// other transcription-related data.
type MedicalTranscriptResultStreamMemberTranscriptEvent struct {
	Value MedicalTranscriptEvent

	noSmithyDocumentSerde
}

func (*MedicalTranscriptResultStreamMemberTranscriptEvent) isMedicalTranscriptResultStream() {}

// Contains the timestamps of matched categories.
type PointsOfInterest struct {

	// Contains the timestamp ranges (start time through end time) of matched
	// categories and rules.
	TimestampRanges []TimestampRange

	noSmithyDocumentSerde
}
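// exampleCategoryMatches is an illustrative sketch, not generated code: it
// flattens a CategoryEvent into (category, begin, end) tuples using the
// PointsOfInterest timestamp ranges. The categoryMatch struct is a
// hypothetical helper defined here only for illustration.
type categoryMatch struct {
	Category          string
	BeginOffsetMillis int64
	EndOffsetMillis   int64
}

func exampleCategoryMatches(ev CategoryEvent) []categoryMatch {
	var matches []categoryMatch
	for category, points := range ev.MatchedDetails {
		for _, r := range points.TimestampRanges {
			m := categoryMatch{Category: category}
			if r.BeginOffsetMillis != nil {
				m.BeginOffsetMillis = *r.BeginOffsetMillis
			}
			if r.EndOffsetMillis != nil {
				m.EndOffsetMillis = *r.EndOffsetMillis
			}
			matches = append(matches, m)
		}
	}
	return matches
}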
// Allows you to specify additional settings for your streaming Call Analytics
// post-call request, including output locations for your redacted and
// unredacted transcript, which IAM role to use, and, optionally, which
// encryption key to use. ContentRedactionOutput, DataAccessRoleArn, and
// OutputLocation are required fields.
type PostCallAnalyticsSettings struct {

	// The Amazon Resource Name (ARN) of an IAM role that has permissions to
	// access the Amazon S3 bucket that contains your input files. If the role
	// that you specify doesn't have the appropriate permissions to access the
	// specified Amazon S3 location, your request fails. IAM role ARNs have the
	// format arn:partition:iam::account:role/role-name-with-path. For example:
	// arn:aws:iam::111122223333:role/Admin. For more information, see IAM ARNs
	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns).
	//
	// This member is required.
	DataAccessRoleArn *string

	// The Amazon S3 location where you want your Call Analytics post-call
	// transcription output stored. You can use any of the following formats to
	// specify the output location:
	//   - s3://DOC-EXAMPLE-BUCKET
	//   - s3://DOC-EXAMPLE-BUCKET/my-output-folder/
	//   - s3://DOC-EXAMPLE-BUCKET/my-output-folder/my-call-analytics-job.json
	//
	// This member is required.
	OutputLocation *string

	// Specify whether you want only a redacted transcript or both a redacted and
	// an unredacted transcript. If you choose redacted and unredacted, two JSON
	// files are generated and stored in the Amazon S3 output location you
	// specify. Note that to include ContentRedactionOutput in your request, you
	// must enable content redaction (ContentRedactionType).
	ContentRedactionOutput ContentRedactionOutput

	// The KMS key you want to use to encrypt your Call Analytics post-call
	// output. If using a key located in the current Amazon Web Services account,
	// you can specify your KMS key in one of four ways:
	//   - Use the KMS key ID itself. For example,
	//     1234abcd-12ab-34cd-56ef-1234567890ab.
	//   - Use an alias for the KMS key ID. For example, alias/ExampleAlias.
	//   - Use the Amazon Resource Name (ARN) for the KMS key ID. For example,
	//     arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.
	//   - Use the ARN for the KMS key alias. For example,
	//     arn:aws:kms:region:account-ID:alias/ExampleAlias.
	//
	// If using a key located in a different Amazon Web Services account than the
	// current Amazon Web Services account, you can specify your KMS key in one of
	// two ways:
	//   - Use the ARN for the KMS key ID. For example,
	//     arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.
	//   - Use the ARN for the KMS key alias. For example,
	//     arn:aws:kms:region:account-ID:alias/ExampleAlias.
	//
	// Note that the user making the request must have permission to use the
	// specified KMS key.
	OutputEncryptionKMSKeyId *string

	noSmithyDocumentSerde
}
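// examplePostCallSettings is an illustrative sketch, not generated code: it
// fills in the three required PostCallAnalyticsSettings fields using the
// placeholder role ARN and bucket name from the documentation above;
// substitute your own role and output location. The ContentRedactionOutput
// constant is assumed to be the generated enum value for a redacted-only
// transcript.
func examplePostCallSettings() *PostCallAnalyticsSettings {
	role := "arn:aws:iam::111122223333:role/Admin"        // placeholder from the docs above
	output := "s3://DOC-EXAMPLE-BUCKET/my-output-folder/" // placeholder from the docs above
	return &PostCallAnalyticsSettings{
		DataAccessRoleArn:      &role,
		OutputLocation:         &output,
		ContentRedactionOutput: ContentRedactionOutputRedacted,
	}
}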
// The Result associated with a TranscriptEvent. Contains a set of
// transcription results from one or more audio segments, along with additional
// information per your request parameters. This can include information
// relating to alternative transcriptions, channel identification, partial
// result stabilization, language identification, and other
// transcription-related data.
type Result struct {

	// A list of possible alternative transcriptions for the input audio. Each
	// alternative may contain one or more of Items, Entities, or Transcript.
	Alternatives []Alternative

	// Indicates which audio channel is associated with the Result.
	ChannelId *string

	// The end time, in milliseconds, of the Result.
	EndTime float64

	// Indicates if the segment is complete. If IsPartial is true, the segment is
	// not complete. If IsPartial is false, the segment is complete.
	IsPartial bool

	// The language code that represents the language spoken in your audio stream.
	LanguageCode LanguageCode

	// The language code of the dominant language identified in your stream. If
	// you enabled channel identification and each channel of your audio contains
	// a different language, you may have more than one result.
	LanguageIdentification []LanguageWithScore

	// Provides a unique identifier for the Result.
	ResultId *string

	// The start time, in milliseconds, of the Result.
	StartTime float64

	noSmithyDocumentSerde
}

// Contains the timestamp range (start time through end time) of a matched
// category.
type TimestampRange struct {

	// The time, in milliseconds, from the beginning of the audio stream to the
	// start of the category match.
	BeginOffsetMillis *int64

	// The time, in milliseconds, from the beginning of the audio stream to the
	// end of the category match.
	EndOffsetMillis *int64

	noSmithyDocumentSerde
}

// The Transcript associated with a TranscriptEvent. Transcript contains
// Results, which contains a set of transcription results from one or more
// audio segments, along with additional information per your request
// parameters.
type Transcript struct {

	// Contains a set of transcription results from one or more audio segments,
	// along with additional information per your request parameters. This can
	// include information relating to alternative transcriptions, channel
	// identification, partial result stabilization, language identification, and
	// other transcription-related data.
	Results []Result

	noSmithyDocumentSerde
}

// The TranscriptEvent associated with a TranscriptResultStream. Contains a set
// of transcription results from one or more audio segments, along with
// additional information per your request parameters.
type TranscriptEvent struct {

	// Contains Results, which contains a set of transcription results from one or
	// more audio segments, along with additional information per your request
	// parameters. This can include information relating to alternative
	// transcriptions, channel identification, partial result stabilization,
	// language identification, and other transcription-related data.
	Transcript *Transcript

	noSmithyDocumentSerde
}

// Contains detailed information about your streaming session.
//
// The following types satisfy this interface:
//
//	TranscriptResultStreamMemberTranscriptEvent
type TranscriptResultStream interface {
	isTranscriptResultStream()
}

// Contains Transcript, which contains Results. The object contains a set of
// transcription results from one or more audio segments, along with additional
// information per your request parameters.
type TranscriptResultStreamMemberTranscriptEvent struct {
	Value TranscriptEvent

	noSmithyDocumentSerde
}

func (*TranscriptResultStreamMemberTranscriptEvent) isTranscriptResultStream() {}
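// exampleHandleTranscriptEvent is an illustrative sketch, not generated code:
// it shows the type switch used to consume TranscriptResultStream union
// values, appending the top alternative of each completed segment to out.
func exampleHandleTranscriptEvent(event TranscriptResultStream, out *[]string) {
	switch v := event.(type) {
	case *TranscriptResultStreamMemberTranscriptEvent:
		if v.Value.Transcript == nil {
			return
		}
		for _, result := range v.Value.Transcript.Results {
			if result.IsPartial || len(result.Alternatives) == 0 {
				continue // wait for the completed segment
			}
			if t := result.Alternatives[0].Transcript; t != nil {
				*out = append(*out, *t)
			}
		}
	case *UnknownUnionMember:
		// A member tag this SDK version does not know about; ignore or log v.Tag.
		_ = v.Tag
	}
}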
// Contains a set of transcription results from one or more audio segments,
// along with additional information about the parameters included in your
// request. For example, channel definitions, partial result stabilization,
// sentiment, and issue detection.
type UtteranceEvent struct {

	// The time, in milliseconds, from the beginning of the audio stream to the
	// start of the UtteranceEvent.
	BeginOffsetMillis *int64

	// The time, in milliseconds, from the beginning of the audio stream to the
	// end of the UtteranceEvent.
	EndOffsetMillis *int64

	// Contains entities identified as personally identifiable information (PII)
	// in your transcription output.
	Entities []CallAnalyticsEntity

	// Indicates whether the segment in the UtteranceEvent is complete (FALSE) or
	// partial (TRUE).
	IsPartial bool

	// Provides the issues that were detected in the specified segment.
	IssuesDetected []IssueDetected

	// Contains words, phrases, or punctuation marks that are associated with the
	// specified UtteranceEvent.
	Items []CallAnalyticsItem

	// Provides the role of the speaker for each audio channel, either CUSTOMER or
	// AGENT.
	ParticipantRole ParticipantRole

	// Provides the sentiment that was detected in the specified segment.
	Sentiment Sentiment

	// Contains transcribed text.
	Transcript *string

	// The unique identifier that is associated with the specified UtteranceEvent.
	UtteranceId *string

	noSmithyDocumentSerde
}

type noSmithyDocumentSerde = smithydocument.NoSerde

// UnknownUnionMember is returned when a union member is returned over the
// wire, but has an unknown tag.
type UnknownUnionMember struct {
	Tag   string
	Value []byte

	noSmithyDocumentSerde
}

func (*UnknownUnionMember) isAudioStream() {}

func (*UnknownUnionMember) isCallAnalyticsTranscriptResultStream() {}

func (*UnknownUnionMember) isMedicalTranscriptResultStream() {}

func (*UnknownUnionMember) isTranscriptResultStream() {}
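// exampleIssueSpans is an illustrative sketch, not generated code: it uses the
// CharacterOffsets attached to each detected issue to slice the matching text
// out of the utterance transcript. Offsets are interpreted here as byte
// indexes into the transcript string, which is an assumption; verify against
// the service documentation for multi-byte text.
func exampleIssueSpans(ev UtteranceEvent) []string {
	if ev.Transcript == nil {
		return nil
	}
	transcript := *ev.Transcript
	var spans []string
	for _, issue := range ev.IssuesDetected {
		co := issue.CharacterOffsets
		if co == nil || co.Begin == nil || co.End == nil {
			continue
		}
		begin, end := int(*co.Begin), int(*co.End)
		if begin < 0 || end > len(transcript) || begin > end {
			continue // defensively skip out-of-range offsets
		}
		spans = append(spans, transcript[begin:end])
	}
	return spans
}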