Use of com.google.cloud.dialogflow.v2.InputAudioConfig in project java-dialogflow by googleapis:
the class DetectIntentAudio, method detectIntentAudio.
// DialogFlow API Detect Intent sample with audio files.
public static QueryResult detectIntentAudio(
    String projectId, String audioFilePath, String sessionId, String languageCode)
    throws IOException, ApiException {
  // Instantiate a client; try-with-resources shuts the underlying channel down when done.
  try (SessionsClient sessionsClient = SessionsClient.create()) {
    // Build the session name from the projectId (my-project-id) and sessionId (UUID).
    SessionName session = SessionName.of(projectId, sessionId);
    System.out.println("Session Path: " + session.toString());

    // Note: hard coding audioEncoding and sampleRateHertz for simplicity.
    // Audio encoding of the audio content sent in the query request.
    AudioEncoding audioEncoding = AudioEncoding.AUDIO_ENCODING_LINEAR_16;
    int sampleRateHertz = 16000;

    // Instructs the speech recognizer how to process the audio content.
    InputAudioConfig inputAudioConfig =
        InputAudioConfig.newBuilder()
            .setAudioEncoding(audioEncoding) // AudioEncoding.AUDIO_ENCODING_LINEAR_16
            .setLanguageCode(languageCode) // e.g. "en-US"
            .setSampleRateHertz(sampleRateHertz) // 16000
            .build();

    // Wrap the InputAudioConfig in a QueryInput.
    QueryInput queryInput = QueryInput.newBuilder().setAudioConfig(inputAudioConfig).build();

    // Read the raw bytes of the audio file from disk.
    byte[] inputAudio = Files.readAllBytes(Paths.get(audioFilePath));

    // Assemble the DetectIntentRequest: session + audio config + audio payload.
    DetectIntentRequest request =
        DetectIntentRequest.newBuilder()
            .setSession(session.toString())
            .setQueryInput(queryInput)
            .setInputAudio(ByteString.copyFrom(inputAudio))
            .build();

    // Perform the detect intent request.
    DetectIntentResponse response = sessionsClient.detectIntent(request);

    // Display the query result.
    QueryResult queryResult = response.getQueryResult();
    System.out.println("====================");
    System.out.format("Query Text: '%s'\n", queryResult.getQueryText());
    System.out.format(
        "Detected Intent: %s (confidence: %f)\n",
        queryResult.getIntent().getDisplayName(), queryResult.getIntentDetectionConfidence());
    System.out.format(
        "Fulfillment Text: '%s'\n",
        queryResult.getFulfillmentMessagesCount() > 0
            ? queryResult.getFulfillmentMessages(0).getText()
            : "Triggered Default Fallback Intent");
    return queryResult;
  }
}
Use of com.google.cloud.dialogflow.v2.InputAudioConfig in project java-dialogflow by googleapis:
the class DetectIntentStream, method detectIntentStream.
// DialogFlow API Detect Intent sample with audio files processes as an audio stream.
static void detectIntentStream(String projectId, String audioFilePath, String sessionId)
    throws IOException, ApiException {
  // Instantiate a client; try-with-resources shuts the underlying channel down when done.
  try (SessionsClient sessionsClient = SessionsClient.create()) {
    // Build the session name from the projectId (my-project-id) and sessionId (UUID).
    SessionName session = SessionName.of(projectId, sessionId);

    // Instructs the speech recognizer how to process the audio content.
    // Note: hard coding audioEncoding and sampleRateHertz for simplicity.
    // Audio encoding of the audio content sent in the query request.
    InputAudioConfig inputAudioConfig =
        InputAudioConfig.newBuilder()
            .setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
            .setLanguageCode("en-US") // languageCode = "en-US"
            .setSampleRateHertz(16000) // sampleRateHertz = 16000
            .build();

    // Wrap the InputAudioConfig in a QueryInput.
    QueryInput queryInput = QueryInput.newBuilder().setAudioConfig(inputAudioConfig).build();

    // Open the bidirectional gRPC stream.
    BidiStream<StreamingDetectIntentRequest, StreamingDetectIntentResponse> bidiStream =
        sessionsClient.streamingDetectIntentCallable().call();

    // The first request must **only** contain the audio configuration:
    bidiStream.send(
        StreamingDetectIntentRequest.newBuilder()
            .setSession(session.toString())
            .setQueryInput(queryInput)
            .build());

    try (FileInputStream audioStream = new FileInputStream(audioFilePath)) {
      // Subsequent requests must **only** contain the audio data.
      // Following messages: audio chunks. We just read the file in fixed-size chunks. In reality
      // you would split the user input by time.
      byte[] buffer = new byte[4096];
      int bytes;
      while ((bytes = audioStream.read(buffer)) != -1) {
        bidiStream.send(
            StreamingDetectIntentRequest.newBuilder()
                .setInputAudio(ByteString.copyFrom(buffer, 0, bytes))
                .build());
      }
    }

    // Tell the service you are done sending data.
    bidiStream.closeSend();

    // Print every streamed response as it arrives.
    for (StreamingDetectIntentResponse response : bidiStream) {
      QueryResult queryResult = response.getQueryResult();
      System.out.println("====================");
      System.out.format("Intent Display Name: %s\n", queryResult.getIntent().getDisplayName());
      System.out.format("Query Text: '%s'\n", queryResult.getQueryText());
      System.out.format(
          "Detected Intent: %s (confidence: %f)\n",
          queryResult.getIntent().getDisplayName(), queryResult.getIntentDetectionConfidence());
      System.out.format(
          "Fulfillment Text: '%s'\n",
          queryResult.getFulfillmentMessagesCount() > 0
              ? queryResult.getFulfillmentMessages(0).getText()
              : "Triggered Default Fallback Intent");
    }
  }
}
Use of com.google.cloud.dialogflow.v2.InputAudioConfig in project java-dialogflow-cx by googleapis:
the class DetectIntentStream, method detectIntentStream.
// DialogFlow API Detect Intent sample with audio files processes as an audio stream.
public static void detectIntentStream(
    String projectId, String locationId, String agentId, String sessionId, String audioFilePath)
    throws ApiException, IOException {
  // Regional agents must be reached through their region-specific endpoint;
  // only "global" agents use the default endpoint.
  SessionsSettings.Builder sessionsSettingsBuilder = SessionsSettings.newBuilder();
  if (locationId.equals("global")) {
    sessionsSettingsBuilder.setEndpoint("dialogflow.googleapis.com:443");
  } else {
    sessionsSettingsBuilder.setEndpoint(locationId + "-dialogflow.googleapis.com:443");
  }
  SessionsSettings sessionsSettings = sessionsSettingsBuilder.build();

  // Instantiate a client; try-with-resources shuts the underlying channel down when done.
  try (SessionsClient sessionsClient = SessionsClient.create(sessionsSettings)) {
    // Set the session name using the projectID (my-project-id), locationID (global), agentID
    // (UUID), and sessionId (UUID).
    // Using the same `sessionId` between requests allows continuation of the conversation.
    SessionName session = SessionName.of(projectId, locationId, agentId, sessionId);

    // Instructs the speech recognizer how to process the audio content.
    // Note: hard coding audioEncoding and sampleRateHertz for simplicity.
    // Audio encoding of the audio content sent in the query request.
    InputAudioConfig inputAudioConfig =
        InputAudioConfig.newBuilder()
            .setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
            .setSampleRateHertz(16000) // sampleRateHertz = 16000
            .build();

    // Build the AudioInput with the InputAudioConfig.
    AudioInput audioInput = AudioInput.newBuilder().setConfig(inputAudioConfig).build();

    // Build the query with the InputAudioConfig.
    QueryInput queryInput =
        QueryInput.newBuilder()
            .setAudio(audioInput)
            .setLanguageCode("en-US") // languageCode = "en-US"
            .build();

    // Open the bidirectional gRPC stream.
    BidiStream<StreamingDetectIntentRequest, StreamingDetectIntentResponse> bidiStream =
        sessionsClient.streamingDetectIntentCallable().call();

    // Specify ssml name and gender.
    // Voices that are available: https://cloud.google.com/text-to-speech/docs/voices
    VoiceSelectionParams voiceSelection =
        VoiceSelectionParams.newBuilder()
            .setName("en-GB-Standard-A")
            .setSsmlGender(SsmlVoiceGender.SSML_VOICE_GENDER_FEMALE)
            .build();
    SynthesizeSpeechConfig speechConfig =
        SynthesizeSpeechConfig.newBuilder().setVoice(voiceSelection).build();

    // Setup output audio config. Use the typed enum constant directly instead of
    // setting OUTPUT_AUDIO_ENCODING_UNSPECIFIED and then overriding it with the raw
    // wire value 1 via setAudioEncodingValue(1) — LINEAR_16 *is* value 1, so this is
    // the same request expressed without a dead call or a magic number.
    // Encodings: https://cloud.google.com/dialogflow/cx/docs/reference/rpc/google.cloud.dialogflow.cx.v3#outputaudioencoding
    OutputAudioConfig audioConfig =
        OutputAudioConfig.newBuilder()
            .setAudioEncoding(OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_LINEAR_16)
            .setSynthesizeSpeechConfig(speechConfig)
            .build();

    // The first request must **only** contain the audio configuration:
    bidiStream.send(
        StreamingDetectIntentRequest.newBuilder()
            .setSession(session.toString())
            .setQueryInput(queryInput)
            .setOutputAudioConfig(audioConfig)
            .build());

    try (FileInputStream audioStream = new FileInputStream(audioFilePath)) {
      // Subsequent requests must **only** contain the audio data.
      // Following messages: audio chunks. We just read the file in fixed-size chunks. In reality
      // you would split the user input by time.
      byte[] buffer = new byte[4096];
      int bytes;
      while ((bytes = audioStream.read(buffer)) != -1) {
        AudioInput subAudioInput =
            AudioInput.newBuilder().setAudio(ByteString.copyFrom(buffer, 0, bytes)).build();
        QueryInput subQueryInput =
            QueryInput.newBuilder()
                .setAudio(subAudioInput)
                .setLanguageCode("en-US") // languageCode = "en-US"
                .build();
        bidiStream.send(
            StreamingDetectIntentRequest.newBuilder().setQueryInput(subQueryInput).build());
      }
    }

    // Tell the service you are done sending data.
    bidiStream.closeSend();

    // Print every streamed response as it arrives.
    for (StreamingDetectIntentResponse response : bidiStream) {
      QueryResult queryResult = response.getDetectIntentResponse().getQueryResult();
      System.out.println("====================");
      System.out.format("Query Text: '%s'\n", queryResult.getTranscript());
      System.out.format(
          "Detected Intent: %s (confidence: %f)\n",
          queryResult.getIntent().getDisplayName(), queryResult.getIntentDetectionConfidence());
    }
  }
}
Aggregations