Example usage of com.amazon.blueshift.bluefront.android.audio.encoder.OpusEncoder in the project aws-sdk-android by aws-amplify:
the method carryOnWithMic of the class InteractionClient.
/**
 * Starts listening for the user to speak, through the microphone. The voice interaction client
 * detects the start and end of speech.
 */
private void carryOnWithMic(final Map<String, String> sessionAttributes, final Map<String, String> requestAttributes, final ResponseType mode) {
    // Reject the call up front if another dialog is already in progress.
    checkBusyState();
    // Stream the user's spoken response to the Amazon Lex service from a worker thread,
    // since recording and request setup must not run on the UI thread.
    new Thread(new Runnable() {
        @Override
        public void run() {
            // Listener callbacks must be delivered on the main (UI) thread.
            final Handler mainHandler = new Handler(context.getMainLooper());
            try {
                // Choose the encoder matching the configured audio encoding (LPCM or Opus).
                audioEncoder = new BufferedAudioEncoder(
                        AudioEncoding.LPCM.equals(interactionConfig.getAudioEncoding())
                                ? new L16PcmEncoder()
                                : new OpusEncoder());
                // Time-out limits for microphone audio (no-speech and max-speech windows).
                audioTimeouts = new AudioTimeouts(
                        interactionConfig.getNoSpeechTimeoutInterval(),
                        interactionConfig.getMaxSpeechTimeoutInterval());
                // Voice-activity-detection configuration from the interaction config.
                vadConfig = new DnnVADConfig(
                        interactionConfig.getLrtThreshold(),
                        interactionConfig.getStartPointingThreshold(),
                        interactionConfig.getEndPointingThreshold());
                lexAudioRecorder = new LexAudioRecorderBuilder(context)
                        .audioEncoder(audioEncoder)
                        .audioTimeouts(audioTimeouts)
                        .dnnVADConfig(vadConfig)
                        .build();
                // Size the pipe buffer to hold the longest possible utterance:
                // sampleRate * seconds * bytesPerSample.
                // NOTE(review): toSeconds truncates sub-second remainders — presumably the
                // timeouts are whole seconds; confirm against the config defaults.
                final int totalSpeechWindowMillis =
                        audioTimeouts.getNoSpeechTimeout() + audioTimeouts.getMaxSpeechTimeout();
                final int pipeBufferBytes = AudioRecorder.DEFAULT_SAMPLE_RATE
                        * (int) TimeUnit.MILLISECONDS.toSeconds(totalSpeechWindowMillis)
                        * (SAMPLE_SIZE / Byte.SIZE);
                final InputStream micStream =
                        new BufferedInputStream(lexAudioRecorder.getConsumerStream(), pipeBufferBytes);
                final PostContentRequest postContentRequest =
                        CreateLexServiceRequest.generatePostContentRequest(
                                sessionAttributes, requestAttributes, interactionConfig,
                                credentialsProvider, mode, micStream,
                                audioEncoder.getMediaType().toString());
                // Start the speech listener; the service API is invoked only once
                // speech frames are detected.
                startListening(mainHandler, microphoneListener, lexAudioRecorder,
                        postContentRequest, InteractionClient.this, mode);
            } catch (final Exception e) {
                // Boundary handler: surface any setup failure to the client's listener
                // on the main thread instead of crashing the worker thread.
                mainHandler.post(new Runnable() {
                    @Override
                    public void run() {
                        interactionListener.onInteractionError(null, e);
                    }
                });
            } finally {
                setBusyState(NOT_BUSY);
            }
        }
    }).start();
}
Aggregations