Use of org.openhab.core.voice.STTServiceHandle in project openhab-addons by openhab.
Example from the class WatsonSTTService, method recognize().
@Override
public STTServiceHandle recognize(STTListener sttListener, AudioStream audioStream, Locale locale, Set<String> set) throws STTException {
    // Fail fast when the binding has no credentials configured.
    if (config.apiKey.isBlank() || config.instanceUrl.isBlank()) {
        throw new STTException("service is not correctly configured");
    }
    String contentType = getContentType(audioStream);
    if (contentType == null) {
        throw new STTException("Unsupported format, unable to resolve audio content type");
    }
    logger.debug("Content-Type: {}", contentType);
    var speechToText = new SpeechToText(new IamAuthenticator.Builder().apikey(config.apiKey).build());
    speechToText.setServiceUrl(config.instanceUrl);
    if (config.optOutLogging) {
        // Ask the Watson service not to retain request data for service improvement.
        speechToText.setDefaultHeaders(Map.of("X-Watson-Learning-Opt-Out", "1"));
    }
    RecognizeWithWebsocketsOptions wsOptions = new RecognizeWithWebsocketsOptions.Builder() //
            .audio(audioStream) //
            .contentType(contentType) //
            .redaction(config.redaction) //
            .smartFormatting(config.smartFormatting) //
            .model(locale.toLanguageTag() + "_BroadbandModel") //
            .interimResults(true) //
            .backgroundAudioSuppression(config.backgroundAudioSuppression) //
            .speechDetectorSensitivity(config.speechDetectorSensitivity) //
            .inactivityTimeout(config.inactivityTimeout) //
            .build();
    // Shared state between the background connection task and the returned handle.
    final AtomicReference<@Nullable WebSocket> socketRef = new AtomicReference<>();
    final AtomicBoolean aborted = new AtomicBoolean(false);
    executor.submit(() -> {
        int retries = 2;
        while (retries > 0) {
            try {
                socketRef.set(speechToText.recognizeUsingWebSocket(wsOptions,
                        new TranscriptionListener(sttListener, config, aborted)));
                return; // connected successfully
            } catch (RuntimeException e) {
                var cause = e.getCause();
                if (cause instanceof SSLPeerUnverifiedException) {
                    // Transient TLS handshake failures are worth a retry.
                    logger.debug("Retrying on error: {}", cause.getMessage());
                    retries--;
                } else {
                    var errorMessage = e.getMessage();
                    logger.warn("Aborting on error: {}", errorMessage);
                    sttListener.sttEventReceived(
                            new SpeechRecognitionErrorEvent(errorMessage != null ? errorMessage : "Unknown error"));
                    return;
                }
            }
        }
        // BUGFIX: previously the listener was never notified when all retries were
        // exhausted, leaving the dialog processor waiting indefinitely.
        sttListener.sttEventReceived(new SpeechRecognitionErrorEvent("Unable to connect to the service"));
    });
    return new STTServiceHandle() {
        @Override
        public void abort() {
            // Idempotent: only the first call tears down the socket.
            if (!aborted.getAndSet(true)) {
                var socket = socketRef.get();
                if (socket != null) {
                    socket.close(1000, null);
                    socket.cancel();
                    try {
                        // Give the close frame a moment to be delivered.
                        Thread.sleep(100);
                    } catch (InterruptedException e) {
                        // BUGFIX: restore the interrupt status instead of swallowing it.
                        Thread.currentThread().interrupt();
                    }
                }
            }
        }
    };
}
Use of org.openhab.core.voice.STTServiceHandle in project openhab-core by openhab.
Example from the class DialogProcessor, method abortSTT().
/**
 * Aborts any ongoing speech-to-text recognition and marks the STT server as
 * aborting so that late recognition events are ignored.
 */
private void abortSTT() {
    STTServiceHandle currentHandle = sttServiceHandle;
    if (currentHandle != null) {
        currentHandle.abort();
        sttServiceHandle = null;
    }
    isSTTServerAborting = true;
}
Use of org.openhab.core.voice.STTServiceHandle in project openhab-addons by openhab.
Example from the class GoogleSTTService, method recognize().
@Override
public STTServiceHandle recognize(STTListener sttListener, AudioStream audioStream, Locale locale, Set<String> set) {
    // Flag shared with the background task; set to true when the caller aborts.
    AtomicBoolean aborted = new AtomicBoolean(false);
    backgroundRecognize(sttListener, audioStream, aborted, locale, set);
    // STTServiceHandle has a single abstract method, so a lambda suffices:
    // aborting just raises the shared flag for the background task to observe.
    return () -> aborted.set(true);
}
Use of org.openhab.core.voice.STTServiceHandle in project openhab-core by openhab.
Example from the class STTServiceStub, method recognize().
@Override
public STTServiceHandle recognize(STTListener sttListener, AudioStream audioStream, Locale locale, Set<String> grammars) throws STTException {
    // Early throw keeps the happy path un-nested.
    if (exceptionExpected) {
        throw new STTException(EXCEPTION_MESSAGE);
    }
    if (errorExpected) {
        sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(ERROR_MESSAGE));
    } else {
        recognized = true;
        sttListener.sttEventReceived(new SpeechRecognitionEvent(RECOGNIZED_TEXT, 0.75f));
    }
    // abort() will not be used in the tests, so the handle is a no-op lambda.
    return () -> {
    };
}
Aggregations