Search in sources:

Example 1 with StreamingRecognitionResult

use of com.google.cloud.speech.v1beta1.StreamingRecognitionResult in project Saiy-PS by brandall76.

the class RecognitionGoogleCloud method onNext.

/**
 * Receives a value from the stream.
 *
 * <p>Can be called many times but is never called after {@link #onError(Throwable)} or {@link
 * #onCompleted()} are called.
 *
 * <p>Unary calls must invoke onNext at most once.  Clients may invoke onNext at most once for
 * server streaming calls, but may receive many onNext callbacks.  Servers may invoke onNext at
 * most once for client streaming calls, but may receive many onNext callbacks.
 *
 * <p>If an exception is thrown by an implementation the caller is expected to terminate the
 * stream by calling {@link #onError(Throwable)} with the caught exception prior to
 * propagating it.
 *
 * @param value the value passed to the stream
 */
@Override
public void onNext(final StreamingRecognizeResponse value) {
    if (DEBUG) {
        MyLog.i(CLS_NAME, "onNext: " + TextFormat.printToString(value));
    }
    final StreamingRecognizeResponse.EndpointerType endpointerType = value.getEndpointerType();
    switch(endpointerType) {
        case START_OF_SPEECH:
            if (DEBUG) {
                MyLog.i(CLS_NAME, "onNext: START_OF_SPEECH");
            }
            // Notify the listener of speech onset at most once per session.
            if (doBeginning.get()) {
                doBeginning.set(false);
                listener.onBeginningOfSpeech();
            }
            break;
        // These three endpointer events all mean no further audio is expected;
        // their handling is identical, so fall through to a single body and
        // stop listening at most once per session.
        case END_OF_SPEECH:
        case END_OF_AUDIO:
        case END_OF_UTTERANCE:
            if (DEBUG) {
                MyLog.i(CLS_NAME, "onNext: " + endpointerType.name());
            }
            if (doEnd.get()) {
                doEnd.set(false);
                stopListening();
            }
            break;
        case UNRECOGNIZED:
            if (DEBUG) {
                MyLog.i(CLS_NAME, "onNext: UNRECOGNIZED");
            }
            break;
        case ENDPOINTER_EVENT_UNSPECIFIED:
        default:
            if (DEBUG) {
                MyLog.i(CLS_NAME, "onNext: ENDPOINTER_EVENT_UNSPECIFIED");
            }
            break;
    }
    if (doResults.get()) {
        if (UtilsList.notNaked(value.getResultsList())) {
            partialArray.clear();
            resultsArray.clear();
            confidenceArray.clear();
            bundle.clear();
            // True once ANY result in this response is final. The previous code
            // kept only the LAST result's flag, so a final result followed by
            // interim ones would have been dispatched as a partial result.
            boolean anyFinal = false;
            for (final StreamingRecognitionResult recognitionResult : value.getResultsList()) {
                if (DEBUG) {
                    MyLog.i(CLS_NAME, "recognitionResult stability: " + recognitionResult.getStability());
                }
                final boolean isFinal = recognitionResult.getIsFinal();
                anyFinal |= isFinal;
                if (DEBUG) {
                    MyLog.i(CLS_NAME, "isFinal: " + isFinal);
                }
                for (final SpeechRecognitionAlternative alternative : recognitionResult.getAlternativesList()) {
                    if (DEBUG) {
                        MyLog.i(CLS_NAME, "alternative: " + alternative.getTranscript());
                    }
                    if (isFinal) {
                        // Final hypotheses: keep transcript and its confidence score.
                        resultsArray.add(alternative.getTranscript());
                        confidenceArray.add(alternative.getConfidence());
                    } else {
                        // Interim hypotheses: accumulate into a single running
                        // transcript held at index 0 of partialArray.
                        if (partialArray.isEmpty()) {
                            partialArray.add(alternative.getTranscript());
                        } else {
                            partialArray.add(partialArray.get(0) + " " + alternative.getTranscript());
                        }
                    }
                }
            }
            // Stop accepting results once a final result has been delivered.
            doResults.set(!anyFinal);
            if (anyFinal) {
                bundle.putStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION, resultsArray);
                bundle.putFloatArray(SpeechRecognizer.CONFIDENCE_SCORES, ArrayUtils.toPrimitive(confidenceArray.toArray(new Float[0]), 0.0F));
                listener.onResults(bundle);
                stopListening();
            } else {
                bundle.putStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION, partialArray);
                listener.onPartialResults(bundle);
            }
        } else {
            if (DEBUG) {
                MyLog.i(CLS_NAME, "onNext: results list naked");
            }
        }
    } else {
        if (DEBUG) {
            MyLog.i(CLS_NAME, "onNext: doResults false");
        }
    }
}
Also used : SpeechRecognitionAlternative(com.google.cloud.speech.v1beta1.SpeechRecognitionAlternative) StreamingRecognitionResult(com.google.cloud.speech.v1beta1.StreamingRecognitionResult) StreamingRecognizeResponse(com.google.cloud.speech.v1beta1.StreamingRecognizeResponse)

Example 2 with StreamingRecognitionResult

use of com.google.cloud.speech.v1p1beta1.StreamingRecognitionResult in project java-docs-samples by GoogleCloudPlatform.

the class Recognize method streamingRecognizeFile.

/**
 * Performs streaming speech recognition on raw PCM audio data.
 *
 * @param fileName the path to a PCM audio file to transcribe.
 * @throws Exception on I/O failure, API error, or interruption while waiting for
 *                   the streamed responses.
 */
public static void streamingRecognizeFile(String fileName) throws Exception {
    Path path = Paths.get(fileName);
    byte[] data = Files.readAllBytes(path);
    // Instantiates a client with GOOGLE_APPLICATION_CREDENTIALS
    try (SpeechClient speech = SpeechClient.create()) {
        // Configure request with local raw PCM audio: 16 kHz, 16-bit linear, en-US.
        RecognitionConfig recConfig =
                RecognitionConfig.newBuilder()
                        .setEncoding(AudioEncoding.LINEAR16)
                        .setLanguageCode("en-US")
                        .setSampleRateHertz(16000)
                        .setModel("default")
                        .build();
        StreamingRecognitionConfig config =
                StreamingRecognitionConfig.newBuilder().setConfig(recConfig).build();

        // Local observer that collects every streamed message and exposes the
        // complete list (or the failure) through a SettableFuture.
        class ResponseApiStreamingObserver<T> implements ApiStreamObserver<T> {

            private final SettableFuture<List<T>> future = SettableFuture.create();

            private final List<T> messages = new java.util.ArrayList<T>();

            @Override
            public void onNext(T message) {
                messages.add(message);
            }

            @Override
            public void onError(Throwable t) {
                future.setException(t);
            }

            @Override
            public void onCompleted() {
                future.set(messages);
            }

            // Returns the SettableFuture object to get received messages / exceptions.
            public SettableFuture<List<T>> future() {
                return future;
            }
        }

        ResponseApiStreamingObserver<StreamingRecognizeResponse> responseObserver =
                new ResponseApiStreamingObserver<>();
        BidiStreamingCallable<StreamingRecognizeRequest, StreamingRecognizeResponse> callable =
                speech.streamingRecognizeCallable();
        ApiStreamObserver<StreamingRecognizeRequest> requestObserver =
                callable.bidiStreamingCall(responseObserver);
        // The first request must **only** contain the audio configuration:
        requestObserver.onNext(
                StreamingRecognizeRequest.newBuilder().setStreamingConfig(config).build());
        // Subsequent requests must **only** contain the audio data.
        requestObserver.onNext(
                StreamingRecognizeRequest.newBuilder()
                        .setAudioContent(ByteString.copyFrom(data))
                        .build());
        // Mark transmission as completed after sending the data.
        requestObserver.onCompleted();

        // Blocks until the stream completes (or propagates its failure).
        List<StreamingRecognizeResponse> responses = responseObserver.future().get();
        for (StreamingRecognizeResponse response : responses) {
            // For streaming recognize, the results list has one is_final result (if available)
            // followed by a number of in-progress results (if interim_results is true) for
            // subsequent utterances. Just print the first result here.
            if (response.getResultsList().isEmpty()) {
                // Some responses (e.g. event-only messages) carry no results;
                // unguarded get(0) would throw IndexOutOfBoundsException.
                continue;
            }
            StreamingRecognitionResult result = response.getResultsList().get(0);
            if (result.getAlternativesList().isEmpty()) {
                continue;
            }
            // There can be several alternative transcripts for a given chunk of speech. Just
            // use the first (most likely) one here.
            SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
            System.out.printf("Transcript : %s\n", alternative.getTranscript());
        }
    }
}
Also used : Path(java.nio.file.Path) SettableFuture(com.google.common.util.concurrent.SettableFuture) StreamingRecognitionConfig(com.google.cloud.speech.v1p1beta1.StreamingRecognitionConfig) SpeechRecognitionAlternative(com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative) StreamingRecognitionResult(com.google.cloud.speech.v1p1beta1.StreamingRecognitionResult) StreamingRecognizeRequest(com.google.cloud.speech.v1p1beta1.StreamingRecognizeRequest) ApiStreamObserver(com.google.api.gax.rpc.ApiStreamObserver) StreamingRecognitionConfig(com.google.cloud.speech.v1p1beta1.StreamingRecognitionConfig) RecognitionConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig) SpeechClient(com.google.cloud.speech.v1p1beta1.SpeechClient) List(java.util.List) StreamingRecognizeResponse(com.google.cloud.speech.v1p1beta1.StreamingRecognizeResponse)

Aggregations

ApiStreamObserver (com.google.api.gax.rpc.ApiStreamObserver)1 SpeechRecognitionAlternative (com.google.cloud.speech.v1beta1.SpeechRecognitionAlternative)1 StreamingRecognitionResult (com.google.cloud.speech.v1beta1.StreamingRecognitionResult)1 StreamingRecognizeResponse (com.google.cloud.speech.v1beta1.StreamingRecognizeResponse)1 RecognitionConfig (com.google.cloud.speech.v1p1beta1.RecognitionConfig)1 SpeechClient (com.google.cloud.speech.v1p1beta1.SpeechClient)1 SpeechRecognitionAlternative (com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative)1 StreamingRecognitionConfig (com.google.cloud.speech.v1p1beta1.StreamingRecognitionConfig)1 StreamingRecognitionResult (com.google.cloud.speech.v1p1beta1.StreamingRecognitionResult)1 StreamingRecognizeRequest (com.google.cloud.speech.v1p1beta1.StreamingRecognizeRequest)1 StreamingRecognizeResponse (com.google.cloud.speech.v1p1beta1.StreamingRecognizeResponse)1 SettableFuture (com.google.common.util.concurrent.SettableFuture)1 Path (java.nio.file.Path)1 List (java.util.List)1