Example 6 with SpeechRecognitionResult

use of com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult in project java-docs-samples by GoogleCloudPlatform.

the class Recognize method transcribeModelSelection.

// [START speech_transcribe_model_selection]
/**
 * Performs transcription of the given audio file synchronously with
 * the selected model.
 * @param fileName the path to an audio file to transcribe
 */
public static void transcribeModelSelection(String fileName) throws Exception {
    Path path = Paths.get(fileName);
    byte[] content = Files.readAllBytes(path);
    try (SpeechClient speech = SpeechClient.create()) {
        // Configure the request to use the "video" recognition model
        RecognitionConfig recConfig = RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .setModel("video")
            .build();
        RecognitionAudio recognitionAudio = RecognitionAudio.newBuilder()
            .setContent(ByteString.copyFrom(content))
            .build();
        RecognizeResponse recognizeResponse = speech.recognize(recConfig, recognitionAudio);
        // Just print the first result here.
        SpeechRecognitionResult result = recognizeResponse.getResultsList().get(0);
        // There can be several alternative transcripts for a given chunk of speech. Just use the
        // first (most likely) one here.
        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
        System.out.printf("Transcript : %s\n", alternative.getTranscript());
    }
}
// [END speech_transcribe_model_selection]
Also used : Path(java.nio.file.Path) SpeechRecognitionAlternative(com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative) RecognitionAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio) StreamingRecognitionConfig(com.google.cloud.speech.v1p1beta1.StreamingRecognitionConfig) RecognitionConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig) SpeechClient(com.google.cloud.speech.v1p1beta1.SpeechClient) RecognizeResponse(com.google.cloud.speech.v1p1beta1.RecognizeResponse) StreamingRecognizeResponse(com.google.cloud.speech.v1p1beta1.StreamingRecognizeResponse) LongRunningRecognizeResponse(com.google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse) SpeechRecognitionResult(com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult)
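A sketch of invoking this method; the file path below is a placeholder, not part of the sample:

// Placeholder path: any local 16 kHz LINEAR16 WAV file works here.
// Besides "video", documented values for RecognitionConfig.setModel include
// "phone_call", "command_and_search", and "default".
transcribeModelSelection("path/to/audio.wav");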

Example 7 with SpeechRecognitionResult

use of com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult in project java-docs-samples by GoogleCloudPlatform.

the class Recognize method syncRecognizeWords.

/**
 * Performs sync recognize and prints word time offsets.
 *
 * @param fileName the path to a PCM audio file to transcribe and print word time offsets for.
 */
public static void syncRecognizeWords(String fileName) throws Exception {
    try (SpeechClient speech = SpeechClient.create()) {
        Path path = Paths.get(fileName);
        byte[] data = Files.readAllBytes(path);
        ByteString audioBytes = ByteString.copyFrom(data);
        // Configure request with local raw PCM audio
        RecognitionConfig config = RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .setEnableWordTimeOffsets(true)
            .build();
        RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build();
        // Use blocking call to get audio transcript
        RecognizeResponse response = speech.recognize(config, audio);
        List<SpeechRecognitionResult> results = response.getResultsList();
        for (SpeechRecognitionResult result : results) {
            // There can be several alternative transcripts for a given chunk of speech. Just use the
            // first (most likely) one here.
            SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
            System.out.printf("Transcription: %s%n", alternative.getTranscript());
            for (WordInfo wordInfo : alternative.getWordsList()) {
                System.out.println(wordInfo.getWord());
                System.out.printf("\t%s.%s sec - %s.%s sec\n", wordInfo.getStartTime().getSeconds(), wordInfo.getStartTime().getNanos() / 100000000, wordInfo.getEndTime().getSeconds(), wordInfo.getEndTime().getNanos() / 100000000);
            }
        }
    }
}
Also used : Path(java.nio.file.Path) SpeechRecognitionAlternative(com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative) RecognitionAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio) ByteString(com.google.protobuf.ByteString) StreamingRecognitionConfig(com.google.cloud.speech.v1p1beta1.StreamingRecognitionConfig) RecognitionConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig) SpeechClient(com.google.cloud.speech.v1p1beta1.SpeechClient) RecognizeResponse(com.google.cloud.speech.v1p1beta1.RecognizeResponse) StreamingRecognizeResponse(com.google.cloud.speech.v1p1beta1.StreamingRecognizeResponse) LongRunningRecognizeResponse(com.google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse) SpeechRecognitionResult(com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult) WordInfo(com.google.cloud.speech.v1p1beta1.WordInfo)
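The %s.%s formatting above only shows tenths of a second. If finer resolution is wanted, the proto Duration returned by getStartTime()/getEndTime() can be formatted directly; a minimal sketch (the helper name formatOffset is ours, not part of the sample):

// Hypothetical helper: render a com.google.protobuf.Duration with millisecond precision.
static String formatOffset(com.google.protobuf.Duration d) {
    return String.format("%d.%03d sec", d.getSeconds(), d.getNanos() / 1_000_000);
}
// e.g. System.out.printf("\t%s - %s%n",
//     formatOffset(wordInfo.getStartTime()), formatOffset(wordInfo.getEndTime()));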

Example 8 with SpeechRecognitionResult

use of com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult in project java-docs-samples by GoogleCloudPlatform.

the class Recognize method asyncRecognizeFile.

/**
 * Performs non-blocking speech recognition on raw PCM audio and prints
 * the transcription. Note that transcription is limited to 60 seconds of audio.
 *
 * @param fileName the path to a PCM audio file to transcribe.
 */
public static void asyncRecognizeFile(String fileName) throws Exception {
    // Instantiates a client with GOOGLE_APPLICATION_CREDENTIALS
    try (SpeechClient speech = SpeechClient.create()) {
        Path path = Paths.get(fileName);
        byte[] data = Files.readAllBytes(path);
        ByteString audioBytes = ByteString.copyFrom(data);
        // Configure request with local raw PCM audio
        RecognitionConfig config = RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
        RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build();
        // Use non-blocking call for getting file transcription
        OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> response = speech.longRunningRecognizeAsync(config, audio);
        while (!response.isDone()) {
            System.out.println("Waiting for response...");
            Thread.sleep(10000);
        }
        List<SpeechRecognitionResult> results = response.get().getResultsList();
        for (SpeechRecognitionResult result : results) {
            // There can be several alternative transcripts for a given chunk of speech. Just use the
            // first (most likely) one here.
            SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
            System.out.printf("Transcription: %s%n", alternative.getTranscript());
        }
    }
}
Also used : Path(java.nio.file.Path) LongRunningRecognizeResponse(com.google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse) SpeechRecognitionAlternative(com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative) RecognitionAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio) ByteString(com.google.protobuf.ByteString) StreamingRecognitionConfig(com.google.cloud.speech.v1p1beta1.StreamingRecognitionConfig) RecognitionConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig) SpeechClient(com.google.cloud.speech.v1p1beta1.SpeechClient) SpeechRecognitionResult(com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult) LongRunningRecognizeMetadata(com.google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata)
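Instead of polling isDone() in a sleep loop, the returned OperationFuture can simply be awaited. A minimal sketch of the alternative, assuming the same config and audio as above and a java.util.concurrent.TimeUnit import; the five-minute bound is an arbitrary choice:

// Blocks until the long-running operation completes; throws TimeoutException
// if the bound expires first.
LongRunningRecognizeResponse longRunningResponse =
        speech.longRunningRecognizeAsync(config, audio).get(5, TimeUnit.MINUTES);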

Example 9 with SpeechRecognitionResult

use of com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult in project java-docs-samples by GoogleCloudPlatform.

the class Recognize method syncRecognizeGcs.

/**
 * Performs speech recognition on remote FLAC file and prints the transcription.
 *
 * @param gcsUri the path to the remote FLAC audio file to transcribe.
 */
public static void syncRecognizeGcs(String gcsUri) throws Exception {
    // Instantiates a client with GOOGLE_APPLICATION_CREDENTIALS
    try (SpeechClient speech = SpeechClient.create()) {
        // Builds the request for remote FLAC file
        RecognitionConfig config = RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.FLAC)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
        RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(gcsUri).build();
        // Use blocking call for getting audio transcript
        RecognizeResponse response = speech.recognize(config, audio);
        List<SpeechRecognitionResult> results = response.getResultsList();
        for (SpeechRecognitionResult result : results) {
            // There can be several alternative transcripts for a given chunk of speech. Just use the
            // first (most likely) one here.
            SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
            System.out.printf("Transcription: %s%n", alternative.getTranscript());
        }
    }
}
Also used : SpeechRecognitionAlternative(com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative) RecognitionAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio) StreamingRecognitionConfig(com.google.cloud.speech.v1p1beta1.StreamingRecognitionConfig) RecognitionConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig) SpeechClient(com.google.cloud.speech.v1p1beta1.SpeechClient) RecognizeResponse(com.google.cloud.speech.v1p1beta1.RecognizeResponse) StreamingRecognizeResponse(com.google.cloud.speech.v1p1beta1.StreamingRecognizeResponse) LongRunningRecognizeResponse(com.google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse) SpeechRecognitionResult(com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult)
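A sketch of a call; the bucket and object names below are placeholders, not part of the sample:

// gcsUri must be a Cloud Storage URI of the form "gs://<bucket>/<object>".
syncRecognizeGcs("gs://my-bucket/audio/sample.flac");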

Example 10 with SpeechRecognitionResult

use of com.google.assistant.embedded.v1alpha2.SpeechRecognitionResult in project sample-googleassistant by androidthings.

the class AssistantActivity method onCreate.

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    Log.i(TAG, "starting assistant demo");
    setContentView(R.layout.activity_main);
    ListView assistantRequestsListView = findViewById(R.id.assistantRequestsListView);
    mAssistantRequestsAdapter = new ArrayAdapter<>(this, android.R.layout.simple_list_item_1, mAssistantRequests);
    mMainHandler = new Handler(getMainLooper());
    assistantRequestsListView.setAdapter(mAssistantRequestsAdapter);
    mButtonWidget = findViewById(R.id.assistantQueryButton);
    mButtonWidget.setOnClickListener(new OnClickListener() {

        @Override
        public void onClick(View view) {
            mEmbeddedAssistant.startConversation();
        }
    });
    // Audio routing configuration: use default routing.
    AudioDeviceInfo audioInputDevice = null;
    AudioDeviceInfo audioOutputDevice = null;
    if (USE_VOICEHAT_I2S_DAC) {
        audioInputDevice = findAudioDevice(AudioManager.GET_DEVICES_INPUTS, AudioDeviceInfo.TYPE_BUS);
        if (audioInputDevice == null) {
            Log.e(TAG, "failed to find I2S audio input device, using default");
        }
        audioOutputDevice = findAudioDevice(AudioManager.GET_DEVICES_OUTPUTS, AudioDeviceInfo.TYPE_BUS);
        if (audioOutputDevice == null) {
            Log.e(TAG, "failed to found I2S audio output device, using default");
        }
    }
    try {
        if (USE_VOICEHAT_I2S_DAC) {
            Log.i(TAG, "initializing DAC trigger");
            mDac = VoiceHat.openDac();
            mDac.setSdMode(Max98357A.SD_MODE_SHUTDOWN);
            mButton = VoiceHat.openButton();
            mLed = VoiceHat.openLed();
        } else {
            PeripheralManager pioManager = PeripheralManager.getInstance();
            mButton = new Button(BoardDefaults.getGPIOForButton(), Button.LogicState.PRESSED_WHEN_LOW);
            mLed = pioManager.openGpio(BoardDefaults.getGPIOForLED());
        }
        mButton.setDebounceDelay(BUTTON_DEBOUNCE_DELAY_MS);
        mButton.setOnButtonEventListener(this);
        mLed.setDirection(Gpio.DIRECTION_OUT_INITIALLY_LOW);
        mLed.setActiveType(Gpio.ACTIVE_HIGH);
    } catch (IOException e) {
        Log.e(TAG, "error configuring peripherals:", e);
        return;
    }
    // Set volume from preferences
    SharedPreferences preferences = PreferenceManager.getDefaultSharedPreferences(this);
    int initVolume = preferences.getInt(PREF_CURRENT_VOLUME, DEFAULT_VOLUME);
    Log.i(TAG, "setting audio track volume to: " + initVolume);
    UserCredentials userCredentials = null;
    try {
        userCredentials = EmbeddedAssistant.generateCredentials(this, R.raw.credentials);
    } catch (IOException | JSONException e) {
        Log.e(TAG, "error getting user credentials", e);
    }
    mEmbeddedAssistant = new EmbeddedAssistant.Builder()
            .setCredentials(userCredentials)
            .setDeviceInstanceId(DEVICE_INSTANCE_ID)
            .setDeviceModelId(DEVICE_MODEL_ID)
            .setLanguageCode(LANGUAGE_CODE)
            .setAudioInputDevice(audioInputDevice)
            .setAudioOutputDevice(audioOutputDevice)
            .setAudioSampleRate(SAMPLE_RATE)
            .setAudioVolume(initVolume)
            .setRequestCallback(new RequestCallback() {

        @Override
        public void onRequestStart() {
            Log.i(TAG, "starting assistant request, enable microphones");
            mButtonWidget.setText(R.string.button_listening);
            mButtonWidget.setEnabled(false);
        }

        @Override
        public void onSpeechRecognition(List<SpeechRecognitionResult> results) {
            for (final SpeechRecognitionResult result : results) {
                Log.i(TAG, "assistant request text: " + result.getTranscript() + " stability: " + Float.toString(result.getStability()));
                mAssistantRequestsAdapter.add(result.getTranscript());
            }
        }
    }).setConversationCallback(new ConversationCallback() {

        @Override
        public void onResponseStarted() {
            super.onResponseStarted();
            // When bus type is switched, the AudioManager needs to reset the stream volume
            if (mDac != null) {
                try {
                    mDac.setSdMode(Max98357A.SD_MODE_LEFT);
                } catch (IOException e) {
                    Log.e(TAG, "error enabling DAC", e);
                }
            }
        }

        @Override
        public void onResponseFinished() {
            super.onResponseFinished();
            if (mDac != null) {
                try {
                    mDac.setSdMode(Max98357A.SD_MODE_SHUTDOWN);
                } catch (IOException e) {
                    Log.e(TAG, "error disabling DAC", e);
                }
            }
            if (mLed != null) {
                try {
                    mLed.setValue(false);
                } catch (IOException e) {
                    Log.e(TAG, "cannot turn off LED", e);
                }
            }
        }

        @Override
        public void onError(Throwable throwable) {
            Log.e(TAG, "assist error: " + throwable.getMessage(), throwable);
        }

        @Override
        public void onVolumeChanged(int percentage) {
            Log.i(TAG, "assistant volume changed: " + percentage);
            // Update our shared preferences
            Editor editor = PreferenceManager.getDefaultSharedPreferences(AssistantActivity.this).edit();
            editor.putInt(PREF_CURRENT_VOLUME, percentage);
            editor.apply();
        }

        @Override
        public void onConversationFinished() {
            Log.i(TAG, "assistant conversation finished");
            mButtonWidget.setText(R.string.button_new_request);
            mButtonWidget.setEnabled(true);
        }

        @Override
        public void onAssistantResponse(final String response) {
            if (!response.isEmpty()) {
                mMainHandler.post(new Runnable() {

                    @Override
                    public void run() {
                        mAssistantRequestsAdapter.add("Google Assistant: " + response);
                    }
                });
            }
        }

        @Override
        public void onDeviceAction(String intentName, JSONObject parameters) {
            if (parameters != null) {
                Log.d(TAG, "Got device action " + intentName + " with parameters: " + parameters.toString());
            } else {
                Log.d(TAG, "Got device action " + intentName + " with no parameters");
            }
            if (intentName.equals("action.devices.commands.OnOff")) {
                try {
                    boolean turnOn = parameters.getBoolean("on");
                    mLed.setValue(turnOn);
                } catch (JSONException e) {
                    Log.e(TAG, "Cannot get value of command", e);
                } catch (IOException e) {
                    Log.e(TAG, "Cannot set value of LED", e);
                }
            }
        }
    }).build();
    mEmbeddedAssistant.connect();
}
Also used : SpeechRecognitionResult(com.google.assistant.embedded.v1alpha2.SpeechRecognitionResult) ListView(android.widget.ListView) Button(com.google.android.things.contrib.driver.button.Button) ArrayList(java.util.ArrayList) List(java.util.List) PeripheralManager(com.google.android.things.pio.PeripheralManager) AudioDeviceInfo(android.media.AudioDeviceInfo) SharedPreferences(android.content.SharedPreferences) Handler(android.os.Handler) JSONException(org.json.JSONException) ConversationCallback(com.example.androidthings.assistant.EmbeddedAssistant.ConversationCallback) IOException(java.io.IOException) View(android.view.View) ListView(android.widget.ListView) RequestCallback(com.example.androidthings.assistant.EmbeddedAssistant.RequestCallback) JSONObject(org.json.JSONObject) OnClickListener(android.view.View.OnClickListener) UserCredentials(com.google.auth.oauth2.UserCredentials) Editor(android.content.SharedPreferences.Editor)
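The findAudioDevice helper called in onCreate is not shown in this excerpt. A minimal sketch of what it plausibly does, using the standard AudioManager API and assuming android.content.Context and android.media.AudioManager imports (the sample's actual implementation may differ):

// Hypothetical reconstruction: return the first attached device of the given type.
private AudioDeviceInfo findAudioDevice(int deviceFlag, int deviceType) {
    AudioManager manager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
    for (AudioDeviceInfo device : manager.getDevices(deviceFlag)) {
        if (device.getType() == deviceType) {
            return device;
        }
    }
    return null;
}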

Aggregations

RecognitionAudio (com.google.cloud.speech.v1p1beta1.RecognitionAudio) 9
RecognitionConfig (com.google.cloud.speech.v1p1beta1.RecognitionConfig) 9
SpeechClient (com.google.cloud.speech.v1p1beta1.SpeechClient) 9
SpeechRecognitionAlternative (com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative) 9
SpeechRecognitionResult (com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult) 9
LongRunningRecognizeResponse (com.google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse) 8
StreamingRecognitionConfig (com.google.cloud.speech.v1p1beta1.StreamingRecognitionConfig) 8
Path (java.nio.file.Path) 6
RecognizeResponse (com.google.cloud.speech.v1p1beta1.RecognizeResponse) 5
ByteString (com.google.protobuf.ByteString) 5
LongRunningRecognizeMetadata (com.google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata) 4
StreamingRecognizeResponse (com.google.cloud.speech.v1p1beta1.StreamingRecognizeResponse) 4
WordInfo (com.google.cloud.speech.v1p1beta1.WordInfo) 2
SharedPreferences (android.content.SharedPreferences) 1
Editor (android.content.SharedPreferences.Editor) 1
AudioDeviceInfo (android.media.AudioDeviceInfo) 1
Handler (android.os.Handler) 1
View (android.view.View) 1
OnClickListener (android.view.View.OnClickListener) 1
ListView (android.widget.ListView) 1