use of com.google.cloud.speech.v1.SpeechSettings in project java-speech by googleapis.
the class Recognize method asyncRecognizeGcs.
// [START speech_transcribe_async_gcs]
/**
 * Performs non-blocking speech recognition on a remote FLAC file and prints the transcription.
 *
 * @param gcsUri the path to the remote FLAC audio file to transcribe.
 */
public static void asyncRecognizeGcs(String gcsUri) throws Exception {
  // Configure polling algorithm
  SpeechSettings.Builder speechSettings = SpeechSettings.newBuilder();
  TimedRetryAlgorithm timedRetryAlgorithm =
      OperationTimedPollAlgorithm.create(
          RetrySettings.newBuilder()
              .setInitialRetryDelay(Duration.ofMillis(500L))
              .setRetryDelayMultiplier(1.5)
              .setMaxRetryDelay(Duration.ofMillis(5000L))
              .setInitialRpcTimeout(Duration.ZERO) // ignored
              .setRpcTimeoutMultiplier(1.0) // ignored
              .setMaxRpcTimeout(Duration.ZERO) // ignored
              .setTotalTimeout(Duration.ofHours(24L)) // set polling timeout to 24 hours
              .build());
  speechSettings
      .longRunningRecognizeOperationSettings()
      .setPollingAlgorithm(timedRetryAlgorithm);
  // Instantiates a client with GOOGLE_APPLICATION_CREDENTIALS
  try (SpeechClient speech = SpeechClient.create(speechSettings.build())) {
    // Configure remote file request for FLAC
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.FLAC)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
    RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(gcsUri).build();
    // Use non-blocking call for getting file transcription
    OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> response =
        speech.longRunningRecognizeAsync(config, audio);
    while (!response.isDone()) {
      System.out.println("Waiting for response...");
      Thread.sleep(10000);
    }
    List<SpeechRecognitionResult> results = response.get().getResultsList();
    for (SpeechRecognitionResult result : results) {
      // There can be several alternative transcripts for a given chunk of speech. Just use the
      // first (most likely) one here.
      SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
      System.out.printf("Transcription: %s\n", alternative.getTranscript());
    }
  }
}
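A minimal caller sketch for the method above (the gs:// URI is a placeholder, not part of the sample):
public static void main(String[] args) throws Exception {
  // Placeholder URI; point this at a 16 kHz FLAC file in your own Cloud Storage bucket.
  asyncRecognizeGcs("gs://your-bucket/path/to/audio.flac");
}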
use of com.google.cloud.speech.v1.SpeechSettings in project java-speech by googleapis.
the class SpeechTranscribeMultiRegion method speechTranscribeMultiRegion.
/**
 * Transcribe a remote audio file using a regional Speech-to-Text endpoint (here, the EU endpoint).
 *
 * @param gcsUri the path to the audio file
 */
public static void speechTranscribeMultiRegion(String gcsUri) throws Exception {
  // Use the SpeechSettings to initialize the SpeechClient with the regional endpoint.
  String endPoint = "eu-speech.googleapis.com:443";
  SpeechSettings speechSettings = SpeechSettings.newBuilder().setEndpoint(endPoint).build();
  // Instantiates a client with GOOGLE_APPLICATION_CREDENTIALS
  try (SpeechClient speech = SpeechClient.create(speechSettings)) {
    // Configure remote file request
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.FLAC)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
    // Set the remote path for the audio file
    RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(gcsUri).build();
    // Use blocking call to get audio transcript
    RecognizeResponse response = speech.recognize(config, audio);
    List<SpeechRecognitionResult> results = response.getResultsList();
    for (SpeechRecognitionResult result : results) {
      // There can be several alternative transcripts for a given chunk of speech. Just use the
      // first (most likely) one here.
      SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
      System.out.printf("Transcription: %s\n", alternative.getTranscript());
    }
  }
}
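A minimal caller sketch for the method above, assuming the audio sits in a bucket the EU endpoint can serve (the URI is a placeholder):
public static void main(String[] args) throws Exception {
  // Placeholder URI; the data should live in a location covered by eu-speech.googleapis.com.
  speechTranscribeMultiRegion("gs://your-eu-bucket/path/to/audio.flac");
}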