Use of com.google.cloud.speech.v1.SpeechRecognitionResult in project google-cloud-java by GoogleCloudPlatform.
The class RecognizeSpeech, method main:
public static void main(String... args) throws Exception {
  // Instantiates a client; closed automatically by try-with-resources
  try (SpeechClient speech = SpeechClient.create()) {
    // The path to the audio file to transcribe
    // for example "./resources/audio.raw";
    String fileName = "your/speech/audio/file.raw";

    // Reads the audio file into memory
    Path path = Paths.get(fileName);
    byte[] data = Files.readAllBytes(path);
    ByteString audioBytes = ByteString.copyFrom(data);

    // Builds the sync recognize request
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.LINEAR16)
            .setSampleRateHertz(16000)
            .setLanguageCode("en-US")
            .build();
    RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build();

    // Performs speech recognition on the audio file
    RecognizeResponse response = speech.recognize(config, audio);
    List<SpeechRecognitionResult> results = response.getResultsList();
    for (SpeechRecognitionResult result : results) {
      List<SpeechRecognitionAlternative> alternatives = result.getAlternativesList();
      for (SpeechRecognitionAlternative alternative : alternatives) {
        System.out.printf("Transcription: %s%n", alternative.getTranscript());
      }
    }
  }
}
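Each SpeechRecognitionAlternative also exposes the recognizer's confidence for its transcript. A minimal sketch of the inner loop from the sample above, assuming the same variables are in scope (getConfidence() returns a float between 0 and 1):

for (SpeechRecognitionAlternative alternative : alternatives) {
  // Print each transcript together with the recognizer's confidence estimate
  System.out.printf("Transcription: %s (confidence: %.2f)%n",
      alternative.getTranscript(), alternative.getConfidence());
}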
Use of com.google.cloud.speech.v1.SpeechRecognitionResult in project java-docs-samples by GoogleCloudPlatform.
The class Recognize, method transcribeModelSelection:
// [START speech_transcribe_model_selection]
/**
 * Performs transcription of the given audio file synchronously with the selected model.
 *
 * @param fileName the path to an audio file to transcribe
 */
public static void transcribeModelSelection(String fileName) throws Exception {
  Path path = Paths.get(fileName);
  byte[] content = Files.readAllBytes(path);

  try (SpeechClient speech = SpeechClient.create()) {
    // Configure the request to use the "video" transcription model
    RecognitionConfig recConfig =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .setModel("video")
            .build();
    RecognitionAudio recognitionAudio =
        RecognitionAudio.newBuilder().setContent(ByteString.copyFrom(content)).build();

    RecognizeResponse recognizeResponse = speech.recognize(recConfig, recognitionAudio);
    // Just print the first result here.
    SpeechRecognitionResult result = recognizeResponse.getResultsList().get(0);
    // There can be several alternative transcripts for a given chunk of speech. Just use the
    // first (most likely) one here.
    SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
    System.out.printf("Transcript: %s%n", alternative.getTranscript());
  }
  // [END speech_transcribe_model_selection]
}
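Besides "video", the v1 API accepts several other model names, including "phone_call", "command_and_search", and "default". Only the config changes; a sketch for telephony audio, assuming the source is 8 kHz PCM as phone recordings typically are:

RecognitionConfig recConfig =
    RecognitionConfig.newBuilder()
        .setEncoding(AudioEncoding.LINEAR16)
        .setLanguageCode("en-US")
        .setSampleRateHertz(8000) // telephony audio is usually sampled at 8 kHz
        .setModel("phone_call")   // select the phone-call model instead of "video"
        .build();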
Use of com.google.cloud.speech.v1.SpeechRecognitionResult in project java-docs-samples by GoogleCloudPlatform.
The class Recognize, method syncRecognizeWords:
/**
 * Performs sync recognize and prints word time offsets.
 *
 * @param fileName the path to a PCM audio file to transcribe and get word offsets for.
 */
public static void syncRecognizeWords(String fileName) throws Exception {
  try (SpeechClient speech = SpeechClient.create()) {
    Path path = Paths.get(fileName);
    byte[] data = Files.readAllBytes(path);
    ByteString audioBytes = ByteString.copyFrom(data);

    // Configure request with local raw PCM audio and enable word time offsets
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .setEnableWordTimeOffsets(true)
            .build();
    RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build();

    // Use blocking call to get audio transcript
    RecognizeResponse response = speech.recognize(config, audio);
    List<SpeechRecognitionResult> results = response.getResultsList();

    for (SpeechRecognitionResult result : results) {
      // There can be several alternative transcripts for a given chunk of speech. Just use the
      // first (most likely) one here.
      SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
      System.out.printf("Transcription: %s%n", alternative.getTranscript());
      for (WordInfo wordInfo : alternative.getWordsList()) {
        System.out.println(wordInfo.getWord());
        // Offsets are protobuf Durations; this prints them truncated to tenths of a second
        System.out.printf(
            "\t%s.%s sec - %s.%s sec%n",
            wordInfo.getStartTime().getSeconds(),
            wordInfo.getStartTime().getNanos() / 100000000,
            wordInfo.getEndTime().getSeconds(),
            wordInfo.getEndTime().getNanos() / 100000000);
      }
    }
  }
}
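The nanos / 100000000 division above keeps only the tenths-of-a-second digit. For full precision, the protobuf seconds and nanos fields can be combined into fractional seconds; a minimal sketch over the same WordInfo list:

for (WordInfo wordInfo : alternative.getWordsList()) {
  // Combine whole seconds and nanoseconds into one double value of seconds
  double start = wordInfo.getStartTime().getSeconds() + wordInfo.getStartTime().getNanos() / 1e9;
  double end = wordInfo.getEndTime().getSeconds() + wordInfo.getEndTime().getNanos() / 1e9;
  System.out.printf("%s: %.3f sec - %.3f sec%n", wordInfo.getWord(), start, end);
}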
Use of com.google.cloud.speech.v1.SpeechRecognitionResult in project java-docs-samples by GoogleCloudPlatform.
The class Recognize, method asyncRecognizeFile:
/**
 * Performs non-blocking speech recognition on raw PCM audio and prints the transcription.
 * Note that transcription is limited to 60 seconds of audio.
 *
 * @param fileName the path to a PCM audio file to transcribe.
 */
public static void asyncRecognizeFile(String fileName) throws Exception {
  // Instantiates a client with GOOGLE_APPLICATION_CREDENTIALS
  try (SpeechClient speech = SpeechClient.create()) {
    Path path = Paths.get(fileName);
    byte[] data = Files.readAllBytes(path);
    ByteString audioBytes = ByteString.copyFrom(data);

    // Configure request with local raw PCM audio
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
    RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build();

    // Use non-blocking call for getting file transcription
    OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> response =
        speech.longRunningRecognizeAsync(config, audio);

    while (!response.isDone()) {
      System.out.println("Waiting for response...");
      Thread.sleep(10000);
    }

    List<SpeechRecognitionResult> results = response.get().getResultsList();

    for (SpeechRecognitionResult result : results) {
      // There can be several alternative transcripts for a given chunk of speech. Just use the
      // first (most likely) one here.
      SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
      System.out.printf("Transcription: %s%n", alternative.getTranscript());
    }
  }
}
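The poll-and-sleep loop works, but OperationFuture implements java.util.concurrent.Future, so a bounded blocking wait is simpler when you do not need progress output; a sketch, assuming a five-minute ceiling is acceptable and java.util.concurrent.TimeUnit is imported:

// Blocks for at most five minutes; throws TimeoutException if the operation is still running
LongRunningRecognizeResponse longRunningResponse = response.get(5, TimeUnit.MINUTES);
List<SpeechRecognitionResult> results = longRunningResponse.getResultsList();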
Use of com.google.cloud.speech.v1.SpeechRecognitionResult in project java-docs-samples by GoogleCloudPlatform.
The class Recognize, method syncRecognizeGcs:
/**
 * Performs speech recognition on a remote FLAC file and prints the transcription.
 *
 * @param gcsUri the path to the remote FLAC audio file to transcribe.
 */
public static void syncRecognizeGcs(String gcsUri) throws Exception {
  // Instantiates a client with GOOGLE_APPLICATION_CREDENTIALS
  try (SpeechClient speech = SpeechClient.create()) {
    // Builds the request for the remote FLAC file
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.FLAC)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
    RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(gcsUri).build();

    // Use blocking call for getting audio transcript
    RecognizeResponse response = speech.recognize(config, audio);
    List<SpeechRecognitionResult> results = response.getResultsList();

    for (SpeechRecognitionResult result : results) {
      // There can be several alternative transcripts for a given chunk of speech. Just use the
      // first (most likely) one here.
      SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
      System.out.printf("Transcription: %s%n", alternative.getTranscript());
    }
  }
}
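Because this variant passes a gs:// URI via setUri rather than inline bytes, the audio never has to be read into local memory. A usage sketch, with a hypothetical bucket and object name:

// Replace the bucket and object below with your own FLAC file in Cloud Storage
syncRecognizeGcs("gs://your-bucket/speech/audio.flac");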