Use of com.google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest in project google-cloud-java by GoogleCloudPlatform.
From the class SpeechClientTest, method longRunningRecognizeTest:
@Test
@SuppressWarnings("all")
public void longRunningRecognizeTest() throws Exception {
  LongRunningRecognizeResponse expectedResponse = LongRunningRecognizeResponse.newBuilder().build();
  Operation resultOperation =
      Operation.newBuilder()
          .setName("longRunningRecognizeTest")
          .setDone(true)
          .setResponse(Any.pack(expectedResponse))
          .build();
  mockSpeech.addResponse(resultOperation);

  RecognitionConfig.AudioEncoding encoding = RecognitionConfig.AudioEncoding.FLAC;
  int sampleRateHertz = 44100;
  String languageCode = "en-US";
  RecognitionConfig config =
      RecognitionConfig.newBuilder()
          .setEncoding(encoding)
          .setSampleRateHertz(sampleRateHertz)
          .setLanguageCode(languageCode)
          .build();
  String uri = "gs://bucket_name/file_name.flac";
  RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(uri).build();

  LongRunningRecognizeResponse actualResponse =
      client.longRunningRecognizeAsync(config, audio).get();
  Assert.assertEquals(expectedResponse, actualResponse);

  List<GeneratedMessageV3> actualRequests = mockSpeech.getRequests();
  Assert.assertEquals(1, actualRequests.size());
  LongRunningRecognizeRequest actualRequest = (LongRunningRecognizeRequest) actualRequests.get(0);
  Assert.assertEquals(config, actualRequest.getConfig());
  Assert.assertEquals(audio, actualRequest.getAudio());
}
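The test above refers to a mockSpeech service and a client field that the surrounding test class wires up outside this snippet. The following is a minimal sketch of what such a fixture might look like, assuming the GAPIC-generated MockSpeech helper and the gax testing utilities (MockServiceHelper, NoCredentialsProvider); names and details are illustrative, not copied from the project.

// Illustrative test fixture sketch (assumes com.google.api.gax.grpc.testing helpers).
private static MockSpeech mockSpeech;
private static MockServiceHelper serviceHelper;
private SpeechClient client;

@BeforeClass
public static void startStaticServer() {
  mockSpeech = new MockSpeech();
  // Start an in-process gRPC server backed by the mock service.
  serviceHelper =
      new MockServiceHelper("in-process-1", Arrays.<MockGrpcService>asList(mockSpeech));
  serviceHelper.start();
}

@AfterClass
public static void stopServer() {
  serviceHelper.stop();
}

@Before
public void setUp() throws IOException {
  serviceHelper.reset();
  // Point the client at the in-process channel and skip real credentials.
  SpeechSettings settings =
      SpeechSettings.newBuilder()
          .setTransportChannelProvider(serviceHelper.createChannelProvider())
          .setCredentialsProvider(NoCredentialsProvider.create())
          .build();
  client = SpeechClient.create(settings);
}

@After
public void tearDown() {
  client.close();
}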
Use of com.google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest in project java-speech by googleapis.
From the class ExportToStorageBeta, method exportToStorage:
// Exports the recognized output to the specified Cloud Storage destination.
public static void exportToStorage(
    String inputUri, String outputStorageUri, String encoding, int sampleRateHertz,
    String languageCode, String bucketName, String objectName)
    throws IOException, ExecutionException, InterruptedException {
  // Initialize a client that will be used to send requests. After completing all requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (SpeechClient speechClient = SpeechClient.create()) {
    RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(inputUri).build();
    AudioEncoding audioEncoding = AudioEncoding.valueOf(encoding);

    // Instantiates a Cloud Storage client
    Storage storage = StorageOptions.getDefaultInstance().getService();

    // Pass in the URI of the Cloud Storage bucket that will hold the transcription
    TranscriptOutputConfig outputConfig =
        TranscriptOutputConfig.newBuilder().setGcsUri(outputStorageUri).build();

    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(audioEncoding)
            .setSampleRateHertz(sampleRateHertz)
            .setLanguageCode(languageCode)
            .build();

    LongRunningRecognizeRequest request =
        LongRunningRecognizeRequest.newBuilder()
            .setConfig(config)
            .setAudio(audio)
            .setOutputConfig(outputConfig)
            .build();

    OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> future =
        speechClient.longRunningRecognizeAsync(request);

    System.out.println("Waiting for operation to complete...");
    future.get();

    // Get the blob that holds the exported transcription, given the bucket and object name
    Blob blob = storage.get(BlobId.of(bucketName, objectName));
    // Extract the byte contents from the blob
    byte[] bytes = blob.getContent();
    // Decode the bytes as a UTF-8 string
    String decoded = new String(bytes, "UTF-8");
    // Create a JSON object from the decoded string
    JSONObject jsonObject = new JSONObject(decoded);
    // Get the JSON string
    String json = jsonObject.toString();

    // Specify the proto message type to parse into
    LongRunningRecognizeResponse.Builder builder = LongRunningRecognizeResponse.newBuilder();
    // Construct a parser that ignores unknown fields
    JsonFormat.Parser parser = JsonFormat.parser().ignoringUnknownFields();
    // Parse the JSON into the protobuf message
    parser.merge(json, builder);
    // Get the converted response
    LongRunningRecognizeResponse storageResponse = builder.build();

    System.out.println("Results saved to specified output Cloud Storage bucket.");
    String output =
        storageResponse.getResultsList().stream()
            .map(result -> String.valueOf(result.getAlternatives(0).getTranscript()))
            .collect(Collectors.joining("\n"));
    System.out.printf("Transcription: %s", output);
  }
}
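For context, the helper above might be invoked as shown below. The bucket, object, and file names are placeholders introduced here for illustration, not values taken from the sample; the encoding string must match one of the AudioEncoding enum names.

// Hypothetical invocation of exportToStorage (placeholder resource names).
public static void main(String[] args) throws Exception {
  String inputUri = "gs://cloud-samples-data/speech/commercial_mono.wav";       // assumed sample audio
  String outputStorageUri = "gs://my-output-bucket/transcripts/result.json";    // placeholder
  String encoding = "LINEAR16";                    // must be a valid AudioEncoding name
  int sampleRateHertz = 8000;
  String languageCode = "en-US";
  String bucketName = "my-output-bucket";          // bucket portion of outputStorageUri
  String objectName = "transcripts/result.json";   // object portion of outputStorageUri
  exportToStorage(inputUri, outputStorageUri, encoding, sampleRateHertz, languageCode,
      bucketName, objectName);
}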