Search in sources:

Example 16 with RecognizeOptions

Use of com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions in the project java-sdk by watson-developer-cloud.

From class SpeechToTextTest, method testClosingInputStreamClosesWebSocket.

@Test
public void testClosingInputStreamClosesWebSocket() throws Exception {
    // Records client-side callback events (connected/disconnected/errors/complete).
    TestRecognizeCallback recognizeCallback = new TestRecognizeCallback();
    // Records the frames the mock server receives over the upgraded WebSocket.
    WebSocketRecorder serverRecorder = new WebSocketRecorder("server");

    // A pipe lets the test push audio bytes and then close the stream on demand.
    PipedOutputStream audioSink = new PipedOutputStream();
    InputStream audioSource = new PipedInputStream(audioSink);

    server.enqueue(new MockResponse().withWebSocketUpgrade(serverRecorder));

    String customizationId = "id";
    String version = "version";
    Double customizationWeight = 0.1;
    RecognizeOptions options = new RecognizeOptions.Builder()
        .audio(audioSource)
        .contentType(HttpMediaType.createAudioRaw(44000))
        .customizationId(customizationId)
        .version(version)
        .customizationWeight(customizationWeight)
        .build();
    service.recognizeUsingWebSocket(options, recognizeCallback);

    WebSocket serverSocket = serverRecorder.assertOpen();
    serverSocket.send("{\"state\": {}}");

    // Feed one payload, then close the stream; closing must trigger the stop message.
    audioSink.write(ByteString.encodeUtf8("test").toByteArray());
    audioSink.close();

    // The server should see start -> audio bytes -> stop, and nothing else.
    serverRecorder.assertTextMessage("{\"content-type\":\"audio/l16; rate=44000\"," + "\"action\":\"start\"}");
    serverRecorder.assertBinaryMessage(ByteString.encodeUtf8("test"));
    serverRecorder.assertTextMessage("{\"action\":\"stop\"}");
    serverRecorder.assertExhausted();

    serverSocket.close(1000, null);

    recognizeCallback.assertConnected();
    recognizeCallback.assertDisconnected();
    recognizeCallback.assertNoErrors();
    recognizeCallback.assertOnTranscriptionComplete();
}
Also used : MockResponse(okhttp3.mockwebserver.MockResponse) PipedInputStream(java.io.PipedInputStream) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) PipedOutputStream(java.io.PipedOutputStream) PipedInputStream(java.io.PipedInputStream) ByteString(okio.ByteString) WebSocketRecorder(okhttp3.internal.ws.WebSocketRecorder) RecognizeOptions(com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions) WebSocket(okhttp3.WebSocket) WatsonServiceUnitTest(com.ibm.watson.developer_cloud.WatsonServiceUnitTest) Test(org.junit.Test)

Example 17 with RecognizeOptions

Use of com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions in the project java-sdk by watson-developer-cloud.

From class SpeechToTextTest, method testRecognize.

/**
 * Test recognize.
 *
 * <p>Verifies that {@code recognize()} issues a POST to the recognize path with the WAV
 * Content-Type header and deserializes the mocked JSON body into the expected results.
 *
 * @throws URISyntaxException the URI syntax exception
 * @throws InterruptedException the interrupted exception
 * @throws FileNotFoundException the file not found exception
 */
@Test
public void testRecognize() throws URISyntaxException, InterruptedException, FileNotFoundException {
    // Queue the canned transcription results the mock server will return.
    server.enqueue(new MockResponse().addHeader(CONTENT_TYPE, HttpMediaType.APPLICATION_JSON).setBody(GSON.toJson(recognitionResults)));
    RecognizeOptions recognizeOptions = new RecognizeOptions.Builder().audio(SAMPLE_WAV).contentType(RecognizeOptions.ContentType.AUDIO_WAV).build();
    final SpeechRecognitionResults result = service.recognize(recognizeOptions).execute();
    final RecordedRequest request = server.takeRequest();
    assertNotNull(result);
    assertEquals(result, recognitionResults);
    // The recorded request must be a POST to the recognize path with the WAV content type.
    assertEquals("POST", request.getMethod());
    assertEquals(PATH_RECOGNIZE, request.getPath());
    assertEquals(HttpMediaType.AUDIO_WAV, request.getHeader(CONTENT_TYPE));
}
Also used : RecordedRequest(okhttp3.mockwebserver.RecordedRequest) MockResponse(okhttp3.mockwebserver.MockResponse) SpeechRecognitionResults(com.ibm.watson.developer_cloud.speech_to_text.v1.model.SpeechRecognitionResults) RecognizeOptions(com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions) WatsonServiceUnitTest(com.ibm.watson.developer_cloud.WatsonServiceUnitTest) Test(org.junit.Test)

Example 18 with RecognizeOptions

Use of com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions in the project java-sdk by watson-developer-cloud.

From class SpeechToTextTest, method testRecognizeWithSpeakerLabels.

/**
 * Test diarization (speaker labels).
 *
 * <p>Verifies that {@code speakerLabels(true)} is serialized as the {@code speaker_labels}
 * query parameter and that the mocked diarization payload round-trips through the service
 * unchanged.
 *
 * @throws URISyntaxException the URI syntax exception
 * @throws InterruptedException the interrupted exception
 * @throws FileNotFoundException the file not found exception
 */
@Test
public void testRecognizeWithSpeakerLabels() throws URISyntaxException, InterruptedException, FileNotFoundException {
    // Fix: the fixture stream was previously never closed (resource leak).
    FileInputStream jsonFile = new FileInputStream("src/test/resources/speech_to_text/diarization.json");
    String diarizationStr;
    try {
        diarizationStr = getStringFromInputStream(jsonFile);
    } finally {
        try {
            jsonFile.close();
        } catch (java.io.IOException e) {
            // Best-effort close of a test fixture; a close failure should not fail the test.
        }
    }
    JsonObject diarization = new JsonParser().parse(diarizationStr).getAsJsonObject();
    server.enqueue(new MockResponse().addHeader(CONTENT_TYPE, HttpMediaType.APPLICATION_JSON).setBody(diarizationStr));
    RecognizeOptions recognizeOptions = new RecognizeOptions.Builder().audio(SAMPLE_WAV).contentType(RecognizeOptions.ContentType.AUDIO_WAV).speakerLabels(true).build();
    SpeechRecognitionResults result = service.recognize(recognizeOptions).execute();
    final RecordedRequest request = server.takeRequest();
    assertEquals("POST", request.getMethod());
    assertEquals(PATH_RECOGNIZE + "?speaker_labels=true", request.getPath());
    // Compare as canonical JSON strings so field ordering from GSON does not matter.
    assertEquals(diarization.toString(), GSON.toJsonTree(result).toString());
}
Also used : RecordedRequest(okhttp3.mockwebserver.RecordedRequest) MockResponse(okhttp3.mockwebserver.MockResponse) JsonObject(com.google.gson.JsonObject) ByteString(okio.ByteString) FileInputStream(java.io.FileInputStream) SpeechRecognitionResults(com.ibm.watson.developer_cloud.speech_to_text.v1.model.SpeechRecognitionResults) JsonParser(com.google.gson.JsonParser) RecognizeOptions(com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions) WatsonServiceUnitTest(com.ibm.watson.developer_cloud.WatsonServiceUnitTest) Test(org.junit.Test)

Example 19 with RecognizeOptions

Use of com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions in the project java-sdk by watson-developer-cloud.

From class SpeechToTextTest, method testRecognizeWithCustomization.

/**
 * Test recognize with customization.
 *
 * <p>Verifies that {@code customizationId} and {@code version} are serialized as the
 * {@code customization_id} and {@code version} query parameters.
 *
 * @throws FileNotFoundException the file not found exception
 * @throws InterruptedException the interrupted exception
 */
@Test
public void testRecognizeWithCustomization() throws FileNotFoundException, InterruptedException {
    String id = "foo";
    String version = "version";
    // Fix: the fixture stream was previously created inline and never closed (resource leak).
    FileInputStream recognitionFile = new FileInputStream("src/test/resources/speech_to_text/recognition.json");
    String recString;
    try {
        recString = getStringFromInputStream(recognitionFile);
    } finally {
        try {
            recognitionFile.close();
        } catch (java.io.IOException e) {
            // Best-effort close of a test fixture; a close failure should not fail the test.
        }
    }
    JsonObject recognition = new JsonParser().parse(recString).getAsJsonObject();
    server.enqueue(new MockResponse().addHeader(CONTENT_TYPE, HttpMediaType.APPLICATION_JSON).setBody(recString));
    RecognizeOptions recognizeOptions = new RecognizeOptions.Builder().audio(SAMPLE_WAV).contentType(RecognizeOptions.ContentType.AUDIO_WAV).customizationId(id).version(version).build();
    SpeechRecognitionResults result = service.recognize(recognizeOptions).execute();
    final RecordedRequest request = server.takeRequest();
    assertEquals("POST", request.getMethod());
    assertEquals(PATH_RECOGNIZE + "?customization_id=" + id + "&version=" + version, request.getPath());
    assertEquals(recognition, GSON.toJsonTree(result));
}
Also used : RecordedRequest(okhttp3.mockwebserver.RecordedRequest) MockResponse(okhttp3.mockwebserver.MockResponse) JsonObject(com.google.gson.JsonObject) ByteString(okio.ByteString) FileInputStream(java.io.FileInputStream) SpeechRecognitionResults(com.ibm.watson.developer_cloud.speech_to_text.v1.model.SpeechRecognitionResults) JsonParser(com.google.gson.JsonParser) RecognizeOptions(com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions) WatsonServiceUnitTest(com.ibm.watson.developer_cloud.WatsonServiceUnitTest) Test(org.junit.Test)

Example 20 with RecognizeOptions

Use of com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions in the project java-sdk by watson-developer-cloud.

From class SpeechToText, method recognize.

/**
 * Recognizes an audio file and returns {@link SpeechRecognitionResults}.<br>
 * <br>
 * Here is an example of how to recognize an audio file:
 *
 * <pre>
 * SpeechToText service = new SpeechToText();
 * service.setUsernameAndPassword(&quot;USERNAME&quot;, &quot;PASSWORD&quot;);
 * service.setEndPoint(&quot;SERVICE_URL&quot;);
 *
 * RecognizeOptions options = new RecognizeOptions.Builder()
 *     .audio(new FileInputStream(&quot;sample1.wav&quot;))
 *     .contentType(RecognizeOptions.ContentType.AUDIO_WAV)
 *     .maxAlternatives(3)
 *     .build();
 *
 * SpeechRecognitionResults results = service.recognize(options).execute();
 * System.out.println(results);
 * </pre>
 *
 * @param recognizeOptions the recognize options; may be {@code null}, in which case a bare
 *        POST with no headers, query parameters, or body is sent
 * @return the {@link SpeechRecognitionResults}
 */
public ServiceCall<SpeechRecognitionResults> recognize(RecognizeOptions recognizeOptions) {
    String[] pathSegments = { "v1/recognize" };
    RequestBuilder builder = RequestBuilder.post(RequestBuilder.constructHttpUrl(getEndPoint(), pathSegments));
    // Every option is optional: only the fields actually set on the builder are sent,
    // each mapped to the corresponding header or query parameter of the recognize API.
    if (recognizeOptions != null) {
        if (recognizeOptions.contentType() != null) {
            builder.header("Content-Type", recognizeOptions.contentType());
        }
        if (recognizeOptions.model() != null) {
            builder.query("model", recognizeOptions.model());
        }
        if (recognizeOptions.customizationId() != null) {
            builder.query("customization_id", recognizeOptions.customizationId());
        }
        if (recognizeOptions.acousticCustomizationId() != null) {
            builder.query("acoustic_customization_id", recognizeOptions.acousticCustomizationId());
        }
        if (recognizeOptions.customizationWeight() != null) {
            builder.query("customization_weight", String.valueOf(recognizeOptions.customizationWeight()));
        }
        if (recognizeOptions.version() != null) {
            builder.query("version", recognizeOptions.version());
        }
        if (recognizeOptions.inactivityTimeout() != null) {
            builder.query("inactivity_timeout", String.valueOf(recognizeOptions.inactivityTimeout()));
        }
        if (recognizeOptions.keywords() != null) {
            // Keywords are sent as a single comma-separated query value.
            builder.query("keywords", RequestUtils.join(recognizeOptions.keywords(), ","));
        }
        if (recognizeOptions.keywordsThreshold() != null) {
            builder.query("keywords_threshold", String.valueOf(recognizeOptions.keywordsThreshold()));
        }
        if (recognizeOptions.maxAlternatives() != null) {
            builder.query("max_alternatives", String.valueOf(recognizeOptions.maxAlternatives()));
        }
        if (recognizeOptions.wordAlternativesThreshold() != null) {
            builder.query("word_alternatives_threshold", String.valueOf(recognizeOptions.wordAlternativesThreshold()));
        }
        if (recognizeOptions.wordConfidence() != null) {
            builder.query("word_confidence", String.valueOf(recognizeOptions.wordConfidence()));
        }
        if (recognizeOptions.timestamps() != null) {
            builder.query("timestamps", String.valueOf(recognizeOptions.timestamps()));
        }
        if (recognizeOptions.profanityFilter() != null) {
            builder.query("profanity_filter", String.valueOf(recognizeOptions.profanityFilter()));
        }
        if (recognizeOptions.smartFormatting() != null) {
            builder.query("smart_formatting", String.valueOf(recognizeOptions.smartFormatting()));
        }
        if (recognizeOptions.speakerLabels() != null) {
            builder.query("speaker_labels", String.valueOf(recognizeOptions.speakerLabels()));
        }
        if (recognizeOptions.audio() != null) {
            // The audio stream becomes the request body.
            // NOTE(review): assumes contentType() is non-null whenever audio() is set;
            // MediaType.parse(null) would throw — confirm the builder enforces this pairing.
            builder.body(InputStreamRequestBody.create(MediaType.parse(recognizeOptions.contentType()), recognizeOptions.audio()));
        }
    }
    return createServiceCall(builder.build(), ResponseConverterUtils.getObject(SpeechRecognitionResults.class));
}
Also used : RequestBuilder(com.ibm.watson.developer_cloud.http.RequestBuilder) SpeechRecognitionResults(com.ibm.watson.developer_cloud.speech_to_text.v1.model.SpeechRecognitionResults)

Aggregations

RecognizeOptions (com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions)18 SpeechRecognitionResults (com.ibm.watson.developer_cloud.speech_to_text.v1.model.SpeechRecognitionResults)17 Test (org.junit.Test)13 FileInputStream (java.io.FileInputStream)9 WatsonServiceUnitTest (com.ibm.watson.developer_cloud.WatsonServiceUnitTest)7 MockResponse (okhttp3.mockwebserver.MockResponse)7 WatsonServiceTest (com.ibm.watson.developer_cloud.WatsonServiceTest)6 File (java.io.File)6 RecordedRequest (okhttp3.mockwebserver.RecordedRequest)6 BaseRecognizeCallback (com.ibm.watson.developer_cloud.speech_to_text.v1.websocket.BaseRecognizeCallback)5 ByteString (okio.ByteString)5 JsonObject (com.google.gson.JsonObject)4 JsonParser (com.google.gson.JsonParser)4 NotFoundException (com.ibm.watson.developer_cloud.service.exception.NotFoundException)2 WordAlternativeResults (com.ibm.watson.developer_cloud.speech_to_text.v1.model.WordAlternativeResults)2 FileNotFoundException (java.io.FileNotFoundException)2 ExpectedException (org.junit.rules.ExpectedException)2 RequestBuilder (com.ibm.watson.developer_cloud.http.RequestBuilder)1 AddCorpusOptions (com.ibm.watson.developer_cloud.speech_to_text.v1.model.AddCorpusOptions)1 AddWordOptions (com.ibm.watson.developer_cloud.speech_to_text.v1.model.AddWordOptions)1