Usage of com.ibm.watson.speech_to_text.v1.model.SpeechRecognitionResults in the java-sdk project by watson-developer-cloud.
From the class MicrophoneWithWebSocketsExample, method main.
/**
 * Streams live microphone audio to the Speech to Text service over a
 * WebSocket for 30 seconds, printing every (interim and final)
 * transcription result to stdout.
 *
 * @param args the arguments (unused)
 * @throws Exception if the microphone line cannot be opened or the thread
 *         is interrupted while sleeping
 */
public static void main(final String[] args) throws Exception {
SpeechToText service = new SpeechToText();
// NOTE(review): setUsernameAndPassword is legacy basic-auth; the other
// snippets in this file authenticate with an IamAuthenticator instead —
// confirm which SDK version this example targets.
service.setUsernameAndPassword("<username>", "<password>");
// Signed PCM AudioFormat with 16kHz, 16 bit sample size, mono
int sampleRate = 16000;
AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
if (!AudioSystem.isLineSupported(info)) {
// Bail out cleanly when no capture device supports the requested format.
System.out.println("Line not supported");
System.exit(0);
}
TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
line.open(format);
line.start();
AudioInputStream audio = new AudioInputStream(line);
// AUDIO_RAW requires the sample rate to be appended to the content type.
// NOTE(review): newer SDK versions pass RecognizeWithWebsocketsOptions to
// recognizeUsingWebSocket (see the later microphone example) — confirm
// this compiles against the SDK version in use.
RecognizeOptions options = new RecognizeOptions.Builder().audio(audio).interimResults(true).timestamps(true).wordConfidence(true).contentType(HttpMediaType.AUDIO_RAW + ";rate=" + sampleRate).build();
service.recognizeUsingWebSocket(options, new BaseRecognizeCallback() {
@Override
public void onTranscription(SpeechRecognitionResults speechResults) {
// Each interim/final result is printed as it arrives on the socket.
System.out.println(speechResults);
}
});
System.out.println("Listening to your voice for the next 30s...");
Thread.sleep(30 * 1000);
// closing the WebSockets underlying InputStream will close the WebSocket itself.
line.stop();
line.close();
System.out.println("Fin.");
}
Usage of com.ibm.watson.speech_to_text.v1.model.SpeechRecognitionResults in the java-sdk project by watson-developer-cloud.
From the class RecognizeUsingWebSocketsExample, method main.
/**
 * Transcribes a sample WAV file over the Speech to Text WebSocket
 * interface, printing interim and final results, and waits (at most one
 * minute) for the WebSocket to disconnect before returning.
 *
 * @param args the arguments (unused)
 * @throws FileNotFoundException if the sample audio file cannot be found
 * @throws InterruptedException if interrupted while awaiting disconnect
 */
public static void main(String[] args) throws FileNotFoundException, InterruptedException {
  SpeechToText service = new SpeechToText(new IamAuthenticator("<iam_api_key>"));
  FileInputStream audio = new FileInputStream("src/test/resources/speech_to_text/sample1.wav");

  RecognizeWithWebsocketsOptions options =
      new RecognizeWithWebsocketsOptions.Builder()
          .audio(audio)
          .interimResults(true)
          .contentType(HttpMediaType.AUDIO_WAV)
          .build();

  service.recognizeUsingWebSocket(options, new BaseRecognizeCallback() {
    @Override
    public void onTranscription(SpeechRecognitionResults speechResults) {
      System.out.println(speechResults);
    }

    @Override
    public void onDisconnected() {
      // Release the latch so main can return once the socket closes.
      lock.countDown();
    }
  });

  // Block until onDisconnected fires, or give up after one minute.
  lock.await(1, TimeUnit.MINUTES);
}
Usage of com.ibm.watson.speech_to_text.v1.model.SpeechRecognitionResults in the java-sdk project by watson-developer-cloud.
From the class CustomizationExample, method main.
/**
 * End-to-end language-model customization walkthrough: creates a custom
 * language model, feeds it a corpus file and user-defined words, trains it,
 * then transcribes a sample audio file both without and with the custom
 * model so the results can be compared. The model is deleted in a finally
 * block so the service account is left clean even if a step fails.
 *
 * @param args the arguments (unused)
 * @throws InterruptedException if interrupted during one of the polling sleeps
 * @throws FileNotFoundException the file not found exception
 */
public static void main(String[] args) throws InterruptedException, FileNotFoundException {
Authenticator authenticator = new IamAuthenticator("<iam_api_key>");
SpeechToText service = new SpeechToText(authenticator);
// Create language model
CreateLanguageModelOptions createOptions = new CreateLanguageModelOptions.Builder().name("IEEE-permanent").baseModelName("en-US_BroadbandModel").description("My customization").build();
LanguageModel myModel = service.createLanguageModel(createOptions).execute().getResult();
// The customization id identifies this model in every subsequent call.
String id = myModel.getCustomizationId();
try {
// Add a corpus file to the model
// allowOverwrite(false): fail rather than replace an existing corpus of the same name.
AddCorpusOptions addOptions = new AddCorpusOptions.Builder().customizationId(id).corpusName("corpus-1").corpusFile(new File(CORPUS_FILE)).allowOverwrite(false).build();
service.addCorpus(addOptions).execute().getResult();
// Get corpus status
// Poll every 5s (at most 30 tries, ~2.5 minutes) until corpus analysis finishes.
GetCorpusOptions getOptions = new GetCorpusOptions.Builder().customizationId(id).corpusName("corpus-1").build();
for (int x = 0; x < 30 && !service.getCorpus(getOptions).execute().getResult().getStatus().equals(Corpus.Status.ANALYZED); x++) {
Thread.sleep(5000);
}
// Get all corpora
ListCorporaOptions listCorporaOptions = new ListCorporaOptions.Builder().customizationId(id).build();
Corpora corpora = service.listCorpora(listCorporaOptions).execute().getResult();
System.out.println(corpora);
// Get specific corpus
Corpus corpus = service.getCorpus(getOptions).execute().getResult();
System.out.println(corpus);
// Now add some user words to the custom model
// Each word carries a display form plus sounds-like pronunciations.
service.addWord(new AddWordOptions.Builder().customizationId(id).wordName("IEEE").word("IEEE").displayAs("IEEE").addSoundsLike("I. triple E.").build()).execute();
// NOTE(review): displayAs("IEEE") for "hhonors" looks like a copy-paste slip
// (expected something like "HHonors") — confirm intended display form.
service.addWord(new AddWordOptions.Builder().customizationId(id).wordName("hhonors").word("hhonors").displayAs("IEEE").addSoundsLike("H. honors").addSoundsLike("Hilton honors").build()).execute();
// Display all words in the words resource (OOVs from the corpus and
// new words just added) in ascending alphabetical order
ListWordsOptions listWordsAlphabeticalOptions = new ListWordsOptions.Builder().customizationId(id).wordType(ListWordsOptions.WordType.ALL).build();
Words words = service.listWords(listWordsAlphabeticalOptions).execute().getResult();
System.out.println("\nASCENDING ALPHABETICAL ORDER:");
System.out.println(words);
// Then display all words in the words resource in descending order
// by count
// The "-" prefix requests descending sort order.
ListWordsOptions listWordsCountOptions = new ListWordsOptions.Builder().customizationId(id).wordType(ListWordsOptions.WordType.ALL).sort("-" + ListWordsOptions.Sort.COUNT).build();
words = service.listWords(listWordsCountOptions).execute().getResult();
System.out.println("\nDESCENDING ORDER BY COUNT:");
System.out.println(words);
// Now start training of the model
TrainLanguageModelOptions trainOptions = new TrainLanguageModelOptions.Builder().customizationId(id).wordTypeToAdd(TrainLanguageModelOptions.WordTypeToAdd.ALL).build();
service.trainLanguageModel(trainOptions).execute();
// Poll every 10s (at most 30 tries, ~5 minutes) until the model is AVAILABLE;
// each pass re-fetches the model so the status check sees fresh state.
for (int x = 0; x < 30 && !myModel.getStatus().equals(LanguageModel.Status.AVAILABLE); x++) {
GetLanguageModelOptions getLanguageModelOptions = new GetLanguageModelOptions.Builder().customizationId(id).build();
myModel = service.getLanguageModel(getLanguageModelOptions).execute().getResult();
Thread.sleep(10000);
}
File audio = new File(AUDIO_FILE);
RecognizeOptions recognizeOptionsWithModel = new RecognizeOptions.Builder().model(RecognizeOptions.Model.EN_US_BROADBANDMODEL).customizationId(id).audio(audio).contentType(HttpMediaType.AUDIO_WAV).build();
RecognizeOptions recognizeOptionsWithoutModel = new RecognizeOptions.Builder().model(RecognizeOptions.Model.EN_US_BROADBANDMODEL).audio(audio).contentType(HttpMediaType.AUDIO_WAV).build();
// First decode WITHOUT the custom model
SpeechRecognitionResults transcript = service.recognize(recognizeOptionsWithoutModel).execute().getResult();
System.out.println(transcript);
// Now decode with the custom model
transcript = service.recognize(recognizeOptionsWithModel).execute().getResult();
System.out.println(transcript);
} finally {
// Always delete the model so repeated runs don't accumulate customizations.
DeleteLanguageModelOptions deleteOptions = new DeleteLanguageModelOptions.Builder().customizationId(id).build();
service.deleteLanguageModel(deleteOptions).execute();
}
}
Usage of com.ibm.watson.speech_to_text.v1.model.SpeechRecognitionResults in the java-sdk project by watson-developer-cloud.
From the class MicrophoneWithWebSocketsExample, method main.
/**
 * Streams live microphone audio to the Speech to Text service over a
 * WebSocket for 30 seconds, printing every (interim and final)
 * transcription result to stdout, then shuts the capture line and the
 * audio stream down.
 *
 * @param args the arguments (unused)
 * @throws Exception if the microphone line cannot be opened, the thread is
 *         interrupted while sleeping, or closing the audio stream fails
 */
public static void main(final String[] args) throws Exception {
  Authenticator authenticator = new IamAuthenticator("<iam_api_key>");
  SpeechToText service = new SpeechToText(authenticator);

  // Signed PCM AudioFormat with 16kHz, 16 bit sample size, mono
  int sampleRate = 16000;
  AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
  DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
  if (!AudioSystem.isLineSupported(info)) {
    // Bail out cleanly when no capture device supports the requested format.
    System.out.println("Line not supported");
    System.exit(0);
  }

  TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
  line.open(format);
  line.start();
  AudioInputStream audio = new AudioInputStream(line);
  try {
    // AUDIO_RAW requires the sample rate to be declared in the content type.
    RecognizeWithWebsocketsOptions options = new RecognizeWithWebsocketsOptions.Builder()
        .audio(audio)
        .interimResults(true)
        .timestamps(true)
        .wordConfidence(true)
        .contentType(HttpMediaType.AUDIO_RAW + ";rate=" + sampleRate)
        .build();
    service.recognizeUsingWebSocket(options, new BaseRecognizeCallback() {
      @Override
      public void onTranscription(SpeechRecognitionResults speechResults) {
        // Each interim/final result is printed as it arrives on the socket.
        System.out.println(speechResults);
      }
    });
    System.out.println("Listening to your voice for the next 30s...");
    Thread.sleep(30 * 1000);
  } finally {
    // Stop capturing, then close the stream: closing the WebSocket's
    // underlying InputStream also closes the WebSocket itself, so this
    // must run even if the sleep is interrupted.
    line.stop();
    line.close();
    audio.close();
  }
  System.out.println("Fin.");
}
Usage of com.ibm.watson.speech_to_text.v1.model.SpeechRecognitionResults in the java-sdk project by watson-developer-cloud.
From the class SpeechToTextIT, method testRecognizeFileString.
/**
 * Tests recognition of an audio file, requesting up to three transcript
 * alternatives, word alternatives above a 0.8 confidence threshold, and
 * smart formatting, then verifies the shape of the response.
 *
 * @throws FileNotFoundException the file not found exception
 */
@Test
public void testRecognizeFileString() throws FileNotFoundException {
  Long maxAlternatives = 3L;
  Float wordAlternativesThreshold = 0.8f;
  File audioFile = new File(SAMPLE_WAV);

  RecognizeOptions options =
      new RecognizeOptions.Builder()
          .audio(audioFile)
          .contentType(HttpMediaType.AUDIO_WAV)
          .maxAlternatives(maxAlternatives)
          .wordAlternativesThreshold(wordAlternativesThreshold)
          .smartFormatting(true)
          .build();
  SpeechRecognitionResults recognition = service.recognize(options).execute().getResult();

  // A transcript must be present, and the alternative count must honor the cap.
  assertNotNull(recognition.getResults().get(0).getAlternatives().get(0).getTranscript());
  assertTrue(recognition.getResults().get(0).getAlternatives().size() <= maxAlternatives);

  // Every returned word alternative must meet the requested confidence floor.
  List<WordAlternativeResults> wordAlternatives = recognition.getResults().get(0).getWordAlternatives();
  for (WordAlternativeResults wordAlternative : wordAlternatives) {
    assertTrue(wordAlternative.getAlternatives().get(0).getConfidence() >= wordAlternativesThreshold);
  }
}
Aggregations