Use of javax.sound.sampled.TargetDataLine in project java-google-speech-api by goxr3plus.
The class GSpeechDuplex, method upChannel.
/**
* Streams data from the TargetDataLine to the API.
*
* @param urlStr
* The URL to stream to.
* @param tl
* The target data line to stream from.
* @param af
* The AudioFormat to stream with.
* @throws LineUnavailableException
* If the TargetDataLine cannot be opened or streamed.
*/
private Thread upChannel(String urlStr, TargetDataLine tl, AudioFormat af) throws LineUnavailableException {
    final String murl = urlStr;
    final TargetDataLine mtl = tl;
    final AudioFormat maf = af;
    if (!mtl.isOpen()) {
        mtl.open(maf);
        mtl.start();
    }
    Thread upChannelThread = new Thread("Upstream Thread") {

        public void run() {
            openHttpsPostConnection(murl, mtl, (int) maf.getSampleRate());
        }
    };
    upChannelThread.start();
    return upChannelThread;
}
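The project's openHttpsPostConnection is not shown above. A minimal sketch of the kind of upstream loop such a thread runs — reading raw PCM from the TargetDataLine and POSTing it over HTTP — with the URL, chunk size, and buffer size as illustrative assumptions:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import javax.sound.sampled.TargetDataLine;

public class UpstreamSketch {

    // Reads raw PCM from the line and streams it to the given URL until the line closes.
    static void streamTo(String urlStr, TargetDataLine line) throws Exception {
        HttpURLConnection con = (HttpURLConnection) new URL(urlStr).openConnection();
        con.setDoOutput(true);
        con.setRequestMethod("POST");
        // Chunked mode sends data as it arrives instead of buffering the whole body.
        con.setChunkedStreamingMode(4096);
        byte[] buffer = new byte[4096];
        try (OutputStream out = con.getOutputStream()) {
            while (line.isOpen()) {
                int n = line.read(buffer, 0, buffer.length);
                if (n > 0) {
                    out.write(buffer, 0, n);
                }
            }
        }
        con.getResponseCode();
    }
}

Chunked streaming mode matters here: without it, HttpURLConnection buffers the entire request body before sending, which never completes for a live microphone stream.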
Use of javax.sound.sampled.TargetDataLine in project Minim by ddf.
The class JSMinim, method getAudioInput.
public AudioStream getAudioInput(int type, int bufferSize, float sampleRate, int bitDepth) {
    if (bitDepth != 8 && bitDepth != 16) {
        throw new IllegalArgumentException("Unsupported bit depth, use either 8 or 16.");
    }
    AudioFormat format = new AudioFormat(sampleRate, bitDepth, type, true, false);
    TargetDataLine line = getTargetDataLine(format, bufferSize * 4);
    if (line != null) {
        return new JSAudioInput(line, bufferSize);
    }
    return null;
}
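A hedged usage sketch of this method. Judging from the AudioFormat constructor call above, type occupies the channels slot, so 1 (mono) or 2 (stereo) are the sensible values; the jsMinim instance and the open()/close() calls are assumptions about the surrounding Minim API, not verified against it:

// Illustrative only: `jsMinim` is an assumed JSMinim instance, and open()/close()
// assume Minim's AudioResource contract for streams.
AudioStream in = jsMinim.getAudioInput(
        /* type: 1 = mono */ 1,
        /* bufferSize */ 1024,
        /* sampleRate */ 44100f,
        /* bitDepth */ 16);
if (in != null) {
    in.open();
    // ... pull samples via the stream's read method ...
    in.close();
}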
Use of javax.sound.sampled.TargetDataLine in project Minim by ddf.
The class JSMinim, method getTargetDataLine.
TargetDataLine getTargetDataLine(AudioFormat format, int bufferSize) {
    TargetDataLine line = null;
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
    if (AudioSystem.isLineSupported(info)) {
        try {
            if (inputMixer == null) {
                line = (TargetDataLine) AudioSystem.getLine(info);
            } else {
                line = (TargetDataLine) inputMixer.getLine(info);
            }
            line.open(format, bufferSize * format.getFrameSize());
            debug("TargetDataLine buffer size is " + line.getBufferSize() + "\n"
                    + "TargetDataLine format is " + line.getFormat().toString() + "\n"
                    + "TargetDataLine info is " + line.getLineInfo().toString());
        } catch (Exception e) {
            error("Error acquiring TargetDataLine: " + e.getMessage());
        }
    } else {
        error("Unable to return a TargetDataLine: unsupported format - " + format.toString());
    }
    return line;
}
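Whatever acquires the line, the consumer side is usually a blocking read loop. A generic sketch, with the half-buffer read size and the stop flag as illustrative choices:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.ObjIntConsumer;
import javax.sound.sampled.TargetDataLine;

class CaptureLoop {

    // Drains the line until `capturing` is cleared; `consume` is a hypothetical sink.
    static void drain(TargetDataLine line, ObjIntConsumer<byte[]> consume, AtomicBoolean capturing) {
        byte[] buffer = new byte[line.getBufferSize() / 2];
        line.start();
        while (capturing.get()) {
            int n = line.read(buffer, 0, buffer.length); // blocks until data is available
            if (n > 0) {
                consume.accept(buffer, n);
            }
        }
        line.stop();
        line.close();
    }
}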
Use of javax.sound.sampled.TargetDataLine in project java-sdk by watson-developer-cloud.
The class MicrophoneWithWebSocketsExample, method main.
/**
* The main method.
*
* @param args the arguments
* @throws Exception the exception
*/
public static void main(final String[] args) throws Exception {
    SpeechToText service = new SpeechToText();
    service.setUsernameAndPassword("<username>", "<password>");
    // Signed PCM AudioFormat with 16kHz, 16 bit sample size, mono
    int sampleRate = 16000;
    AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
    if (!AudioSystem.isLineSupported(info)) {
        System.out.println("Line not supported");
        System.exit(0);
    }
    TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
    line.open(format);
    line.start();
    AudioInputStream audio = new AudioInputStream(line);
    RecognizeOptions options = new RecognizeOptions.Builder()
            .audio(audio)
            .interimResults(true)
            .timestamps(true)
            .wordConfidence(true)
            .contentType(HttpMediaType.AUDIO_RAW + ";rate=" + sampleRate)
            .build();
    service.recognizeUsingWebSocket(options, new BaseRecognizeCallback() {

        @Override
        public void onTranscription(SpeechRecognitionResults speechResults) {
            System.out.println(speechResults);
        }
    });
    System.out.println("Listening to your voice for the next 30s...");
    Thread.sleep(30 * 1000);
    // Closing the WebSocket's underlying InputStream will close the WebSocket itself.
    line.stop();
    line.close();
    System.out.println("Fin.");
}
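The TargetDataLine-to-AudioInputStream pairing above is not Watson-specific; as a quick sanity check that the microphone and format work, the same stream can be written to a WAV file. A sketch with an illustrative file name and a five-second capture window:

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.TargetDataLine;

public class MicToWav {

    public static void main(String[] args) throws Exception {
        AudioFormat format = new AudioFormat(16000, 16, 1, true, false);
        TargetDataLine line = (TargetDataLine) AudioSystem.getLine(
                new DataLine.Info(TargetDataLine.class, format));
        line.open(format);
        line.start();
        AudioInputStream audio = new AudioInputStream(line);
        // AudioSystem.write blocks until the stream ends, so stop the line
        // from another thread after the desired duration.
        new Thread(() -> {
            try {
                Thread.sleep(5_000);
            } catch (InterruptedException ignored) {
            }
            line.stop();
            line.close();
        }).start();
        AudioSystem.write(audio, AudioFileFormat.Type.WAVE, new File("capture.wav"));
    }
}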
Use of javax.sound.sampled.TargetDataLine in project aws-doc-sdk-examples by awsdocs.
The class BidirectionalStreaming, method convertAudio.
// snippet-start:[transcribe.java2.bidir_streaming.main]
public static void convertAudio(TranscribeStreamingAsyncClient client) throws Exception {
    try {
        StartStreamTranscriptionRequest request = StartStreamTranscriptionRequest.builder()
                .mediaEncoding(MediaEncoding.PCM)
                .languageCode(LanguageCode.EN_US)
                .mediaSampleRateHertz(16_000)
                .build();
        TargetDataLine mic = Microphone.get();
        mic.start();
        AudioStreamPublisher publisher = new AudioStreamPublisher(new AudioInputStream(mic));
        StartStreamTranscriptionResponseHandler response = StartStreamTranscriptionResponseHandler.builder()
                .subscriber(e -> {
                    TranscriptEvent event = (TranscriptEvent) e;
                    event.transcript().results().forEach(
                            r -> r.alternatives().forEach(a -> System.out.println(a.transcript())));
                })
                .build();
        // Keeps streaming until you end the Java program.
        client.startStreamTranscription(request, publisher, response);
    } catch (TranscribeStreamingException e) {
        System.err.println(e.awsErrorDetails().errorMessage());
        System.exit(1);
    }
}
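Microphone.get() is a small helper from the aws-doc-sdk-examples repository, not a JDK API. A hypothetical stand-in, assuming the 16 kHz, 16-bit, mono signed PCM format that the request above declares:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;

// Hypothetical stand-in for the repository's Microphone helper.
class Microphone {

    static TargetDataLine get() throws LineUnavailableException {
        // Matches the StartStreamTranscriptionRequest: 16 kHz, 16-bit, mono, signed PCM.
        AudioFormat format = new AudioFormat(16000, 16, 1, true, false);
        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
        TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
        line.open(format);
        return line;
    }
}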