Search in sources :

Example 1 with TargetDataLine

use of javax.sound.sampled.TargetDataLine in project java-google-speech-api by goxr3plus.

In the class GSpeechDuplex, the method upChannel:

/**
 * Streams data from the TargetDataLine to the API.
 *
 * @param urlStr
 *            The URL to stream to
 * @param tl
 *            The target data line to stream from.
 * @param af
 *            The AudioFormat to stream with.
 * @throws LineUnavailableException
 *             If cannot open or stream the TargetDataLine.
 */
private Thread upChannel(String urlStr, TargetDataLine tl, AudioFormat af) throws LineUnavailableException {
    final String murl = urlStr;
    final TargetDataLine mtl = tl;
    final AudioFormat maf = af;
    // Open and start capture only if the caller has not already done so.
    if (!mtl.isOpen()) {
        mtl.open(maf);
        mtl.start();
    }
    // Named thread so the upstream leg is identifiable in thread dumps.
    Thread upChannelThread = new Thread("Upstream Thread") {

        @Override
        public void run() {
            // Sample rate is truncated to an int as required by the upload call.
            openHttpsPostConnection(murl, mtl, (int) maf.getSampleRate());
        }
    };
    upChannelThread.start();
    return upChannelThread;
}
Also used : AudioFormat(javax.sound.sampled.AudioFormat) TargetDataLine(javax.sound.sampled.TargetDataLine)

Example 2 with TargetDataLine

use of javax.sound.sampled.TargetDataLine in project Minim by ddf.

In the class JSMinim, the method getAudioInput:

/**
 * Opens a microphone input stream with the requested format.
 *
 * @param type       channel count for the capture format (1 = mono, 2 = stereo)
 * @param bufferSize sample buffer size for the returned stream
 * @param sampleRate capture sample rate in Hz
 * @param bitDepth   sample size in bits; only 8 and 16 are supported
 * @return a JSAudioInput wrapping the acquired line, or null if no line was available
 * @throws IllegalArgumentException if bitDepth is neither 8 nor 16
 */
public AudioStream getAudioInput(int type, int bufferSize, float sampleRate, int bitDepth) {
    if (bitDepth != 8 && bitDepth != 16) {
        throw new IllegalArgumentException("Unsupported bit depth, use either 8 or 16.");
    }
    // Signed, little-endian PCM at the requested rate/depth/channel count.
    AudioFormat captureFormat = new AudioFormat(sampleRate, bitDepth, type, true, false);
    // Request a line buffer four times the stream buffer to absorb capture jitter.
    TargetDataLine captureLine = getTargetDataLine(captureFormat, bufferSize * 4);
    return (captureLine == null) ? null : new JSAudioInput(captureLine, bufferSize);
}
Also used : AudioFormat(javax.sound.sampled.AudioFormat) MpegAudioFormat(javazoom.spi.mpeg.sampled.file.MpegAudioFormat) TargetDataLine(javax.sound.sampled.TargetDataLine)

Example 3 with TargetDataLine

use of javax.sound.sampled.TargetDataLine in project Minim by ddf.

In the class JSMinim, the method getTargetDataLine:

/**
 * Acquires and opens a TargetDataLine for the given format, preferring the
 * configured input mixer when one is set.
 *
 * @param format     the capture format the line must support
 * @param bufferSize requested buffer size in frames (converted to bytes on open)
 * @return the acquired line, or null if the format is unsupported or no line
 *         could be obtained; errors are reported via error() rather than thrown
 */
TargetDataLine getTargetDataLine(AudioFormat format, int bufferSize) {
    DataLine.Info lineInfo = new DataLine.Info(TargetDataLine.class, format);
    if (!AudioSystem.isLineSupported(lineInfo)) {
        error("Unable to return a TargetDataLine: unsupported format - " + format.toString());
        return null;
    }
    TargetDataLine acquired = null;
    try {
        // Prefer the user-selected input mixer; otherwise let AudioSystem pick.
        acquired = (inputMixer == null)
                ? (TargetDataLine) AudioSystem.getLine(lineInfo)
                : (TargetDataLine) inputMixer.getLine(lineInfo);
        acquired.open(format, bufferSize * format.getFrameSize());
        debug("TargetDataLine buffer size is " + acquired.getBufferSize() + "\n" + "TargetDataLine format is " + acquired.getFormat().toString() + "\n" + "TargetDataLine info is " + acquired.getLineInfo().toString());
    } catch (Exception e) {
        // Best-effort: log and fall through; callers treat null/unopened as failure.
        error("Error acquiring TargetDataLine: " + e.getMessage());
    }
    return acquired;
}
Also used : TargetDataLine(javax.sound.sampled.TargetDataLine) DataLine(javax.sound.sampled.DataLine) SourceDataLine(javax.sound.sampled.SourceDataLine) LineUnavailableException(javax.sound.sampled.LineUnavailableException) MalformedURLException(java.net.MalformedURLException) UnsupportedAudioFileException(javax.sound.sampled.UnsupportedAudioFileException) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) TargetDataLine(javax.sound.sampled.TargetDataLine)

Example 4 with TargetDataLine

use of javax.sound.sampled.TargetDataLine in project java-sdk by watson-developer-cloud.

In the class MicrophoneWithWebSocketsExample, the method main:

/**
 * Entry point: streams 30 seconds of microphone audio to Watson Speech to Text
 * over a WebSocket and prints each transcription result.
 *
 * @param args the arguments (unused)
 * @throws Exception if the microphone line or the service connection fails
 */
public static void main(final String[] args) throws Exception {
    SpeechToText service = new SpeechToText();
    service.setUsernameAndPassword("<username>", "<password>");
    // Signed PCM AudioFormat with 16kHz, 16 bit sample size, mono
    final int sampleRate = 16000;
    AudioFormat captureFormat = new AudioFormat(sampleRate, 16, 1, true, false);
    DataLine.Info lineInfo = new DataLine.Info(TargetDataLine.class, captureFormat);
    if (!AudioSystem.isLineSupported(lineInfo)) {
        System.out.println("Line not supported");
        System.exit(0);
    }
    TargetDataLine microphone = (TargetDataLine) AudioSystem.getLine(lineInfo);
    microphone.open(captureFormat);
    microphone.start();
    AudioInputStream audio = new AudioInputStream(microphone);
    // Raw PCM upload; the content type must carry the sample rate explicitly.
    RecognizeOptions options = new RecognizeOptions.Builder()
            .audio(audio)
            .interimResults(true)
            .timestamps(true)
            .wordConfidence(true)
            .contentType(HttpMediaType.AUDIO_RAW + ";rate=" + sampleRate)
            .build();
    service.recognizeUsingWebSocket(options, new BaseRecognizeCallback() {

        @Override
        public void onTranscription(SpeechRecognitionResults speechResults) {
            System.out.println(speechResults);
        }
    });
    System.out.println("Listening to your voice for the next 30s...");
    Thread.sleep(30 * 1000);
    // closing the WebSockets underlying InputStream will close the WebSocket itself.
    microphone.stop();
    microphone.close();
    System.out.println("Fin.");
}
Also used : BaseRecognizeCallback(com.ibm.watson.developer_cloud.speech_to_text.v1.websocket.BaseRecognizeCallback) TargetDataLine(javax.sound.sampled.TargetDataLine) DataLine(javax.sound.sampled.DataLine) TargetDataLine(javax.sound.sampled.TargetDataLine) AudioInputStream(javax.sound.sampled.AudioInputStream) AudioFormat(javax.sound.sampled.AudioFormat) SpeechRecognitionResults(com.ibm.watson.developer_cloud.speech_to_text.v1.model.SpeechRecognitionResults) RecognizeOptions(com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions)

Example 5 with TargetDataLine

use of javax.sound.sampled.TargetDataLine in project aws-doc-sdk-examples by awsdocs.

In the class BidirectionalStreaming, the method convertAudio:

// snippet-start:[transcribe.java2.bidir_streaming.main]
/**
 * Streams live microphone audio to Amazon Transcribe and prints each
 * alternative transcript as transcript events arrive.
 *
 * @param client the asynchronous Transcribe Streaming client to stream through
 * @throws Exception if the microphone cannot be acquired or streaming setup fails
 */
public static void convertAudio(TranscribeStreamingAsyncClient client) throws Exception {
    try {
        // 16 kHz PCM, US English — must match the microphone capture format.
        StartStreamTranscriptionRequest request = StartStreamTranscriptionRequest.builder()
                .mediaEncoding(MediaEncoding.PCM)
                .languageCode(LanguageCode.EN_US)
                .mediaSampleRateHertz(16_000)
                .build();
        TargetDataLine mic = Microphone.get();
        mic.start();
        AudioStreamPublisher publisher = new AudioStreamPublisher(new AudioInputStream(mic));
        // Print every alternative contained in each transcript event.
        StartStreamTranscriptionResponseHandler response = StartStreamTranscriptionResponseHandler.builder()
                .subscriber(e -> {
                    TranscriptEvent event = (TranscriptEvent) e;
                    event.transcript().results()
                            .forEach(r -> r.alternatives()
                                    .forEach(a -> System.out.println(a.transcript())));
                })
                .build();
        // Keeps Streaming until you end the Java program
        client.startStreamTranscription(request, publisher, response);
    } catch (TranscribeStreamingException e) {
        System.err.println(e.awsErrorDetails().errorMessage());
        System.exit(1);
    }
}
Also used : AudioInputStream(javax.sound.sampled.AudioInputStream) TargetDataLine(javax.sound.sampled.TargetDataLine) LanguageCode(software.amazon.awssdk.services.transcribestreaming.model.LanguageCode) MediaEncoding(software.amazon.awssdk.services.transcribestreaming.model.MediaEncoding) TranscriptEvent(software.amazon.awssdk.services.transcribestreaming.model.TranscriptEvent) AudioFormat(javax.sound.sampled.AudioFormat) DataLine(javax.sound.sampled.DataLine) TranscribeStreamingAsyncClient(software.amazon.awssdk.services.transcribestreaming.TranscribeStreamingAsyncClient) StartStreamTranscriptionRequest(software.amazon.awssdk.services.transcribestreaming.model.StartStreamTranscriptionRequest) AudioInputStream(javax.sound.sampled.AudioInputStream) Region(software.amazon.awssdk.regions.Region) TranscribeStreamingException(software.amazon.awssdk.services.transcribestreaming.model.TranscribeStreamingException) AudioSystem(javax.sound.sampled.AudioSystem) StartStreamTranscriptionResponseHandler(software.amazon.awssdk.services.transcribestreaming.model.StartStreamTranscriptionResponseHandler) TranscriptEvent(software.amazon.awssdk.services.transcribestreaming.model.TranscriptEvent) StartStreamTranscriptionRequest(software.amazon.awssdk.services.transcribestreaming.model.StartStreamTranscriptionRequest) StartStreamTranscriptionResponseHandler(software.amazon.awssdk.services.transcribestreaming.model.StartStreamTranscriptionResponseHandler) TranscribeStreamingException(software.amazon.awssdk.services.transcribestreaming.model.TranscribeStreamingException) TargetDataLine(javax.sound.sampled.TargetDataLine)

Aggregations

TargetDataLine (javax.sound.sampled.TargetDataLine)12 DataLine (javax.sound.sampled.DataLine)9 AudioFormat (javax.sound.sampled.AudioFormat)8 LineUnavailableException (javax.sound.sampled.LineUnavailableException)4 IOException (java.io.IOException)2 AudioInputStream (javax.sound.sampled.AudioInputStream)2 SourceDataLine (javax.sound.sampled.SourceDataLine)2 UnsupportedAudioFileException (javax.sound.sampled.UnsupportedAudioFileException)2 RecognizeOptions (com.ibm.watson.developer_cloud.speech_to_text.v1.model.RecognizeOptions)1 SpeechRecognitionResults (com.ibm.watson.developer_cloud.speech_to_text.v1.model.SpeechRecognitionResults)1 BaseRecognizeCallback (com.ibm.watson.developer_cloud.speech_to_text.v1.websocket.BaseRecognizeCallback)1 FileNotFoundException (java.io.FileNotFoundException)1 MalformedURLException (java.net.MalformedURLException)1 ShortBuffer (java.nio.ShortBuffer)1 ScheduledThreadPoolExecutor (java.util.concurrent.ScheduledThreadPoolExecutor)1 AudioSystem (javax.sound.sampled.AudioSystem)1 Line (javax.sound.sampled.Line)1 Mixer (javax.sound.sampled.Mixer)1 MpegAudioFormat (javazoom.spi.mpeg.sampled.file.MpegAudioFormat)1 CanvasFrame (org.bytedeco.javacv.CanvasFrame)1