Use of javax.sound.sampled.SourceDataLine in project jdk8u_jdk by JetBrains: class SoftMixingMixer, method open.
// Opens this software mixer and starts pushing mixed audio to a SourceDataLine.
// If the caller supplies a line, its format becomes the mixer format; if line
// is null, the default hardware mixer is searched for a line supporting stereo
// 16-bit PCM at 48 kHz (falling back to AudioSystem.getSourceDataLine). A
// SoftAudioPusher thread is then started to feed the line from the mixer's
// stream. Throws LineUnavailableException if the line cannot be opened.
public void open(SourceDataLine line) throws LineUnavailableException {
    // Already open: just clear the implicit-open flag and return.
    if (isOpen()) {
        implicitOpen = false;
        return;
    }
    synchronized (control_mutex) {
        try {
            // Adopt the supplied line's format as the mixer's own format.
            if (line != null)
                format = line.getFormat();
            AudioInputStream ais = openStream(getFormat());
            if (line == null) {
                // Publish the current thread before resolving lines.
                // NOTE(review): presumably this lets SoftMixingMixerProvider
                // recognize re-entrant lookups triggered by the AudioSystem
                // calls below — confirm against the provider's source.
                synchronized (SoftMixingMixerProvider.mutex) {
                    SoftMixingMixerProvider.lockthread = Thread.currentThread();
                }
                try {
                    Mixer defaultmixer = AudioSystem.getMixer(null);
                    if (defaultmixer != null) {
                        // Search for suitable line
                        DataLine.Info idealinfo = null;
                        AudioFormat idealformat = null;
                        Line.Info[] lineinfos = defaultmixer.getSourceLineInfo();
                        // Look for a SourceDataLine whose supported formats match
                        // (or leave unspecified) stereo, signed/unsigned PCM,
                        // 48 kHz, 16-bit audio.
                        idealFound: for (int i = 0; i < lineinfos.length; i++) {
                            if (lineinfos[i].getLineClass() == SourceDataLine.class) {
                                DataLine.Info info = (DataLine.Info) lineinfos[i];
                                AudioFormat[] formats = info.getFormats();
                                for (int j = 0; j < formats.length; j++) {
                                    AudioFormat format = formats[j];
                                    if (format.getChannels() == 2 || format.getChannels() == AudioSystem.NOT_SPECIFIED)
                                        if (format.getEncoding().equals(Encoding.PCM_SIGNED) || format.getEncoding().equals(Encoding.PCM_UNSIGNED))
                                            if (format.getSampleRate() == AudioSystem.NOT_SPECIFIED || format.getSampleRate() == 48000.0)
                                                if (format.getSampleSizeInBits() == AudioSystem.NOT_SPECIFIED || format.getSampleSizeInBits() == 16) {
                                                    idealinfo = info;
                                                    int ideal_channels = format.getChannels();
                                                    boolean ideal_signed = format.getEncoding().equals(Encoding.PCM_SIGNED);
                                                    float ideal_rate = format.getSampleRate();
                                                    boolean ideal_endian = format.isBigEndian();
                                                    int ideal_bits = format.getSampleSizeInBits();
                                                    // Replace wildcard (NOT_SPECIFIED) values with the
                                                    // concrete ideal: 16-bit, stereo, 48 kHz.
                                                    if (ideal_bits == AudioSystem.NOT_SPECIFIED)
                                                        ideal_bits = 16;
                                                    if (ideal_channels == AudioSystem.NOT_SPECIFIED)
                                                        ideal_channels = 2;
                                                    if (ideal_rate == AudioSystem.NOT_SPECIFIED)
                                                        ideal_rate = 48000;
                                                    idealformat = new AudioFormat(ideal_rate, ideal_bits, ideal_channels, ideal_signed, ideal_endian);
                                                    break idealFound;
                                                }
                                }
                            }
                        }
                        if (idealformat != null) {
                            format = idealformat;
                            line = (SourceDataLine) defaultmixer.getLine(idealinfo);
                        }
                    }
                    // No ideal line on the default mixer: let AudioSystem pick
                    // any source line for the current format.
                    if (line == null)
                        line = AudioSystem.getSourceDataLine(format);
                } finally {
                    // Always clear the published thread, even on failure.
                    synchronized (SoftMixingMixerProvider.mutex) {
                        SoftMixingMixerProvider.lockthread = null;
                    }
                }
                if (line == null)
                    // NOTE(review): 'info' is not in scope here in this snippet
                    // (it is declared inside the search loop above) — the
                    // extraction likely dropped a local declaration; verify
                    // against the full source file.
                    throw new IllegalArgumentException("No line matching " + info.toString() + " is supported.");
            }
            // latency appears to be in microseconds: buffer frames =
            // frameRate * (latency / 1e6) — TODO confirm the field's unit.
            double latency = this.latency;
            if (!line.isOpen()) {
                int bufferSize = getFormat().getFrameSize() * (int) (getFormat().getFrameRate() * (latency / 1000000f));
                line.open(getFormat(), bufferSize);
                // Remember that we opened that line
                // so we can close again in SoftSynthesizer.close()
                sourceDataLine = line;
            }
            if (!line.isActive())
                line.start();
            // Use the stream's available() as the control buffer size; fall
            // back to 512 bytes if the best-effort probe fails.
            int controlbuffersize = 512;
            try {
                controlbuffersize = ais.available();
            } catch (IOException e) {
                // deliberately ignored: keep the 512-byte default
            }
            // Tell mixer not fill read buffers fully.
            // This lowers latency, and tells DataPusher
            // to read in smaller amounts.
            // mainmixer.readfully = false;
            // pusher = new DataPusher(line, ais);
            // Round the line buffer down to a multiple of the control buffer,
            // but keep at least three control buffers' worth.
            int buffersize = line.getBufferSize();
            buffersize -= buffersize % controlbuffersize;
            if (buffersize < 3 * controlbuffersize)
                buffersize = 3 * controlbuffersize;
            if (jitter_correction) {
                ais = new SoftJitterCorrector(ais, buffersize, controlbuffersize);
            }
            // Background thread that pulls from ais and writes to the line.
            pusher = new SoftAudioPusher(line, ais, controlbuffersize);
            pusher_stream = ais;
            pusher.start();
        } catch (LineUnavailableException e) {
            if (isOpen())
                close();
            // NOTE(review): rethrowing with e.toString() drops the original
            // cause and stack trace.
            throw new LineUnavailableException(e.toString());
        }
    }
}
Use of javax.sound.sampled.SourceDataLine in project jdk8u_jdk by JetBrains: class JavaSoundAudioClip, method createSourceDataLine.
/**
 * Acquires a SourceDataLine matching {@code loadedAudioFormat} and wires it
 * into a DataPusher stored in {@code datapusher}.
 *
 * @return true when the line and pusher were created; false (silently, by
 *         design) when the format is unsupported or acquisition fails
 */
private boolean createSourceDataLine() {
    if (DEBUG || Printer.debug)
        Printer.debug("JavaSoundAudioClip.createSourceDataLine()");
    try {
        DataLine.Info lineInfo = new DataLine.Info(SourceDataLine.class, loadedAudioFormat);
        if (!AudioSystem.isLineSupported(lineInfo)) {
            if (DEBUG || Printer.err)
                Printer.err("Line not supported: " + loadedAudioFormat);
            // fail silently
            return false;
        }
        SourceDataLine outputLine = (SourceDataLine) AudioSystem.getLine(lineInfo);
        datapusher = new DataPusher(outputLine, loadedAudioFormat, loadedAudio, loadedAudioByteLength);
    } catch (Exception e) {
        if (DEBUG || Printer.err)
            e.printStackTrace();
        // fail silently
        return false;
    }
    if (datapusher == null) {
        // fail silently
        return false;
    }
    if (DEBUG || Printer.debug)
        Printer.debug("Created SourceDataLine.");
    return true;
}
Use of javax.sound.sampled.SourceDataLine in project Minim by ddf: class JSStreamingSampleRecorder, method save.
/**
* Finishes the recording process by closing the file.
*/
/**
 * Finishes the recording process by closing the file, then reopens the
 * saved file as a playable AudioRecordingStream.
 *
 * @return a recording stream over the file that was just written
 */
public AudioRecordingStream save() {
    // flush and close the stream the recorded audio was written to
    try {
        aos.close();
    } catch (IOException e) {
        Minim.error("AudioRecorder.save: An error occurred when trying to save the file:\n" + e.getMessage());
    }
    String path = filePath();
    AudioInputStream stream = system.getAudioInputStream(path);
    SourceDataLine outputLine = system.getSourceDataLine(stream.getFormat(), 1024);
    // this is fine because the recording will always be
    // in a raw format (WAV, AU, etc).
    long lengthMillis = AudioUtils.frames2Millis(stream.getFrameLength(), format);
    BasicMetaData metaData = new BasicMetaData(path, lengthMillis, stream.getFrameLength());
    return new JSPCMAudioRecordingStream(system, metaData, stream, outputLine, 1024);
}
Use of javax.sound.sampled.SourceDataLine in project Minim by ddf: class JSMinim, method getAudioRecordingStream.
/**
 * Opens the named file as a streaming recording, decoding mp3 data to
 * 16-bit signed PCM on the fly and attaching a SourceDataLine for output.
 *
 * @param filename   the file to open
 * @param bufferSize the output buffer size, in sample frames
 * @param inMemory   when true and the stream supports it, mark the stream
 *                   so it can be rewound without re-reading the file
 * @return the recording stream, or null if the file could not be opened
 *         or no output line was available
 */
public AudioRecordingStream getAudioRecordingStream(String filename, int bufferSize, boolean inMemory) {
    // TODO: deal with the case of wanting to have the file fully in memory
    AudioInputStream source = getAudioInputStream(filename);
    if (source == null) {
        return null;
    }
    if (inMemory && source.markSupported()) {
        source.mark((int) source.getFrameLength() * source.getFormat().getFrameSize());
    }
    debug("Reading from " + source.getClass().toString());
    debug("File format is: " + source.getFormat().toString());
    AudioFormat sourceFormat = source.getFormat();
    AudioRecordingStream result = null;
    if (sourceFormat instanceof MpegAudioFormat) {
        // mp3 streams need to be converted to PCM before playback
        AudioFormat mp3Format = sourceFormat;
        AudioFormat pcmFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, mp3Format.getSampleRate(), 16, mp3Format.getChannels(), mp3Format.getChannels() * 2, mp3Format.getSampleRate(), false);
        // converts the stream to PCM audio from mp3 audio
        AudioInputStream decoded = getAudioInputStream(pcmFormat, source);
        // source data line is for sending the file audio out to the
        // speakers
        SourceDataLine outputLine = getSourceDataLine(pcmFormat, bufferSize);
        if (decoded != null && outputLine != null) {
            Map<String, Object> tags = getID3Tags(filename);
            long lengthInMillis = -1;
            if (tags.containsKey("duration")) {
                Long duration = (Long) tags.get("duration");
                if (duration.longValue() > 0) {
                    lengthInMillis = duration.longValue() / 1000;
                }
            }
            MP3MetaData metaData = new MP3MetaData(filename, lengthInMillis, tags);
            result = new JSMPEGAudioRecordingStream(this, metaData, source, decoded, outputLine, bufferSize);
        }
    } else {
        // raw PCM: send the file audio straight out to the speakers
        SourceDataLine outputLine = getSourceDataLine(sourceFormat, bufferSize);
        if (outputLine != null) {
            long lengthInMillis = AudioUtils.frames2Millis(source.getFrameLength(), sourceFormat);
            BasicMetaData metaData = new BasicMetaData(filename, lengthInMillis, source.getFrameLength());
            result = new JSPCMAudioRecordingStream(this, metaData, source, outputLine, bufferSize);
        }
    }
    return result;
}
Use of javax.sound.sampled.SourceDataLine in project Minim by ddf: class JSMinim, method getAudioRecording.
/**
 * Loads the named file fully into memory and wraps it in an AudioRecording
 * backed by a SourceDataLine. mp3 data is decoded to 16-bit signed PCM
 * first; other formats are read in their native format.
 *
 * @param filename the file to load
 * @return the in-memory recording, or null if the file could not be read,
 *         its decoded length could not be determined, or no output line
 *         was available
 * @deprecated
 */
@Deprecated
public AudioRecording getAudioRecording(String filename) {
    AudioMetaData meta = null;
    AudioInputStream ais = getAudioInputStream(filename);
    byte[] samples;
    if (ais != null) {
        AudioFormat format = ais.getFormat();
        if (format instanceof MpegAudioFormat) {
            AudioFormat baseFormat = format;
            format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, baseFormat.getSampleRate(), 16, baseFormat.getChannels(), baseFormat.getChannels() * 2, baseFormat.getSampleRate(), false);
            // converts the stream to PCM audio from mp3 audio
            ais = getAudioInputStream(format, ais);
            // get a map of properties so we can find out how long it is
            Map<String, Object> props = getID3Tags(filename);
            // there is a property called mp3.length.bytes, but that is
            // the length in bytes of the mp3 file, which will of course
            // be much shorter than the decoded version. so we use the
            // duration of the file to figure out how many bytes the
            // decoded file will be.
            // FIX: guard against a missing or malformed duration tag, which
            // previously threw a NullPointerException here; this mirrors the
            // containsKey("duration") check in getAudioRecordingStream.
            Object duration = props.get("duration");
            if (!(duration instanceof Long)) {
                debug("Couldn't determine the duration of " + filename);
                return null;
            }
            long dur = ((Long) duration).longValue();
            int toRead = (int) AudioUtils.millis2Bytes(dur / 1000, format);
            samples = loadByteAudio(ais, toRead);
            meta = new MP3MetaData(filename, dur / 1000, props);
        } else {
            // FIX: multiply in long before narrowing, so the byte count is
            // computed without int overflow for long files.
            samples = loadByteAudio(ais, (int) (ais.getFrameLength() * format.getFrameSize()));
            long length = AudioUtils.bytes2Millis(samples.length, format);
            meta = new BasicMetaData(filename, length, samples.length);
        }
        SourceDataLine line = getSourceDataLine(format, 2048);
        if (line != null) {
            return new JSAudioRecording(this, samples, line, meta);
        }
    }
    return null;
}
Aggregations