Use of javax.sound.sampled.AudioInputStream in project intellij-community by JetBrains:
class UIUtil, method playSoundFromStream.
/**
 * Plays an audio clip read from the stream produced by {@code streamProducer},
 * asynchronously on a dedicated "play sound" thread.
 *
 * @param streamProducer factory that supplies a fresh audio {@link InputStream};
 *                       wrapped in a {@link BufferedInputStream} when mark/reset
 *                       is unsupported, since AudioSystem requires mark support
 */
public static void playSoundFromStream(final Factory<InputStream> streamProducer) {
  new Thread(new Runnable() {
    // The wrapper thread is unnecessary, unless it blocks on the
    // Clip finishing; see comments.
    @Override
    public void run() {
      try {
        Clip clip = AudioSystem.getClip();
        InputStream stream = streamProducer.create();
        if (!stream.markSupported()) {
          stream = new BufferedInputStream(stream);
        }
        AudioInputStream inputStream = AudioSystem.getAudioInputStream(stream);
        // Release the clip's native audio line once playback stops; without
        // this the Clip (and the system mixer line it holds) leaks, since
        // nobody else keeps a reference to it.
        clip.addLineListener(new javax.sound.sampled.LineListener() {
          @Override
          public void update(javax.sound.sampled.LineEvent event) {
            if (event.getType() == javax.sound.sampled.LineEvent.Type.STOP) {
              event.getLine().close();
            }
          }
        });
        clip.open(inputStream);
        clip.start();
      }
      catch (Exception e) {
        // Sound is best-effort UI feedback: log and move on rather than crash.
        LOG.info(e);
      }
    }
  }, "play sound").start();
}
Use of javax.sound.sampled.AudioInputStream in project ACS by ACS-Community:
class AlarmSound, method play.
/**
 * Play the sound for the given priority.
 *
 * @param priority The priority of the alarm (valid range 0..3, indexing {@code soundURLs})
 * @throws IllegalStateException if the priority is outside 0..3
 */
private void play(int priority) throws Exception {
	if (priority < 0 || priority > 3) {
		throw new IllegalStateException("Invalid alarm priority " + priority);
	}
	URL url = soundURLs[priority];
	AudioInputStream audioInputStream = null;
	try {
		audioInputStream = AudioSystem.getAudioInputStream(url);
	} catch (Throwable t) {
		// If there is an error then the panel does nothing.
		// It might happen for example if another application
		// is locking the audio.
		System.err.println(t.getMessage());
		t.printStackTrace();
		return;
	}
	try {
		// Obtain the information about the AudioInputStream
		AudioFormat audioFormat = audioInputStream.getFormat();
		SourceDataLine line = null;
		DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat);
		// Get the list of available mixers
		Mixer.Info[] mixersInfo = AudioSystem.getMixerInfo();
		// Iterate over the mixers until one of them provides a usable line
		for (int i = 0; i < mixersInfo.length && line == null; i++) {
			Mixer.Info mi = mixersInfo[i];
			try {
				Mixer mixer = AudioSystem.getMixer(mi);
				line = (SourceDataLine) mixer.getLine(info);
			} catch (LineUnavailableException lue) {
				System.err.println("Line unavailable " + lue.getMessage());
				line = null;
				continue;
			} catch (Throwable t) {
				System.err.println("Exception getting the line " + t.getMessage());
				line = null;
				continue;
			}
			try {
				line.open(audioFormat, EXTERNAL_BUFFER_SIZE);
			} catch (Throwable t) {
				System.err.println("Error opening the line: " + t.getMessage());
				line = null;
				continue;
			}
			try {
				line.start();
			} catch (Throwable t) {
				System.err.println("Error starting the line: " + t.getMessage());
				// The line was opened: close it before moving to the next mixer
				// so we do not leak a system audio line.
				try {
					line.close();
				} catch (Throwable ignored) {
					// Best effort: nothing else we can do here.
				}
				line = null;
				continue;
			}
			try {
				playOnLine(line, audioInputStream);
			} catch (Throwable t) {
				System.err.println("Error playing: " + t.getMessage());
				// The line was opened and started: close it before retrying.
				try {
					line.close();
				} catch (Throwable ignored) {
					// Best effort: nothing else we can do here.
				}
				line = null;
				continue;
			}
			// plays what's left and closes the audioChannel
			line.drain();
			line.close();
		}
	} finally {
		// Always release the audio stream, whether playback succeeded or not.
		try {
			audioInputStream.close();
		} catch (Throwable ignored) {
			// Best effort: nothing else we can do here.
		}
	}
}
Use of javax.sound.sampled.AudioInputStream in project jdk8u_jdk by JetBrains:
class NoteOverFlowTest2, method main.
/**
 * Regression test: a synthesizer configured with very low polyphony must not
 * overflow when an instrument has far more regions than available voices.
 *
 * @param args unused
 * @throws Exception on any unexpected failure (the test harness reports it)
 */
public static void main(String[] args) throws Exception {
    // Create instance of the synthesizer with very low polyphony
    AudioSynthesizer synth = new SoftSynthesizer();
    AudioFormat format = new AudioFormat(44100, 16, 2, true, false);
    Map<String, Object> p = new HashMap<String, Object>();
    // Autobox instead of the deprecated new Integer(int) constructor.
    p.put("max polyphony", 5);
    AudioInputStream stream = synth.openStream(format, p);
    // Create instrument with too many regions (more than max polyphony)
    SF2Soundbank sf2 = new SF2Soundbank();
    SF2Sample sample = new SF2Sample(sf2);
    sample.setName("test sample");
    sample.setData(new byte[100]);
    sample.setSampleRate(44100);
    sample.setOriginalPitch(20);
    sf2.addResource(sample);
    SF2Layer layer = new SF2Layer(sf2);
    layer.setName("test layer");
    sf2.addResource(layer);
    // 100 regions, all pointing at the same sample, far exceeding polyphony 5.
    for (int i = 0; i < 100; i++) {
        SF2LayerRegion region = new SF2LayerRegion();
        region.setSample(sample);
        layer.getRegions().add(region);
    }
    SF2Instrument ins = new SF2Instrument(sf2);
    ins.setPatch(new Patch(0, 0));
    ins.setName("test instrument");
    sf2.addInstrument(ins);
    SF2InstrumentRegion insregion = new SF2InstrumentRegion();
    insregion.setLayer(layer);
    ins.getRegions().add(insregion);
    // Load the test soundbank into the synthesizer
    synth.unloadAllInstruments(synth.getDefaultSoundbank());
    synth.loadAllInstruments(sf2);
    // Send out one midi on message
    MidiChannel ch1 = synth.getChannels()[0];
    ch1.programChange(0);
    ch1.noteOn(64, 64);
    // Skip 2 seconds of rendered audio (frameRate * 2 frames) to force the
    // synthesizer to actually render; the skipped count is irrelevant here.
    stream.skip(format.getFrameSize() * ((int) (format.getFrameRate() * 2)));
    // Close the synthesizer after use
    synth.close();
}
Use of javax.sound.sampled.AudioInputStream in project jdk8u_jdk by JetBrains:
class TestPreciseTimestampRendering, method test.
// Verifies that note-on events sent with explicit microsecond timestamps are
// rendered at sample-accurate positions: it sends 30 randomly-timed notes,
// scans the rendered audio for the first non-zero sample of each note, and
// compares the measured sample offset against the requested timestamp.
public static void test(Soundbank soundbank) throws Exception {
// Create instance of synthesizer using the testing soundbank above
AudioSynthesizer synth = new SoftSynthesizer();
AudioInputStream stream = synth.openStream(format, null);
synth.unloadAllInstruments(synth.getDefaultSoundbank());
synth.loadAllInstruments(soundbank);
Receiver recv = synth.getReceiver();
// Set volume to max and turn reverb off
ShortMessage reverb_off = new ShortMessage();
reverb_off.setMessage(ShortMessage.CONTROL_CHANGE, 91, 0);
recv.send(reverb_off, -1);
ShortMessage full_volume = new ShortMessage();
full_volume.setMessage(ShortMessage.CONTROL_CHANGE, 7, 127);
recv.send(full_volume, -1);
// Fixed seed keeps the test deterministic across runs.
Random random = new Random(3485934583945l);
// Create random timestamps: one note per second (44100 samples) plus a
// random offset of up to half a second. Index 0 stays 0 (note at time zero).
long[] test_timestamps = new long[30];
for (int i = 1; i < test_timestamps.length; i++) {
test_timestamps[i] = i * 44100 + (int) (random.nextDouble() * 22050.0);
}
// Send midi note on message to synthesizer; timestamps are converted from
// sample positions (at 44100 Hz) to microseconds as required by Receiver.send.
for (int i = 0; i < test_timestamps.length; i++) {
ShortMessage midi_on = new ShortMessage();
midi_on.setMessage(ShortMessage.NOTE_ON, 69, 127);
recv.send(midi_on, (long) ((test_timestamps[i] / 44100.0) * 1000000.0));
}
// Measure timing from rendered audio: read frames, convert to floats, and
// record the sample index of the first non-zero sample for each note.
float[] fbuffer = new float[100];
byte[] buffer = new byte[fbuffer.length * format.getFrameSize()];
long firsts = -1;      // sample index of the very first audible sample
int counter = 0;       // number of note onsets detected so far
long s = 0;            // running sample index across all reads
long max_jitter = 0;
outerloop: for (int k = 0; k < 10000000; k++) {
stream.read(buffer);
AudioFloatConverter.getConverter(format).toFloatArray(buffer, fbuffer);
for (int i = 0; i < fbuffer.length; i++) {
if (fbuffer[i] != 0) {
if (firsts == -1)
firsts = s;
// Measured onset relative to the first note; expected onset is the
// requested timestamp (both in samples at 44100 Hz).
long measure_time = (s - firsts);
long predicted_time = test_timestamps[counter];
long jitter = Math.abs(measure_time - predicted_time);
// NOTE(review): this records the LAST jitter exceeding 10 samples,
// not the maximum — despite the variable name. Any value > 10 still
// fails the test below, so the pass/fail outcome is unaffected;
// confirm whether the reported value is meant to be the true max.
if (jitter > 10)
max_jitter = jitter;
counter++;
if (counter == test_timestamps.length)
break outerloop;
}
s++;
}
}
synth.close();
if (counter == 0)
throw new Exception("Nothing was measured!");
if (max_jitter != 0) {
throw new Exception("Jitter has occurred! " + "(max jitter = " + max_jitter + ")");
}
}
Use of javax.sound.sampled.AudioInputStream in project jdk8u_jdk by JetBrains:
class AlawCodec, method getConvertedStream.
/**
 * Opens the codec with the specified parameters.
 *
 * When the input stream's format already matches the requested output format,
 * the input stream is returned unchanged; otherwise it is wrapped in an
 * {@code AlawCodecStream} that performs the A-law conversion on the fly.
 *
 * @param outputFormat desired data format of the stream after processing
 * @param stream       stream from which data to be processed should be read
 * @return stream from which processed data may be read
 * @throws IllegalArgumentException if the format combination supplied is
 * not supported.
 */
private AudioInputStream getConvertedStream(AudioFormat outputFormat, AudioInputStream stream) {
    AudioFormat sourceFormat = stream.getFormat();
    if (sourceFormat.matches(outputFormat)) {
        // No conversion needed: hand the caller the original stream.
        return stream;
    }
    return new AlawCodecStream(stream, outputFormat);
}
Aggregations