Use of be.tarsos.dsp.AudioDispatcher in the project cythara by gstraube:
class AudioDispatcherFactory, method fromPipe.
/**
 * Creates a new {@link AudioDispatcher} whose input is a piped sub-process.
 * The sub-process writes a WAV header followed by PCM samples to standard
 * out; the header is skipped and the PCM samples are captured and
 * interpreted. Typical executables that can decode arbitrary audio formats
 * to stdout are ffmpeg and avconv.
 *
 * @param source
 *            The file or stream to capture.
 * @param targetSampleRate
 *            The target sample rate.
 * @param audioBufferSize
 *            The number of samples used in the buffer.
 * @param bufferOverlap
 *            The number of samples to overlap the current and previous buffer.
 * @return A new audioprocessor.
 */
public static AudioDispatcher fromPipe(final String source, final int targetSampleRate, final int audioBufferSize, final int bufferOverlap) {
    final PipedAudioStream pipedStream = new PipedAudioStream(source);
    final TarsosDSPAudioInputStream monoStream = pipedStream.getMonoStream(targetSampleRate, 0);
    return new AudioDispatcher(monoStream, audioBufferSize, bufferOverlap);
}
Use of be.tarsos.dsp.AudioDispatcher in the project cythara by gstraube:
class AudioDispatcherFactory, method fromDefaultMicrophone.
/**
 * Create a new AudioDispatcher connected to the default microphone.
 *
 * @param sampleRate
 *            The requested sample rate.
 * @param audioBufferSize
 *            The size of the audio buffer (in samples).
 * @param bufferOverlap
 *            The size of the overlap (in samples).
 * @return A new AudioDispatcher.
 * @throws IllegalArgumentException
 *             if {@code audioBufferSize} is smaller than the platform's
 *             minimum recording buffer size (in samples).
 */
public static AudioDispatcher fromDefaultMicrophone(final int sampleRate, final int audioBufferSize, final int bufferOverlap) {
    // getMinBufferSize reports the minimum in BYTES for the given format.
    int minAudioBufferSize = AudioRecord.getMinBufferSize(sampleRate, android.media.AudioFormat.CHANNEL_IN_MONO, android.media.AudioFormat.ENCODING_PCM_16BIT);
    // 16-bit PCM: 2 bytes per sample.
    int minAudioBufferSizeInSamples = minAudioBufferSize / 2;
    if (minAudioBufferSizeInSamples <= audioBufferSize) {
        // AudioRecord takes its buffer size in bytes, hence samples * 2.
        AudioRecord audioInputStream = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRate, android.media.AudioFormat.CHANNEL_IN_MONO, android.media.AudioFormat.ENCODING_PCM_16BIT, audioBufferSize * 2);
        // 16-bit, mono, signed, little-endian PCM.
        TarsosDSPAudioFormat format = new TarsosDSPAudioFormat(sampleRate, 16, 1, true, false);
        TarsosDSPAudioInputStream audioStream = new AndroidAudioInputStream(audioInputStream, format);
        // start recording ! Opens the stream.
        audioInputStream.startRecording();
        return new AudioDispatcher(audioStream, audioBufferSize, bufferOverlap);
    } else {
        // Fix: report the minimum in SAMPLES, the unit audioBufferSize is
        // compared against. The previous message used minAudioBufferSize * 2
        // (bytes doubled), i.e. 4x the actual minimum and in the wrong unit.
        throw new IllegalArgumentException("Buffer size too small should be at least " + minAudioBufferSizeInSamples);
    }
}
Use of be.tarsos.dsp.AudioDispatcher in the project PICKSARI by HyunJunYANG:
class PitchDetect, method detect.
// Measures the user's vocal range: listens on the default microphone and,
// whenever the detected pitch rises above the current target frequency,
// updates the UI and advances to the next target note.
public void detect() {
// Microphone dispatcher: 22.05 kHz, 1024-sample buffer, no overlap.
AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(22050, 1024, 0);
// pitchSuccess: 0 = initial, 1 = success, 2 = failure.
pitchSuccess = 0;
Log.d(LOG_TAG, "start");
// Pitch-detection callback; invoked on the dispatcher thread, UI work is
// marshalled onto the main thread via runOnUiThread.
PitchDetectionHandler pdh = new PitchDetectionHandler() {
@Override
public void handlePitch(PitchDetectionResult result, AudioEvent e) {
final float pitchInHz = result.getPitch();
runOnUiThread(new Runnable() {
@SuppressLint("ResourceType")
@Override
public void run() {
// If the pitch is above the current target, change the image and pause.
if (!thread.isInterrupted()) {
if (pitchInHz > hz[hzIndex]) {
Log.d(LOG_TAG, String.valueOf(hz[hzIndex]));
pitchText.setText(scale[scaleIndex]);
pitch = pitchInHz;
// Advance to the next target frequency/note.
// NOTE(review): hzIndex/scaleIndex are not bounds-checked here —
// confirm hz[] and scale[] are large enough for every success.
hzIndex = hzIndex + 1;
scaleIndex = scaleIndex + 1;
pitchSuccess = 1;
// Cancel the timer; it is restarted below after a success.
timer.cancel();
manPicture.setImageResource(R.drawable.man_3);
// postDelayed: delay so the changed picture stays visible briefly.
try {
Log.d(LOG_TAG, "ThreadSleepStart");
// thread.sleep(1000);
handler.postDelayed(runnable, 1000);
// Restart the countdown timer.
detectTime();
// thread.interrupt();
// Log.d(LOG_TAG, "ThreadSleepInterrupted");
Log.d(LOG_TAG, "ThreadSleepRestart");
} catch (Exception e) {
e.printStackTrace();
}
} else {
// Pitch is below the current target.
// Log.d(LOG_TAG, "ThreadSleep Pitch is low.");
pitchSuccess = 2;
}
}
}
});
}
};
// Attach an FFT_YIN pitch processor and run the dispatcher on its own thread.
AudioProcessor p = new PitchProcessor(PitchProcessor.PitchEstimationAlgorithm.FFT_YIN, 22050, 1024, pdh);
dispatcher.addAudioProcessor(p);
thread = new Thread(dispatcher, "Audio Dispatcher");
thread.start();
}
Use of be.tarsos.dsp.AudioDispatcher in the project PICKSARI by HyunJunYANG:
class TarsosDSP, method detect.
/**
 * Starts microphone pitch detection and returns the current value of
 * {@code bool}.
 *
 * NOTE(review): the Thread created inside handlePitch is never started, so
 * the pitch/bool updates in its Runnable never execute — confirm intent.
 * NOTE(review): {@code bool} is returned immediately, before the dispatcher
 * has processed any audio, so the return value reflects prior state rather
 * than the result of this call.
 */
public boolean detect() {
// Microphone dispatcher: 22.05 kHz, 1024-sample buffer, no overlap.
AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(22050, 1024, 0);
Log.d("StartaudioDetect", "start");
PitchDetectionHandler pdh = new PitchDetectionHandler() {
@Override
public void handlePitch(PitchDetectionResult result, AudioEvent e) {
final float pitchInHz = result.getPitch();
// NOTE(review): this assignment overwrites the dispatcher thread field,
// and the new Thread is never start()ed — see class note above.
thread = new Thread(new Runnable() {
@Override
public void run() {
// Track the highest pitch observed so far in the `pitch` field.
if (pitchInHz > pitch) {
Log.d("StartAudioDetect", "start2");
pitch = pitchInHz;
bool = true;
// sendMessage(pitch);
/*
text = (TextView) findViewById(R.id.textView1);
text.setText("" + pitchInHz);
pitchChange();
*/
} else {
bool = false;
}
}
});
}
};
// FFT_YIN pitch estimation matching the dispatcher's rate and buffer size.
AudioProcessor p = new PitchProcessor(PitchProcessor.PitchEstimationAlgorithm.FFT_YIN, 22050, 1024, pdh);
dispatcher.addAudioProcessor(p);
thread = new Thread(dispatcher, "Audio Dispatcher");
thread.start();
// Returned before any pitch result is available — see NOTE above.
return bool;
}
Aggregations