Use of org.bytedeco.javacpp.IntPointer in project javacv by bytedeco.
From the class Frame, method cloneBufferArray.
/**
 * This private method takes a buffer array as input and returns a deep copy.
 * It is assumed that all buffers in the input array are of the same subclass.
 *
 * @param srcBuffers - Buffer array to be cloned
 * @param clonedBuffers - Buffer array to fill with clones
 * @return Opaque object to store
 *
 * @author Extension proposed by Dragos Dutu
 */
private static Pointer cloneBufferArray(Buffer[] srcBuffers, Buffer[] clonedBuffers) {
    Pointer opaque = null;
    if (srcBuffers != null && srcBuffers.length > 0) {
        int totalCapacity = 0;
        for (int i = 0; i < srcBuffers.length; i++) {
            srcBuffers[i].rewind();
            totalCapacity += srcBuffers[i].capacity();
        }
        if (srcBuffers[0] instanceof ByteBuffer) {
            BytePointer pointer = new BytePointer(totalCapacity);
            for (int i = 0; i < srcBuffers.length; i++) {
                clonedBuffers[i] = pointer.limit(pointer.position() + srcBuffers[i].limit()).asBuffer().put((ByteBuffer) srcBuffers[i]);
                pointer.position(pointer.limit());
            }
            opaque = pointer;
        } else if (srcBuffers[0] instanceof ShortBuffer) {
            ShortPointer pointer = new ShortPointer(totalCapacity);
            for (int i = 0; i < srcBuffers.length; i++) {
                clonedBuffers[i] = pointer.limit(pointer.position() + srcBuffers[i].limit()).asBuffer().put((ShortBuffer) srcBuffers[i]);
                pointer.position(pointer.limit());
            }
            opaque = pointer;
        } else if (srcBuffers[0] instanceof IntBuffer) {
            IntPointer pointer = new IntPointer(totalCapacity);
            for (int i = 0; i < srcBuffers.length; i++) {
                clonedBuffers[i] = pointer.limit(pointer.position() + srcBuffers[i].limit()).asBuffer().put((IntBuffer) srcBuffers[i]);
                pointer.position(pointer.limit());
            }
            opaque = pointer;
        } else if (srcBuffers[0] instanceof LongBuffer) {
            LongPointer pointer = new LongPointer(totalCapacity);
            for (int i = 0; i < srcBuffers.length; i++) {
                clonedBuffers[i] = pointer.limit(pointer.position() + srcBuffers[i].limit()).asBuffer().put((LongBuffer) srcBuffers[i]);
                pointer.position(pointer.limit());
            }
            opaque = pointer;
        } else if (srcBuffers[0] instanceof FloatBuffer) {
            FloatPointer pointer = new FloatPointer(totalCapacity);
            for (int i = 0; i < srcBuffers.length; i++) {
                clonedBuffers[i] = pointer.limit(pointer.position() + srcBuffers[i].limit()).asBuffer().put((FloatBuffer) srcBuffers[i]);
                pointer.position(pointer.limit());
            }
            opaque = pointer;
        } else if (srcBuffers[0] instanceof DoubleBuffer) {
            DoublePointer pointer = new DoublePointer(totalCapacity);
            for (int i = 0; i < srcBuffers.length; i++) {
                clonedBuffers[i] = pointer.limit(pointer.position() + srcBuffers[i].limit()).asBuffer().put((DoubleBuffer) srcBuffers[i]);
                pointer.position(pointer.limit());
            }
            opaque = pointer;
        }
        for (int i = 0; i < srcBuffers.length; i++) {
            srcBuffers[i].rewind();
            clonedBuffers[i].rewind();
        }
    }
    if (opaque != null) {
        opaque.retainReference();
    }
    return opaque;
}
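For readers unfamiliar with JavaCPP buffers, here is a minimal, hypothetical sketch of the IntBuffer branch in isolation: it clones a heap IntBuffer into native memory through an IntPointer using the same limit()/asBuffer()/put() round trip as cloneBufferArray. The class name and the sample data are illustrative and not part of Frame.

// Minimal sketch (hypothetical class): clone a heap IntBuffer into native memory
// the same way the IntBuffer branch of cloneBufferArray does.
import java.nio.IntBuffer;
import org.bytedeco.javacpp.IntPointer;

public class CloneIntBufferSketch {
    public static void main(String[] args) {
        IntBuffer src = IntBuffer.wrap(new int[] { 1, 2, 3, 4 });
        IntPointer pointer = new IntPointer(src.capacity());      // native backing store
        IntBuffer clone = pointer.limit(src.limit()).asBuffer().put(src);
        src.rewind();
        clone.rewind();
        System.out.println(clone.get(1) == src.get(1));           // true: deep copy in separate memory
        pointer.deallocate();  // Frame instead retains the pointer as the "opaque" reference
    }
}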
Use of org.bytedeco.javacpp.IntPointer in project javacv by bytedeco.
From the class FFmpegFrameFilter, method pushSamples.
public synchronized void pushSamples(int n, int audioChannels, int sampleRate, int sampleFormat, Buffer... samples) throws Exception {
    try (PointerScope scope = new PointerScope()) {
        if (!started) {
            throw new Exception("start() was not called successfully!");
        }
        int ret;
        Pointer[] data = new Pointer[samples.length];
        int sampleSize = samples != null ? ((samples[0].limit() - samples[0].position()) / (samples.length > 1 ? 1 : audioChannels)) : 0;
        if (samples != null && samples[0] instanceof ByteBuffer) {
            sampleFormat = data.length > 1 ? AV_SAMPLE_FMT_U8P : AV_SAMPLE_FMT_U8;
            for (int i = 0; i < data.length; i++) {
                data[i] = new BytePointer((ByteBuffer) samples[i]);
            }
        } else if (samples != null && samples[0] instanceof ShortBuffer) {
            sampleFormat = data.length > 1 ? AV_SAMPLE_FMT_S16P : AV_SAMPLE_FMT_S16;
            for (int i = 0; i < data.length; i++) {
                data[i] = new ShortPointer((ShortBuffer) samples[i]);
            }
        } else if (samples != null && samples[0] instanceof IntBuffer) {
            sampleFormat = data.length > 1 ? AV_SAMPLE_FMT_S32P : AV_SAMPLE_FMT_S32;
            for (int i = 0; i < data.length; i++) {
                data[i] = new IntPointer((IntBuffer) samples[i]);
            }
        } else if (samples != null && samples[0] instanceof FloatBuffer) {
            sampleFormat = data.length > 1 ? AV_SAMPLE_FMT_FLTP : AV_SAMPLE_FMT_FLT;
            for (int i = 0; i < data.length; i++) {
                data[i] = new FloatPointer((FloatBuffer) samples[i]);
            }
        } else if (samples != null && samples[0] instanceof DoubleBuffer) {
            sampleFormat = data.length > 1 ? AV_SAMPLE_FMT_DBLP : AV_SAMPLE_FMT_DBL;
            for (int i = 0; i < data.length; i++) {
                data[i] = new DoublePointer((DoubleBuffer) samples[i]);
            }
        } else if (samples != null) {
            for (int i = 0; i < data.length; i++) {
                data[i] = new Pointer(samples[i]);
            }
        }
        av_samples_fill_arrays(new PointerPointer(samples_frame), samples_frame.linesize(), new BytePointer(data[0]), audioChannels, sampleSize, sampleFormat, 1);
        for (int i = 0; i < samples.length; i++) {
            samples_frame.data(i, new BytePointer(data[i]));
        }
        samples_frame.channels(audioChannels);
        samples_frame.channel_layout(av_get_default_channel_layout(audioChannels));
        samples_frame.nb_samples(sampleSize);
        samples_frame.format(sampleFormat);
        samples_frame.sample_rate(sampleRate);
        /* push the decoded frame into the filtergraph */
        if ((ret = av_buffersrc_add_frame_flags(abuffersrc_ctx[n], samples_frame, AV_BUFFERSRC_FLAG_KEEP_REF | AV_BUFFERSRC_FLAG_PUSH)) < 0) {
            throw new Exception("av_buffersrc_add_frame_flags() error " + ret + ": Error while feeding the filtergraph.");
        }
    }
}
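As a rough usage illustration, the sketch below pushes one interleaved 16-bit buffer through an audio filtergraph and pulls the filtered frame back. Only the pushSamples() signature is taken from the method above; the filter string, the configuration setters, and the avutil import path are assumptions about a recent JavaCV build.

// Hypothetical usage sketch, assuming a recent JavaCV/FFmpeg build.
import java.nio.ShortBuffer;
import org.bytedeco.javacv.FFmpegFrameFilter;
import org.bytedeco.javacv.Frame;
import static org.bytedeco.ffmpeg.global.avutil.AV_SAMPLE_FMT_S16;

public class PushSamplesSketch {
    public static void main(String[] args) throws Exception {
        FFmpegFrameFilter filter = new FFmpegFrameFilter("volume=0.5", 1); // audio filter, mono
        filter.setSampleRate(44100);
        filter.setSampleFormat(AV_SAMPLE_FMT_S16);
        filter.start();
        ShortBuffer samples = ShortBuffer.allocate(1024);   // one buffer of S16 silence
        // A single non-planar ShortBuffer makes pushSamples() select AV_SAMPLE_FMT_S16.
        filter.pushSamples(0, 1, 44100, AV_SAMPLE_FMT_S16, samples);
        Frame filtered = filter.pullSamples();
        System.out.println(filtered == null ? 0 : filtered.samples.length);
        filter.stop();
        filter.release();
    }
}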
Use of org.bytedeco.javacpp.IntPointer in project bigbluebutton by bigbluebutton.
From the class FFmpegFrameRecorder, method recordSamples.
public boolean recordSamples(int sampleRate, int audioChannels, Buffer... samples) throws Exception {
    if (audio_st == null) {
        throw new Exception("No audio output stream (Is audioChannels > 0 and has start() been called?)");
    }
    int ret;
    if (sampleRate <= 0) {
        sampleRate = audio_c.sample_rate();
    }
    if (audioChannels <= 0) {
        audioChannels = audio_c.channels();
    }
    int inputSize = samples != null ? samples[0].limit() - samples[0].position() : 0;
    int inputFormat = AV_SAMPLE_FMT_NONE;
    int inputChannels = samples != null && samples.length > 1 ? 1 : audioChannels;
    int inputDepth = 0;
    int outputFormat = audio_c.sample_fmt();
    int outputChannels = samples_out.length > 1 ? 1 : audio_c.channels();
    int outputDepth = av_get_bytes_per_sample(outputFormat);
    if (samples != null && samples[0] instanceof ByteBuffer) {
        inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_U8P : AV_SAMPLE_FMT_U8;
        inputDepth = 1;
        for (int i = 0; i < samples.length; i++) {
            ByteBuffer b = (ByteBuffer) samples[i];
            if (samples_in[i] instanceof BytePointer && samples_in[i].capacity() >= inputSize && b.hasArray()) {
                ((BytePointer) samples_in[i]).position(0).put(b.array(), b.position(), inputSize);
            } else {
                samples_in[i] = new BytePointer(b);
            }
        }
    } else if (samples != null && samples[0] instanceof ShortBuffer) {
        inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_S16P : AV_SAMPLE_FMT_S16;
        inputDepth = 2;
        for (int i = 0; i < samples.length; i++) {
            ShortBuffer b = (ShortBuffer) samples[i];
            if (samples_in[i] instanceof ShortPointer && samples_in[i].capacity() >= inputSize && b.hasArray()) {
                ((ShortPointer) samples_in[i]).position(0).put(b.array(), samples[i].position(), inputSize);
            } else {
                samples_in[i] = new ShortPointer(b);
            }
        }
    } else if (samples != null && samples[0] instanceof IntBuffer) {
        inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_S32P : AV_SAMPLE_FMT_S32;
        inputDepth = 4;
        for (int i = 0; i < samples.length; i++) {
            IntBuffer b = (IntBuffer) samples[i];
            if (samples_in[i] instanceof IntPointer && samples_in[i].capacity() >= inputSize && b.hasArray()) {
                ((IntPointer) samples_in[i]).position(0).put(b.array(), samples[i].position(), inputSize);
            } else {
                samples_in[i] = new IntPointer(b);
            }
        }
    } else if (samples != null && samples[0] instanceof FloatBuffer) {
        inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_FLTP : AV_SAMPLE_FMT_FLT;
        inputDepth = 4;
        for (int i = 0; i < samples.length; i++) {
            FloatBuffer b = (FloatBuffer) samples[i];
            if (samples_in[i] instanceof FloatPointer && samples_in[i].capacity() >= inputSize && b.hasArray()) {
                ((FloatPointer) samples_in[i]).position(0).put(b.array(), b.position(), inputSize);
            } else {
                samples_in[i] = new FloatPointer(b);
            }
        }
    } else if (samples != null && samples[0] instanceof DoubleBuffer) {
        inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_DBLP : AV_SAMPLE_FMT_DBL;
        inputDepth = 8;
        for (int i = 0; i < samples.length; i++) {
            DoubleBuffer b = (DoubleBuffer) samples[i];
            if (samples_in[i] instanceof DoublePointer && samples_in[i].capacity() >= inputSize && b.hasArray()) {
                ((DoublePointer) samples_in[i]).position(0).put(b.array(), b.position(), inputSize);
            } else {
                samples_in[i] = new DoublePointer(b);
            }
        }
    } else if (samples != null) {
        throw new Exception("Audio samples Buffer has unsupported type: " + samples);
    }
    if (samples_convert_ctx == null || samples_channels != audioChannels || samples_format != inputFormat || samples_rate != sampleRate) {
        samples_convert_ctx = swr_alloc_set_opts(samples_convert_ctx, audio_c.channel_layout(), outputFormat, audio_c.sample_rate(), av_get_default_channel_layout(audioChannels), inputFormat, sampleRate, 0, null);
        if (samples_convert_ctx == null) {
            throw new Exception("swr_alloc_set_opts() error: Cannot allocate the conversion context.");
        } else if ((ret = swr_init(samples_convert_ctx)) < 0) {
            throw new Exception("swr_init() error " + ret + ": Cannot initialize the conversion context.");
        }
        samples_channels = audioChannels;
        samples_format = inputFormat;
        samples_rate = sampleRate;
    }
    for (int i = 0; samples != null && i < samples.length; i++) {
        samples_in[i].position(samples_in[i].position() * inputDepth).limit((samples_in[i].position() + inputSize) * inputDepth);
    }
    while (true) {
        int inputCount = (int) Math.min(samples != null ? (samples_in[0].limit() - samples_in[0].position()) / (inputChannels * inputDepth) : 0, Integer.MAX_VALUE);
        int outputCount = (int) Math.min((samples_out[0].limit() - samples_out[0].position()) / (outputChannels * outputDepth), Integer.MAX_VALUE);
        inputCount = Math.min(inputCount, (outputCount * sampleRate + audio_c.sample_rate() - 1) / audio_c.sample_rate());
        for (int i = 0; samples != null && i < samples.length; i++) {
            samples_in_ptr.put(i, samples_in[i]);
        }
        for (int i = 0; i < samples_out.length; i++) {
            samples_out_ptr.put(i, samples_out[i]);
        }
        if ((ret = swr_convert(samples_convert_ctx, samples_out_ptr, outputCount, samples_in_ptr, inputCount)) < 0) {
            throw new Exception("swr_convert() error " + ret + ": Cannot convert audio samples.");
        } else if (ret == 0) {
            break;
        }
        for (int i = 0; samples != null && i < samples.length; i++) {
            samples_in[i].position(samples_in[i].position() + inputCount * inputChannels * inputDepth);
        }
        for (int i = 0; i < samples_out.length; i++) {
            samples_out[i].position(samples_out[i].position() + ret * outputChannels * outputDepth);
        }
        if (samples == null || samples_out[0].position() >= samples_out[0].limit()) {
            frame.nb_samples(audio_input_frame_size);
            avcodec_fill_audio_frame(frame, audio_c.channels(), outputFormat, samples_out[0], (int) Math.min(samples_out[0].limit(), Integer.MAX_VALUE), 0);
            for (int i = 0; i < samples_out.length; i++) {
                frame.data(i, samples_out[i].position(0));
                frame.linesize(i, (int) Math.min(samples_out[i].limit(), Integer.MAX_VALUE));
            }
            frame.quality(audio_c.global_quality());
            record(frame);
        }
    }
    return samples != null ? frame.key_frame() != 0 : record((AVFrame) null);
}
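A rough usage sketch for the recorder side follows; the file name, container format, and tone parameters are illustrative. One non-planar ShortBuffer makes recordSamples() pick AV_SAMPLE_FMT_S16 as the input format and resample it to the encoder's output format through swr_convert().

// Hypothetical usage sketch: write one second of a 440 Hz tone through recordSamples().
import java.nio.ShortBuffer;
import org.bytedeco.javacv.FFmpegFrameRecorder;

public class RecordSamplesSketch {
    public static void main(String[] args) throws Exception {
        int sampleRate = 44100;
        FFmpegFrameRecorder recorder = new FFmpegFrameRecorder("tone.wav", 1); // audio-only, mono
        recorder.setFormat("wav");
        recorder.setSampleRate(sampleRate);
        recorder.start();
        ShortBuffer tone = ShortBuffer.allocate(sampleRate);   // one second of interleaved S16
        for (int i = 0; i < tone.capacity(); i++) {
            tone.put(i, (short) (Math.sin(2 * Math.PI * 440 * i / sampleRate) * 10000));
        }
        recorder.recordSamples(sampleRate, 1, tone);
        recorder.stop();
        recorder.release();
    }
}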
Use of org.bytedeco.javacpp.IntPointer in project nd4j by deeplearning4j.
From the class JcublasLevel1, method isamax.
@Override
protected int isamax(int N, INDArray X, int incX) {
    if (Nd4j.dataType() != DataBuffer.Type.FLOAT)
        logger.warn("FLOAT iamax called");
    Nd4j.getExecutioner().push();
    CudaContext ctx = allocator.getFlowController().prepareAction(null, X);
    int ret2;
    CublasPointer xCPointer = new CublasPointer(X, ctx);
    cublasHandle_t handle = ctx.getHandle();
    synchronized (handle) {
        cublasSetStream_v2(new cublasContext(handle), new CUstream_st(ctx.getOldStream()));
        IntPointer resultPointer = new IntPointer(new int[] { 0 });
        cublasIsamax_v2(new cublasContext(handle), N, (FloatPointer) xCPointer.getDevicePointer(), incX, resultPointer);
        ret2 = resultPointer.get();
    }
    allocator.registerAction(ctx, null, X);
    return ret2 - 1;
}
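For comparison, the same index-of-maximum-absolute-value query is reachable through the public ND4J API; on a CUDA backend it ends up in the isamax() wrapper above, which converts cuBLAS's 1-based result to a 0-based index. The array contents and the backend choice are assumptions.

// Hypothetical sketch: iamax via the public ND4J BLAS wrapper.
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class IamaxSketch {
    public static void main(String[] args) {
        INDArray x = Nd4j.create(new float[] { 1f, -7f, 3f, 5f });
        int idx = Nd4j.getBlasWrapper().iamax(x);   // isamax() above returns ret2 - 1 (0-based)
        System.out.println(idx);                    // expected 1, since |-7| has the largest magnitude
    }
}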
Use of org.bytedeco.javacpp.IntPointer in project nd4j by deeplearning4j.
From the class JcublasLapack, method sgeqrf.
// =========================
// Q R DECOMP
@Override
public void sgeqrf(int M, int N, INDArray A, INDArray R, INDArray INFO) {
    INDArray a = A;
    INDArray r = R;
    if (Nd4j.dataType() != DataBuffer.Type.FLOAT)
        log.warn("FLOAT getrf called in DOUBLE environment");
    if (A.ordering() == 'c')
        a = A.dup('f');
    if (R != null && R.ordering() == 'c')
        r = R.dup('f');
    INDArray tau = Nd4j.createArrayFromShapeBuffer(Nd4j.getDataBufferFactory().createFloat(N), Nd4j.getShapeInfoProvider().createShapeInformation(new int[] { 1, N }).getFirst());
    if (Nd4j.getExecutioner() instanceof GridExecutioner)
        ((GridExecutioner) Nd4j.getExecutioner()).flushQueue();
    // Get context for current thread
    CudaContext ctx = (CudaContext) allocator.getDeviceContext().getContext();
    // setup the solver handles for cuSolver calls
    cusolverDnHandle_t handle = ctx.getSolverHandle();
    cusolverDnContext solverDn = new cusolverDnContext(handle);
    // synchronized on the solver
    synchronized (handle) {
        int result = cusolverDnSetStream(new cusolverDnContext(handle), new CUstream_st(ctx.getOldStream()));
        if (result != 0)
            throw new IllegalStateException("solverSetStream failed");
        // transfer the INDArray into GPU memory
        CublasPointer xAPointer = new CublasPointer(a, ctx);
        CublasPointer xTauPointer = new CublasPointer(tau, ctx);
        // this output - indicates how much memory we'll need for the real operation
        DataBuffer worksizeBuffer = Nd4j.getDataBufferFactory().createInt(1);
        int stat = cusolverDnSgeqrf_bufferSize(solverDn, M, N, (FloatPointer) xAPointer.getDevicePointer(), M, // we intentionally use host pointer here
                (IntPointer) worksizeBuffer.addressPointer());
        if (stat != CUSOLVER_STATUS_SUCCESS) {
            throw new BlasException("cusolverDnSgeqrf_bufferSize failed", stat);
        }
        int worksize = worksizeBuffer.getInt(0);
        // Now allocate memory for the workspace, the permutation matrix and a return code
        Pointer workspace = new Workspace(worksize * Nd4j.sizeOfDataType());
        // Do the actual QR decomp
        stat = cusolverDnSgeqrf(solverDn, M, N, (FloatPointer) xAPointer.getDevicePointer(), M, (FloatPointer) xTauPointer.getDevicePointer(), new CudaPointer(workspace).asFloatPointer(), worksize, new CudaPointer(allocator.getPointer(INFO, ctx)).asIntPointer());
        if (stat != CUSOLVER_STATUS_SUCCESS) {
            throw new BlasException("cusolverDnSgeqrf failed", stat);
        }
        allocator.registerAction(ctx, a);
        // allocator.registerAction(ctx, tau);
        allocator.registerAction(ctx, INFO);
        if (INFO.getInt(0) != 0) {
            throw new BlasException("cusolverDnSgeqrf failed on INFO", INFO.getInt(0));
        }
        // Copy R ( upper part of Q ) into result
        if (r != null) {
            r.assign(a.get(NDArrayIndex.interval(0, a.columns()), NDArrayIndex.all()));
            INDArrayIndex[] ix = new INDArrayIndex[2];
            for (int i = 1; i < Math.min(a.rows(), a.columns()); i++) {
                ix[0] = NDArrayIndex.point(i);
                ix[1] = NDArrayIndex.interval(0, i);
                r.put(ix, 0);
            }
        }
        stat = cusolverDnSorgqr_bufferSize(solverDn, M, N, N, (FloatPointer) xAPointer.getDevicePointer(), M, (FloatPointer) xTauPointer.getDevicePointer(), (IntPointer) worksizeBuffer.addressPointer());
        worksize = worksizeBuffer.getInt(0);
        workspace = new Workspace(worksize * Nd4j.sizeOfDataType());
        stat = cusolverDnSorgqr(solverDn, M, N, N, (FloatPointer) xAPointer.getDevicePointer(), M, (FloatPointer) xTauPointer.getDevicePointer(), new CudaPointer(workspace).asFloatPointer(), worksize, new CudaPointer(allocator.getPointer(INFO, ctx)).asIntPointer());
        if (stat != CUSOLVER_STATUS_SUCCESS) {
            throw new BlasException("cusolverDnSorgqr failed", stat);
        }
    }
    allocator.registerAction(ctx, a);
    allocator.registerAction(ctx, INFO);
    if (a != A)
        A.assign(a);
    if (r != null && r != R)
        R.assign(r);
    log.info("A: {}", A);
    if (R != null)
        log.info("R: {}", R);
}
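As a rough sketch, the QR path above can be driven from user code through ND4J's Lapack wrapper. The geqrf(A, R) signature and the routing to sgeqrf() for float data are assumptions about the surrounding ND4J API rather than something shown in this excerpt.

// Hypothetical sketch: QR factorization via ND4J's Lapack wrapper (float data assumed).
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class GeqrfSketch {
    public static void main(String[] args) {
        INDArray A = Nd4j.rand(4, 3);
        INDArray A0 = A.dup();                       // keep a copy for the reconstruction check
        INDArray R = Nd4j.create(3, 3);              // receives the upper-triangular factor
        Nd4j.getBlasWrapper().lapack().geqrf(A, R);  // A is overwritten with Q (see cusolverDnSorgqr above)
        System.out.println(A.mmul(R));               // should match A0 up to rounding error
        System.out.println(A0);
    }
}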