Use of java.nio.DoubleBuffer in project ffx by mjschnie.
The class DoubleMatrixBuf, method receiveItems.
/**
* {@inheritDoc}
*
* Receive as many items as possible from the given byte buffer into this
* buffer.
* <P>
* The <TT>receiveItems()</TT> method must not block the calling thread; if
* it does, all message I/O in MP will be blocked.
*/
protected int receiveItems(int i, int num, ByteBuffer buffer) {
DoubleBuffer doublebuffer = buffer.asDoubleBuffer();
num = Math.min(num, doublebuffer.remaining());
int n = 0;
int r = i2r(i);
int row = r * myRowStride + myLowerRow;
int c = i2c(i);
int col = c * myColStride + myLowerCol;
int ncols = Math.min(myColCount - c, num);
while (r < myRowCount && ncols > 0) {
double[] myMatrix_row = myMatrix[row];
for (c = 0; c < ncols; ++c) {
myMatrix_row[col] = doublebuffer.get();
col += myColStride;
}
num -= ncols;
n += ncols;
++r;
row += myRowStride;
col = myLowerCol;
ncols = Math.min(myColCount, num);
}
buffer.position(buffer.position() + 8 * n);
return n;
}
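A minimal standalone sketch (not part of DoubleMatrixBuf) of the buffer-view pattern used above: asDoubleBuffer() creates a DoubleBuffer view with its own independent position, so the backing byte buffer's position must be advanced by hand, 8 bytes per item transferred, exactly as the last line of receiveItems() does.
import java.nio.ByteBuffer;
import java.nio.DoubleBuffer;

public class DoubleViewDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(4 * Double.BYTES);
        // Fill the buffer with doubles through a view.
        buffer.asDoubleBuffer().put(new double[] { 1.5, 2.5, 3.5, 4.5 });

        // Reading through a view does not move the backing byte buffer's position...
        DoubleBuffer doublebuffer = buffer.asDoubleBuffer();
        double first = doublebuffer.get();
        System.out.println(first + ", byte position = " + buffer.position()); // 1.5, byte position = 0

        // ...so, as in receiveItems(), the byte position is advanced explicitly.
        int n = 1; // items actually consumed
        buffer.position(buffer.position() + 8 * n);
        System.out.println("byte position after manual advance = " + buffer.position()); // 8
    }
}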
Use of java.nio.DoubleBuffer in project ffx by mjschnie.
The class DoubleMatrixBuf, method sendItems.
// Hidden operations.
/**
* {@inheritDoc}
*
* Send as many items as possible from this buffer to the given byte buffer.
* <P>
* The <TT>sendItems()</TT> method must not block the calling thread; if it
* does, all message I/O in MP will be blocked.
*/
protected int sendItems(int i, ByteBuffer buffer) {
DoubleBuffer doublebuffer = buffer.asDoubleBuffer();
int n = 0;
int r = i2r(i);
int row = r * myRowStride + myLowerRow;
int c = i2c(i);
int col = c * myColStride + myLowerCol;
int ncols = Math.min(myColCount - c, doublebuffer.remaining());
while (r < myRowCount && ncols > 0) {
double[] myMatrix_row = myMatrix[row];
while (c < ncols) {
doublebuffer.put(myMatrix_row[col]);
++c;
col += myColStride;
}
n += ncols;
++r;
row += myRowStride;
c = 0;
col = myLowerCol;
ncols = Math.min(myColCount, doublebuffer.remaining());
}
buffer.position(buffer.position() + 8 * n);
return n;
}
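The row/column arithmetic in sendItems() and receiveItems() relies on i2r(), i2c(), and the stride fields defined elsewhere in the Buf hierarchy. The sketch below is a simplified round trip over a plain double[][], with hypothetical row-major stand-ins for i2r()/i2c() (i / cols and i % cols) in place of the real mapping.
import java.nio.ByteBuffer;
import java.nio.DoubleBuffer;
import java.util.Arrays;

public class MatrixSliceCopyDemo {
    public static void main(String[] args) {
        double[][] src = { { 1, 2, 3 }, { 4, 5, 6 } };
        double[][] dst = new double[2][3];
        int rows = 2, cols = 3;

        // "Send": pack the matrix into a byte buffer through a DoubleBuffer view.
        ByteBuffer bytes = ByteBuffer.allocate(rows * cols * Double.BYTES);
        DoubleBuffer out = bytes.asDoubleBuffer();
        for (int i = 0; i < rows * cols; i++) {
            int r = i / cols; // hypothetical stand-in for i2r(i)
            int c = i % cols; // hypothetical stand-in for i2c(i)
            out.put(src[r][c]);
        }
        bytes.position(bytes.position() + 8 * out.position()); // advance byte position, as sendItems() does

        // "Receive": unpack on the other side with the same index mapping.
        bytes.flip();
        DoubleBuffer in = bytes.asDoubleBuffer();
        for (int i = 0; i < rows * cols; i++) {
            dst[i / cols][i % cols] = in.get();
        }
        System.out.println(Arrays.deepToString(dst)); // [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
    }
}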
Use of java.nio.DoubleBuffer in project imageio-ext by geosolutions-it.
The class NITFReader, method readFullImage.
/**
* Optimization to read the entire image in one fell swoop... This is most likely the common use case for this codec, so we hope this optimization
* will be helpful.
*
* @param imageIndex
* @param destRegion
* @param sourceXSubsampling
* @param sourceYSubsampling
* @param bandOffsets
* @param pixelSize
* @param imRas
* @throws IOException
*/
protected void readFullImage(int imageIndex, Rectangle destRegion, int sourceXSubsampling, int sourceYSubsampling, int[] bandOffsets, int pixelSize, WritableRaster imRas) throws IOException {
try {
ImageSubheader subheader = record.getImages()[imageIndex].getSubheader();
int numCols = destRegion.width;
int numRows = destRegion.height;
int nBands = subheader.getBandCount();
/*
* NOTE: This is a "fix" that will be removed once the underlying NITRO library gets patched. Currently, if you make a request of a single
* band, it doesn't matter which band you request - the data from the first band will be returned regardless. This is obviously wrong. To
* thwart this, we will read all bands, then scale down what we return to the user based on their actual request.
*/
int[] requestBands = bandOffsets;
/*
* if (nBands != bandOffsets.length && bandOffsets.length == 1 && bandOffsets[0] != 0) { requestBands = new int[nBands]; for (int i = 0; i
* < nBands; ++i) requestBands[i] = i; }
*/
int bufSize = numCols * numRows * pixelSize;
byte[][] imageBuf = new byte[requestBands.length][bufSize];
// make a SubWindow from the params
// TODO may want to read by blocks or rows to make faster and more
// memory efficient
SubWindow window;
window = new SubWindow();
window.setNumBands(requestBands.length);
window.setBandList(requestBands);
window.setNumCols(numCols);
window.setNumRows(numRows);
window.setStartCol(0);
window.setStartRow(0);
// the NITRO library can do the subsampling for us
if (sourceYSubsampling != 1 || sourceXSubsampling != 1) {
DownSampler downSampler = new PixelSkipDownSampler(sourceYSubsampling, sourceXSubsampling);
window.setDownSampler(downSampler);
}
// String pixelJustification = subheader.getPixelJustification()
// .getStringData().trim();
// boolean shouldSwap = pixelJustification.equals("R");
// since this is Java, we need the data in big-endian format
// boolean shouldSwap = ByteOrder.nativeOrder() !=
// ByteOrder.BIG_ENDIAN;
nitf.ImageReader imageReader = getImageReader(imageIndex);
imageReader.read(window, imageBuf);
List<ByteBuffer> bandBufs = new ArrayList<ByteBuffer>();
for (int i = 0; i < bandOffsets.length; ++i) {
ByteBuffer bandBuf = null;
// the special "fix" we added needs to do this
if (bandOffsets.length != requestBands.length) {
bandBuf = ByteBuffer.wrap(imageBuf[bandOffsets[i]]);
} else {
bandBuf = ByteBuffer.wrap(imageBuf[i]);
}
// bandBuf.order(ByteOrder.nativeOrder());
// shouldSwap ? ByteOrder.LITTLE_ENDIAN
// : ByteOrder.BIG_ENDIAN);
bandBufs.add(bandBuf);
}
// optimization for 1 band case... just dump the whole thing
if (bandOffsets.length == 1) {
ByteBuffer bandBuf = bandBufs.get(0);
switch(pixelSize) {
case 1:
ByteBuffer rasterByteBuf = ByteBuffer.wrap(((DataBufferByte) imRas.getDataBuffer()).getData());
rasterByteBuf.put(bandBuf);
break;
case 2:
ShortBuffer rasterShortBuf = ShortBuffer.wrap(((DataBufferUShort) imRas.getDataBuffer()).getData());
rasterShortBuf.put(bandBuf.asShortBuffer());
break;
case 4:
FloatBuffer rasterFloatBuf = FloatBuffer.wrap(((DataBufferFloat) imRas.getDataBuffer()).getData());
rasterFloatBuf.put(bandBuf.asFloatBuffer());
break;
case 8:
DoubleBuffer rasterDoubleBuf = DoubleBuffer.wrap(((DataBufferDouble) imRas.getDataBuffer()).getData());
rasterDoubleBuf.put(bandBuf.asDoubleBuffer());
break;
}
} else {
for (int srcY = 0, srcX = 0; srcY < numRows; srcY++) {
// Copy each (subsampled) source pixel into imRas
for (int dstX = 0; dstX < numCols; srcX += pixelSize, dstX++) {
for (int i = 0; i < bandOffsets.length; ++i) {
ByteBuffer bandBuf = bandBufs.get(i);
switch(pixelSize) {
case 1:
imRas.setSample(dstX, srcY, i, bandBuf.get(srcX));
break;
case 2:
imRas.setSample(dstX, srcY, i, bandBuf.getShort(srcX));
break;
case 4:
imRas.setSample(dstX, srcY, i, bandBuf.getFloat(srcX));
break;
case 8:
imRas.setSample(dstX, srcY, i, bandBuf.getDouble(srcX));
break;
}
}
}
}
}
} catch (NITFException e1) {
throw new IOException(e1);
}
}
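A condensed, hedged sketch of the pixelSize == 8 single-band branch above, independent of the NITF classes: a band delivered as a raw byte[] is bulk-copied into a raster backed by a DataBufferDouble through a DoubleBuffer view. Big-endian byte order is assumed here, matching the "since this is Java, we need the data in big-endian format" comment in the reader.
import java.awt.image.BandedSampleModel;
import java.awt.image.DataBuffer;
import java.awt.image.DataBufferDouble;
import java.awt.image.Raster;
import java.awt.image.SampleModel;
import java.awt.image.WritableRaster;
import java.nio.ByteBuffer;
import java.nio.DoubleBuffer;

public class BandBulkCopyDemo {
    public static void main(String[] args) {
        int width = 2, height = 2;

        // Stand-in for one band of imageBuf as filled by imageReader.read(window, imageBuf).
        ByteBuffer bandBuf = ByteBuffer.allocate(width * height * Double.BYTES); // big-endian by default
        bandBuf.asDoubleBuffer().put(new double[] { 0.0, 0.25, 0.5, 0.75 });

        // Single-band raster whose DataBuffer exposes its double[] directly.
        SampleModel sm = new BandedSampleModel(DataBuffer.TYPE_DOUBLE, width, height, 1);
        WritableRaster imRas = Raster.createWritableRaster(sm, new DataBufferDouble(width * height), null);

        // The pixelSize == 8 branch: wrap the raster's backing array and dump the whole band in one call.
        DoubleBuffer rasterDoubleBuf = DoubleBuffer.wrap(((DataBufferDouble) imRas.getDataBuffer()).getData());
        rasterDoubleBuf.put(bandBuf.asDoubleBuffer());

        System.out.println(imRas.getSampleDouble(1, 1, 0)); // 0.75
    }
}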
Use of java.nio.DoubleBuffer in project imageio-ext by geosolutions-it.
The class GDALImageReader, method readDatasetRaster.
/**
* Read data from the required region of the raster.
*
* @param destSm
* sample model for the image
* @param dataset
* GDAL <code>Dataset</code> to read
* @param srcRegion
* the source Region to be read
* @param dstRegion
* the destination Region of the image read
* @param selectedBands
* an array specifying the requested bands
* @return the read <code>Raster</code>
*/
private Raster readDatasetRaster(SampleModel destSm, Dataset dataset, Rectangle srcRegion, Rectangle dstRegion, int[] selectedBands) throws IOException {
SampleModel sampleModel = null;
DataBuffer imgBuffer = null;
Band pBand = null;
try {
int dstWidth = dstRegion.width;
int dstHeight = dstRegion.height;
int srcRegionXOffset = srcRegion.x;
int srcRegionYOffset = srcRegion.y;
int srcRegionWidth = srcRegion.width;
int srcRegionHeight = srcRegion.height;
if (LOGGER.isLoggable(Level.FINE))
LOGGER.fine("SourceRegion = " + srcRegion.toString());
// Getting number of bands
final int nBands = selectedBands != null ? selectedBands.length : destSm.getNumBands();
int[] banks = new int[nBands];
int[] offsets = new int[nBands];
// setting the number of pixels to read
final int pixels = dstWidth * dstHeight;
int bufferType = 0, bufferSize = 0;
int typeSizeInBytes = 0;
// ////////////////////////////////////////////////////////////////////
//
// -------------------------------------------------------------------
// Raster Creation >>> Step 2: Data Read
// -------------------------------------------------------------------
//
// ////////////////////////////////////////////////////////////////////
// NOTE: Bands are not 0-based, so we must add 1
pBand = dataset.GetRasterBand(1);
// setting buffer properties
bufferType = pBand.getDataType();
typeSizeInBytes = gdal.GetDataTypeSize(bufferType) / 8;
bufferSize = nBands * pixels * typeSizeInBytes;
// splitBands = false -> read nBands at once.
// splitBands = true -> read 1 band at a time.
boolean splitBands = false;
if (bufferSize < 0 || destSm instanceof BandedSampleModel) {
// The product
// "numBands * pixels * gdal.GetDataTypeSize(buf_type) / 8"
// may overflow int and become negative (the true size is not
// representable as an int).
// In such a case, we will read 1 band at a time.
bufferSize = pixels * typeSizeInBytes;
splitBands = true;
}
int dataBufferType = -1;
byte[][] byteBands = new byte[nBands][];
for (int k = 0; k < nBands; k++) {
// When reading all bands at once, only the first iteration is needed.
if (k > 0 && !splitBands)
break;
final byte[] dataBuffer = new byte[bufferSize];
final int returnVal;
if (!splitBands) {
// I can read nBands at once.
final int[] bandsMap = new int[nBands];
if (selectedBands != null) {
for (int i = 0; i < nBands; i++) bandsMap[i] = selectedBands[i] + 1;
} else {
for (int i = 0; i < nBands; i++) bandsMap[i] = i + 1;
}
returnVal = dataset.ReadRaster(srcRegionXOffset, srcRegionYOffset, srcRegionWidth, srcRegionHeight, dstWidth, dstHeight, bufferType, dataBuffer, bandsMap, nBands * typeSizeInBytes, dstWidth * nBands * typeSizeInBytes, typeSizeInBytes);
byteBands[k] = dataBuffer;
} else {
// I need to read 1 band at a time.
Band rBand = null;
try {
rBand = dataset.GetRasterBand(k + 1);
returnVal = rBand.ReadRaster(srcRegionXOffset, srcRegionYOffset, srcRegionWidth, srcRegionHeight, dstWidth, dstHeight, bufferType, dataBuffer);
byteBands[k] = dataBuffer;
} finally {
if (rBand != null) {
try {
// Closing the band
rBand.delete();
} catch (Throwable e) {
if (LOGGER.isLoggable(Level.FINEST))
LOGGER.log(Level.FINEST, e.getLocalizedMessage(), e);
}
}
}
}
if (returnVal == gdalconstConstants.CE_None) {
if (!splitBands)
for (int band = 0; band < nBands; band++) {
banks[band] = band;
offsets[band] = band;
}
else {
banks[k] = k;
offsets[k] = 0;
}
} else {
// The read operation did not complete successfully.
// Log the error details.
LOGGER.info(new StringBuilder("Last error: ").append(gdal.GetLastErrorMsg()).toString());
LOGGER.info(new StringBuilder("Last error number: ").append(gdal.GetLastErrorNo()).toString());
LOGGER.info(new StringBuilder("Last error type: ").append(gdal.GetLastErrorType()).toString());
throw new RuntimeException(gdal.GetLastErrorMsg());
}
}
// /////////////////////////////////////////////////////////////////////
if (bufferType == gdalconstConstants.GDT_Byte) {
if (!splitBands) {
// final byte[] bytes = new byte[nBands * pixels];
// bands[0].get(bytes, 0, nBands * pixels);
imgBuffer = new DataBufferByte(byteBands[0], nBands * pixels);
} else {
// final byte[][] bytes = new byte[nBands][];
// for (int i = 0; i < nBands; i++) {
// // bytes[i] = new byte[pixels];
// bands[i].get(bytes[i], 0, pixels);
// }
imgBuffer = new DataBufferByte(byteBands, pixels);
}
dataBufferType = DataBuffer.TYPE_BYTE;
} else {
ByteBuffer[] bands = new ByteBuffer[nBands];
for (int k = 0; (splitBands && k < nBands) || (k < 1 && !splitBands); k++) {
bands[k] = ByteBuffer.wrap(byteBands[k], 0, byteBands[k].length);
}
if (bufferType == gdalconstConstants.GDT_Int16 || bufferType == gdalconstConstants.GDT_UInt16) {
if (!splitBands) {
// I get short values from the ByteBuffer using a view
// of the ByteBuffer as a ShortBuffer
// It is worth creating the view outside the loop.
short[] shorts = new short[nBands * pixels];
bands[0].order(ByteOrder.nativeOrder());
final ShortBuffer buff = bands[0].asShortBuffer();
buff.get(shorts, 0, nBands * pixels);
if (bufferType == gdalconstConstants.GDT_Int16)
imgBuffer = new DataBufferShort(shorts, nBands * pixels);
else
imgBuffer = new DataBufferUShort(shorts, nBands * pixels);
} else {
short[][] shorts = new short[nBands][];
for (int i = 0; i < nBands; i++) {
shorts[i] = new short[pixels];
bands[i].order(ByteOrder.nativeOrder());
bands[i].asShortBuffer().get(shorts[i], 0, pixels);
}
if (bufferType == gdalconstConstants.GDT_Int16)
imgBuffer = new DataBufferShort(shorts, pixels);
else
imgBuffer = new DataBufferUShort(shorts, pixels);
}
if (bufferType == gdalconstConstants.GDT_UInt16)
dataBufferType = DataBuffer.TYPE_USHORT;
else
dataBufferType = DataBuffer.TYPE_SHORT;
} else if (bufferType == gdalconstConstants.GDT_Int32 || bufferType == gdalconstConstants.GDT_UInt32) {
if (!splitBands) {
// I get int values from the ByteBuffer using a view
// of the ByteBuffer as an IntBuffer
// It is worth creating the view outside the loop.
int[] ints = new int[nBands * pixels];
bands[0].order(ByteOrder.nativeOrder());
final IntBuffer buff = bands[0].asIntBuffer();
buff.get(ints, 0, nBands * pixels);
imgBuffer = new DataBufferInt(ints, nBands * pixels);
} else {
int[][] ints = new int[nBands][];
for (int i = 0; i < nBands; i++) {
ints[i] = new int[pixels];
bands[i].order(ByteOrder.nativeOrder());
bands[i].asIntBuffer().get(ints[i], 0, pixels);
}
imgBuffer = new DataBufferInt(ints, pixels);
}
dataBufferType = DataBuffer.TYPE_INT;
} else if (bufferType == gdalconstConstants.GDT_Float32) {
if (!splitBands) {
// I get float values from the ByteBuffer using a view
// of the ByteBuffer as a FloatBuffer
// It is worth creating the view outside the loop.
float[] floats = new float[nBands * pixels];
bands[0].order(ByteOrder.nativeOrder());
final FloatBuffer buff = bands[0].asFloatBuffer();
buff.get(floats, 0, nBands * pixels);
imgBuffer = new DataBufferFloat(floats, nBands * pixels);
} else {
float[][] floats = new float[nBands][];
for (int i = 0; i < nBands; i++) {
floats[i] = new float[pixels];
bands[i].order(ByteOrder.nativeOrder());
bands[i].asFloatBuffer().get(floats[i], 0, pixels);
}
imgBuffer = new DataBufferFloat(floats, pixels);
}
dataBufferType = DataBuffer.TYPE_FLOAT;
} else if (bufferType == gdalconstConstants.GDT_Float64) {
if (!splitBands) {
// I get double values from the ByteBuffer using a view
// of the ByteBuffer as a DoubleBuffer
// It is worth creating the view outside the loop.
double[] doubles = new double[nBands * pixels];
bands[0].order(ByteOrder.nativeOrder());
final DoubleBuffer buff = bands[0].asDoubleBuffer();
buff.get(doubles, 0, nBands * pixels);
imgBuffer = new DataBufferDouble(doubles, nBands * pixels);
} else {
double[][] doubles = new double[nBands][];
for (int i = 0; i < nBands; i++) {
doubles[i] = new double[pixels];
bands[i].order(ByteOrder.nativeOrder());
bands[i].asDoubleBuffer().get(doubles[i], 0, pixels);
}
imgBuffer = new DataBufferDouble(doubles, pixels);
}
dataBufferType = DataBuffer.TYPE_DOUBLE;
} else {
// TODO: Handle more cases if needed. Show the name of the type
// instead of the numeric value.
LOGGER.info("The specified data type is actually unsupported: " + bufferType);
}
}
// TODO: Fix this in compliance with the specified destSampleModel
if (splitBands)
sampleModel = new BandedSampleModel(dataBufferType, dstWidth, dstHeight, dstWidth, banks, offsets);
else
sampleModel = new PixelInterleavedSampleModel(dataBufferType, dstWidth, dstHeight, nBands, dstWidth * nBands, offsets);
} finally {
if (pBand != null) {
try {
// Closing the band
pBand.delete();
} catch (Throwable e) {
if (LOGGER.isLoggable(Level.FINE))
LOGGER.log(Level.FINE, e.getLocalizedMessage(), e);
}
}
}
// return Raster.createWritableRaster(sampleModel, imgBuffer, new Point(dstRegion.x, dstRegion.y));
return Raster.createWritableRaster(sampleModel, imgBuffer, null);
}
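A compressed sketch of the GDT_Float64, non-split path above, with fake interleaved pixel data standing in for the byte[] filled by dataset.ReadRaster(); variable names such as dstWidth and nBands are local stand-ins for the reader's variables, and no GDAL call is made.
import java.awt.image.DataBuffer;
import java.awt.image.DataBufferDouble;
import java.awt.image.PixelInterleavedSampleModel;
import java.awt.image.Raster;
import java.awt.image.SampleModel;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class Float64RasterDemo {
    public static void main(String[] args) {
        int dstWidth = 2, dstHeight = 2, nBands = 2;
        int pixels = dstWidth * dstHeight;

        // Fake band-interleaved doubles standing in for the byte[] filled by ReadRaster().
        ByteBuffer bytes = ByteBuffer.allocate(nBands * pixels * Double.BYTES).order(ByteOrder.nativeOrder());
        for (int i = 0; i < nBands * pixels; i++) {
            bytes.putDouble(i * 0.1);
        }
        bytes.flip();

        // Same steps as the GDT_Float64 branch: view as doubles, bulk-get, wrap in a DataBufferDouble.
        double[] doubles = new double[nBands * pixels];
        bytes.asDoubleBuffer().get(doubles, 0, nBands * pixels);
        DataBuffer imgBuffer = new DataBufferDouble(doubles, nBands * pixels);

        // Pixel-interleaved layout, matching the non-split sample model built above.
        int[] offsets = { 0, 1 };
        SampleModel sampleModel = new PixelInterleavedSampleModel(
                DataBuffer.TYPE_DOUBLE, dstWidth, dstHeight, nBands, dstWidth * nBands, offsets);
        Raster raster = Raster.createWritableRaster(sampleModel, imgBuffer, null);
        System.out.println(raster.getSampleDouble(1, 1, 1)); // sample of band 1 at pixel (1, 1)
    }
}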
Use of java.nio.DoubleBuffer in project VideoRecorder by qdrzwd.
The class NewFFmpegFrameRecorder, method record.
@Override
public boolean record(int sampleRate, Buffer... samples) throws Exception {
if (audioSt == null) {
throw new Exception("No audio output stream (Is audioChannels > 0 and has start() been called?)");
}
int inputSize = samples[0].limit() - samples[0].position();
int inputDepth;
if (sampleRate <= 0) {
sampleRate = audioC.sample_rate();
}
int inputFormat;
if (samples[0] instanceof ByteBuffer) {
inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_U8P : AV_SAMPLE_FMT_U8;
inputDepth = 1;
for (int i = 0; i < samples.length; i++) {
ByteBuffer b = (ByteBuffer) samples[i];
if (samplesIn[i] instanceof BytePointer && samplesIn[i].capacity() >= inputSize && b.hasArray()) {
((BytePointer) samplesIn[i]).position(0).put(b.array(), b.position(), inputSize);
} else {
samplesIn[i] = new BytePointer(b);
}
}
} else if (samples[0] instanceof ShortBuffer) {
inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_S16P : AV_SAMPLE_FMT_S16;
inputDepth = 2;
for (int i = 0; i < samples.length; i++) {
ShortBuffer b = (ShortBuffer) samples[i];
if (samplesIn[i] instanceof ShortPointer && samplesIn[i].capacity() >= inputSize && b.hasArray()) {
((ShortPointer) samplesIn[i]).position(0).put(b.array(), samples[i].position(), inputSize);
} else {
samplesIn[i] = new ShortPointer(b);
}
}
} else if (samples[0] instanceof IntBuffer) {
inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_S32P : AV_SAMPLE_FMT_S32;
inputDepth = 4;
for (int i = 0; i < samples.length; i++) {
IntBuffer b = (IntBuffer) samples[i];
if (samplesIn[i] instanceof IntPointer && samplesIn[i].capacity() >= inputSize && b.hasArray()) {
((IntPointer) samplesIn[i]).position(0).put(b.array(), samples[i].position(), inputSize);
} else {
samplesIn[i] = new IntPointer(b);
}
}
} else if (samples[0] instanceof FloatBuffer) {
inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_FLTP : AV_SAMPLE_FMT_FLT;
inputDepth = 4;
for (int i = 0; i < samples.length; i++) {
FloatBuffer b = (FloatBuffer) samples[i];
if (samplesIn[i] instanceof FloatPointer && samplesIn[i].capacity() >= inputSize && b.hasArray()) {
((FloatPointer) samplesIn[i]).position(0).put(b.array(), b.position(), inputSize);
} else {
samplesIn[i] = new FloatPointer(b);
}
}
} else if (samples[0] instanceof DoubleBuffer) {
inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_DBLP : AV_SAMPLE_FMT_DBL;
inputDepth = 8;
for (int i = 0; i < samples.length; i++) {
DoubleBuffer b = (DoubleBuffer) samples[i];
if (samplesIn[i] instanceof DoublePointer && samplesIn[i].capacity() >= inputSize && b.hasArray()) {
((DoublePointer) samplesIn[i]).position(0).put(b.array(), b.position(), inputSize);
} else {
samplesIn[i] = new DoublePointer(b);
}
}
} else {
throw new Exception("Audio samples Buffer has unsupported type: " + samples);
}
int ret;
int outputFormat = audioC.sample_fmt();
if (samplesConvertCtx == null) {
samplesConvertCtx = swr_alloc_set_opts(null, audioC.channel_layout(), outputFormat, audioC.sample_rate(), audioC.channel_layout(), inputFormat, sampleRate, 0, null);
if (samplesConvertCtx == null) {
throw new Exception("swr_alloc_set_opts() error: Cannot allocate the conversion context.");
} else if ((ret = swr_init(samplesConvertCtx)) < 0) {
throw new Exception("swr_init() error " + ret + ": Cannot initialize the conversion context.");
}
}
for (int i = 0; i < samples.length; i++) {
samplesIn[i].position(samplesIn[i].position() * inputDepth).limit((samplesIn[i].position() + inputSize) * inputDepth);
}
int outputChannels = samplesOut.length > 1 ? 1 : audioChannels;
int outputDepth = av_get_bytes_per_sample(outputFormat);
int inputChannels = samples.length > 1 ? 1 : audioChannels;
while (true) {
int inputCount = (samplesIn[0].limit() - samplesIn[0].position()) / (inputChannels * inputDepth);
int outputCount = (samplesOut[0].limit() - samplesOut[0].position()) / (outputChannels * outputDepth);
inputCount = Math.min(inputCount, 2 * (outputCount * sampleRate) / audioC.sample_rate());
for (int i = 0; i < samples.length; i++) {
samplesInPtr.put(i, samplesIn[i]);
}
for (int i = 0; i < samplesOut.length; i++) {
samplesOutPtr.put(i, samplesOut[i]);
}
if ((ret = swr_convert(samplesConvertCtx, samplesOutPtr, outputCount, samplesInPtr, inputCount)) < 0) {
throw new Exception("swr_convert() error " + ret + ": Cannot convert audio samples.");
} else if (ret == 0) {
break;
}
for (int i = 0; i < samples.length; i++) {
samplesIn[i].position(samplesIn[i].position() + inputCount * inputChannels * inputDepth);
}
for (int i = 0; i < samplesOut.length; i++) {
samplesOut[i].position(samplesOut[i].position() + ret * outputChannels * outputDepth);
}
if (samplesOut[0].position() >= samplesOut[0].limit()) {
frame.nb_samples(audioInputFrameSize);
avcodec_fill_audio_frame(frame, audioC.channels(), outputFormat, samplesOut[0], samplesOut[0].limit(), 0);
for (int i = 0; i < samplesOut.length; i++) {
frame.data(i, samplesOut[i].position(0));
frame.linesize(i, samplesOut[i].limit());
}
frame.quality(audioC.global_quality());
record(frame);
}
}
return frame.key_frame() != 0;
}
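A small sketch of preparing double-precision audio for the record() method above. The recorder setup is omitted; the commented-out recorder.record(...) call is hypothetical and assumes an already started NewFFmpegFrameRecorder configured with two audio channels. As the type checks above show, one DoubleBuffer per channel maps to AV_SAMPLE_FMT_DBLP (planar), while a single interleaved buffer maps to AV_SAMPLE_FMT_DBL.
import java.nio.DoubleBuffer;

public class DoubleAudioDemo {
    public static void main(String[] args) {
        int sampleRate = 44100;
        int frames = 1024;

        // One array-backed DoubleBuffer per channel -> record() above picks AV_SAMPLE_FMT_DBLP.
        DoubleBuffer left = DoubleBuffer.allocate(frames);
        DoubleBuffer right = DoubleBuffer.allocate(frames);
        for (int n = 0; n < frames; n++) {
            double sample = Math.sin(2 * Math.PI * 440.0 * n / sampleRate); // 440 Hz tone
            left.put(sample);
            right.put(sample * 0.5);
        }
        left.flip(); // record() reads from position() to limit()
        right.flip();

        // recorder.record(sampleRate, left, right); // hypothetical call; "recorder" is not shown here
        System.out.println("prepared " + left.remaining() + " samples per channel");
    }
}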