Example usage of org.nd4j.linalg.compression.CompressedDataBuffer in the nd4j project (by deeplearning4j): the compressPointer method of the Uint8 class.
@Override
protected CompressedDataBuffer compressPointer(DataBuffer.TypeEx srcType, Pointer srcPointer, int length, int elementSize) {
    // UINT8 stores exactly one byte per element, so the target holds `length` bytes.
    BytePointer target = new BytePointer(length);

    // Record the metadata required to decompress this buffer later.
    CompressionDescriptor descriptor = new CompressionDescriptor();
    descriptor.setCompressionType(getCompressionType());
    descriptor.setCompressionAlgorithm(getDescriptor());
    descriptor.setCompressedLength(length);
    descriptor.setOriginalLength(length * elementSize);
    descriptor.setOriginalElementSize(elementSize);
    descriptor.setNumberOfElements(length);

    CompressedDataBuffer compressed = new CompressedDataBuffer(target, descriptor);
    // Element-wise conversion srcType -> UINT8, written into the freshly allocated target.
    Nd4j.getNDArrayFactory().convertDataEx(srcType, srcPointer, DataBuffer.TypeEx.UINT8, target, length);
    return compressed;
}
Example usage of org.nd4j.linalg.compression.CompressedDataBuffer in the nd4j project (by deeplearning4j): the convertDataEx method of the CpuNDArrayFactory class.
/**
* This method converts Half-precision databuffer to current dType buffer.
*
* @param buffer
* @return
*/
/*
@Override
public DataBuffer restoreFromHalfs(DataBuffer buffer) {
if (buffer.dataType() != DataBuffer.Type.COMPRESSED)
throw new IllegalStateException("DataBuffer contains wrong data: " + buffer.dataType());
CompressedDataBuffer comp = (CompressedDataBuffer) buffer;
CompressionDescriptor descriptor = comp.getCompressionDescriptor();
DataBuffer targetBuffer = Nd4j.createBuffer(descriptor.getCompressedLength() / 2);
if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) {
nativeOps.convertHalfsToDoubles(
null,
comp.addressPointer(),
(int) descriptor.getCompressedLength() / 2,
targetBuffer.addressPointer()
);
} else if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
nativeOps.convertHalfsToFloats(
null,
comp.addressPointer(),
(int) descriptor.getCompressedLength() / 2,
targetBuffer.addressPointer()
);
} else {
throw new UnsupportedOperationException("Target dtype isn't supported: " + Nd4j.dataType());
}
return targetBuffer;
}
*/
/**
* This method converts Single/Double precision databuffer to Half-precision databuffer
*
* @param buffer
* @return
*/
/*@Override
public DataBuffer convertToHalfs(DataBuffer buffer) {
// we allocate pointer
ShortPointer pointer = new ShortPointer(buffer.length());
if (buffer.dataType() == DataBuffer.Type.DOUBLE) {
nativeOps.convertDoublesToHalfs(
null,
buffer.addressPointer(),
(int) buffer.length(),
pointer
);
} else if (buffer.dataType() == DataBuffer.Type.FLOAT) {
nativeOps.convertFloatsToHalfs(
null,
buffer.addressPointer(),
(int) buffer.length(),
pointer
);
} else {
throw new UnsupportedOperationException("Source dtype isn't supported: " + buffer.dataType());
}
CompressionDescriptor descriptor = new CompressionDescriptor(buffer, new Float16());
descriptor.setCompressedLength(buffer.length() * 2);
CompressedDataBuffer result = new CompressedDataBuffer(pointer, descriptor);
return result;
}
*/
/**
 * Converts the given array's backing data buffer from one (possibly compressed)
 * data type to another, in place, and updates the array's compression flag.
 *
 * @param typeSrc data type of the source buffer
 * @param source  array whose buffer will be converted (must not be a view)
 * @param typeDst target data type
 * @return the same array instance, with its data buffer replaced
 */
@Override
public INDArray convertDataEx(DataBuffer.TypeEx typeSrc, INDArray source, DataBuffer.TypeEx typeDst) {
    // Views share storage with a parent array; converting in place would corrupt it.
    if (source.isView())
        throw new UnsupportedOperationException("Impossible to compress View. Consider using dup() before. ");

    DataBuffer converted = convertDataEx(typeSrc, source.data(), typeDst);
    source.setData(converted);
    // The compression flag mirrors whether the conversion produced a compressed buffer.
    source.markAsCompressed(converted instanceof CompressedDataBuffer);
    return source;
}
Example usage of org.nd4j.linalg.compression.CompressedDataBuffer in the nd4j project (by deeplearning4j): the toArrayAndByteBuffer method of the BinarySerde class.
/**
 * Create an ndarray from an existing ByteBuffer, starting at the given offset.
 *
 * @param buffer the serialized buffer to read from (heap or direct)
 * @param offset position within the buffer where the serialized array starts
 * @return pair of the deserialized array and the (advanced) native-ordered buffer
 */
public static Pair<INDArray, ByteBuffer> toArrayAndByteBuffer(ByteBuffer buffer, int offset) {
    // BUGFIX: the previous condition was "buffer == null ? buffer.array() ..." — a
    // guaranteed NullPointerException on the null path. The intent is the
    // direct-vs-heap distinction: direct buffers are usable as-is, heap buffers
    // must be copied into direct memory for the native pointer wrapping below.
    ByteBuffer byteBuffer = buffer.isDirect()
            ? buffer.order(ByteOrder.nativeOrder())
            : ByteBuffer.allocateDirect(buffer.array().length).put(buffer.array()).order(ByteOrder.nativeOrder());
    // bump the byte buffer to the proper position
    byteBuffer.position(offset);
    int rank = byteBuffer.getInt();
    if (rank < 0)
        throw new IllegalStateException("Found negative integer. Corrupt serialization?");
    // get the shape buffer length to create the shape information buffer
    int shapeBufferLength = Shape.shapeInfoLength(rank);
    // create the ndarray shape information
    DataBuffer shapeBuff = Nd4j.createBufferDetached(new int[shapeBufferLength]);
    // compute the databuffer opType from the index
    DataBuffer.Type type = DataBuffer.Type.values()[byteBuffer.getInt()];
    for (int i = 0; i < shapeBufferLength; i++) {
        shapeBuff.put(i, byteBuffer.getInt());
    }
    // at this point we've consumed: rank, data opType, and the shape buffer ints
    if (type != DataBuffer.Type.COMPRESSED) {
        ByteBuffer slice = byteBuffer.slice();
        // wrap the data buffer for the last bit
        DataBuffer buff = Nd4j.createBuffer(slice, type, Shape.length(shapeBuff));
        // advance past the data
        int position = byteBuffer.position() + (buff.getElementSize() * (int) buff.length());
        byteBuffer.position(position);
        // create the final array
        // TODO: see how to avoid dup here
        INDArray arr = Nd4j.createArrayFromShapeBuffer(buff.dup(), shapeBuff.dup());
        return Pair.of(arr, byteBuffer);
    } else {
        CompressionDescriptor compressionDescriptor = CompressionDescriptor.fromByteBuffer(byteBuffer);
        ByteBuffer slice = byteBuffer.slice();
        // ensure that we only deal with the slice of the buffer that is actually the data
        BytePointer byteBufferPointer = new BytePointer(slice);
        // create a compressed array based on the rest of the data left in the buffer
        CompressedDataBuffer compressedDataBuffer = new CompressedDataBuffer(byteBufferPointer, compressionDescriptor);
        // TODO: see how to avoid dup()
        INDArray arr = Nd4j.createArrayFromShapeBuffer(compressedDataBuffer.dup(), shapeBuff.dup());
        // advance past the compressed payload
        int compressLength = (int) compressionDescriptor.getCompressedLength();
        byteBuffer.position(byteBuffer.position() + compressLength);
        return Pair.of(arr, byteBuffer);
    }
}
Example usage of org.nd4j.linalg.compression.CompressedDataBuffer in the nd4j project (by deeplearning4j): the byteBufferSizeFor method of the BinarySerde class.
/**
* Returns the byte buffer size for the given
* ndarray. This is an auxillary method
* for determining the size of the buffer
* size to allocate for sending an ndarray via
* the aeron media driver.
*
* The math break down for uncompressed is:
* 2 ints for rank of the array and an ordinal representing the data opType of the data buffer
* The rest is in order:
* shape information
* data buffer
*
* The math break down for compressed is:
* 2 ints for rank and an ordinal representing the data opType for the data buffer
*
* The rest is in order:
* shape information
* codec information
* data buffer
*
* @param arr the array to compute the size for
* @return the size of the byte buffer that was allocated
*/
public static int byteBufferSizeFor(INDArray arr) {
    // Two leading 4-byte ints: rank and the data-type ordinal.
    final int headerBytes = 2 * 4;
    ByteBuffer dataBuffer = arr.data().pointer().asByteBuffer().order(ByteOrder.nativeOrder());
    ByteBuffer shapeInfo = arr.shapeInfoDataBuffer().pointer().asByteBuffer().order(ByteOrder.nativeOrder());
    int size = headerBytes + dataBuffer.limit() + shapeInfo.limit();
    if (arr.isCompressed()) {
        // Compressed arrays additionally serialize their codec descriptor.
        CompressedDataBuffer compressed = (CompressedDataBuffer) arr.data();
        size += compressed.getCompressionDescriptor().toByteBuffer().limit();
    }
    return size;
}
Example usage of org.nd4j.linalg.compression.CompressedDataBuffer in the nd4j project (by deeplearning4j): the memcpy method of the CudaMemoryManager class.
/**
 * This method provides basic memcpy functionality with respect to target environment.
 *
 * Compressed buffers are handled with host-side copies; only the case where
 * neither buffer is compressed is delegated to the allocator's device-aware memcpy.
 *
 * @param dstBuffer destination buffer (compressed or regular)
 * @param srcBuffer source buffer (compressed or regular)
 */
@Override
public void memcpy(DataBuffer dstBuffer, DataBuffer srcBuffer) {
// NOTE(review): context is only referenced by the commented-out async path below;
// it is currently unused by the live code.
CudaContext context = (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext();
if (dstBuffer instanceof CompressedDataBuffer && !(srcBuffer instanceof CompressedDataBuffer)) {
// destination is compressed, source isn't
AllocationPoint srcPoint = AtomicAllocator.getInstance().getAllocationPoint(srcBuffer);
long size = srcBuffer.getElementSize() * srcBuffer.length();
if (!srcPoint.isActualOnHostSide()) {
// host copy is stale: pull current data device -> host before the host-side memcpy below
AtomicAllocator.getInstance().synchronizeHostData(srcBuffer);
// Pointer src = AtomicAllocator.getInstance().getPointer(srcBuffer, context);
// NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(dstBuffer.addressPointer(), src, size, 2, context.getSpecialStream());
// context.syncSpecialStream();
}
// else {
// copying host -> host
Pointer src = AtomicAllocator.getInstance().getHostPointer(srcBuffer);
Pointer.memcpy(dstBuffer.addressPointer(), src, size);
// }
} else if (!(dstBuffer instanceof CompressedDataBuffer) && srcBuffer instanceof CompressedDataBuffer) {
// destination is NOT compressed, source is compressed: plain host copy,
// then mark the host replica as the most recent write so the device copy is refreshed lazily
AllocationPoint dstPoint = AtomicAllocator.getInstance().getAllocationPoint(dstBuffer);
long size = srcBuffer.getElementSize() * srcBuffer.length();
Pointer.memcpy(dstBuffer.addressPointer(), srcBuffer.addressPointer(), size);
dstPoint.tickHostWrite();
} else if (dstBuffer instanceof CompressedDataBuffer && srcBuffer instanceof CompressedDataBuffer) {
// both buffers are compressed, just fire memcpy
Pointer.memcpy(dstBuffer.addressPointer(), srcBuffer.addressPointer(), srcBuffer.length() * srcBuffer.getElementSize());
} else {
// both buffers are NOT compressed: delegate to the allocator's device-aware copy
AtomicAllocator.getInstance().memcpy(dstBuffer, srcBuffer);
}
}
Aggregations