Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class NativeOpExecutioner, method executeGraph.
@Override
public Map<String, INDArray> executeGraph(long id, Map<String, INDArray> map) {
    val ptrBuffers = new PointerPointer(map.size());
    val ptrShapes = new PointerPointer(map.size());
    val ptrIndices = new IntPointer(map.size());

    // Marshal the input arrays into parallel native pointer tables
    int cnt = 0;
    val keySet = new ArrayList<String>(map.keySet());
    for (val key : keySet) {
        val array = map.get(key);
        ptrBuffers.put(cnt, array.data().addressPointer());
        ptrShapes.put(cnt, array.shapeInfoDataBuffer().addressPointer());
        ptrIndices.put(cnt, cnt);
        cnt++;
    }

    val newMap = new LinkedHashMap<String, INDArray>();

    if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
        val result = (Nd4jCpu.FloatVariablesSet) loop.executeStoredGraphFloat(null, id, ptrBuffers, ptrShapes, ptrIndices, map.size());

        val status = OpStatus.byNumber(result.status());
        if (status != OpStatus.ND4J_STATUS_OK)
            throw new ND4JIllegalStateException("Op execution failed: " + status);

        for (int e = 0; e < result.size(); e++) {
            val var = result.at(e);
            val nodeId = var.id();

            val shapeInfo = var.getNDArray().shapeInfo();
            val buffer = var.getNDArray().buffer();

            // Rebuild a Java-side shape descriptor: rank, shape, strides, plus 4 trailing fields
            val rank = shapeInfo.get(0);
            val jshape = new int[rank * 2 + 4];
            for (int i = 0; i < jshape.length; i++) {
                jshape[i] = shapeInfo.get(i);
            }

            val shapeOf = Shape.shapeOf(jshape);
            val stridesOf = Shape.stridesOf(jshape);
            val order = Shape.order(jshape);

            // Copy the native result buffer into a freshly allocated INDArray
            val array = Nd4j.create(shapeOf, stridesOf, 0, order);
            Pointer.memcpy(array.data().addressPointer(), buffer, ArrayUtil.prod(shapeOf) * Nd4j.sizeOfDataType());

            newMap.put(keySet.get(nodeId), array);
        }
        loop.deleteVariablesSetFloat(result);
    } else if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) {
        val result = (Nd4jCpu.DoubleVariablesSet) loop.executeStoredGraphDouble(null, id, ptrBuffers, ptrShapes, ptrIndices, map.size());

        val status = OpStatus.byNumber(result.status());
        if (status != OpStatus.ND4J_STATUS_OK)
            throw new ND4JIllegalStateException("Op execution failed: " + status);

        for (int e = 0; e < result.size(); e++) {
            val var = result.at(e);
            val nodeId = var.id();

            val shapeInfo = var.getNDArray().shapeInfo();
            val buffer = var.getNDArray().buffer();

            val rank = shapeInfo.get(0);
            val jshape = new int[rank * 2 + 4];
            for (int i = 0; i < jshape.length; i++) {
                jshape[i] = shapeInfo.get(i);
            }

            val shapeOf = Shape.shapeOf(jshape);
            val stridesOf = Shape.stridesOf(jshape);
            val order = Shape.order(jshape);

            val array = Nd4j.create(shapeOf, stridesOf, 0, order);
            Pointer.memcpy(array.data().addressPointer(), buffer, ArrayUtil.prod(shapeOf) * Nd4j.sizeOfDataType());

            newMap.put(keySet.get(nodeId), array);
        }
        loop.deleteVariablesSetDouble(result);
    } else if (Nd4j.dataType() == DataBuffer.Type.HALF) {
        // NOTE: the original source cast this result to Nd4jCpu.DoubleVariablesSet, which looks
        // like a copy-paste slip; the half-precision call should yield the half-precision set
        // (the deleteVariablesSetHalf call below suggests the bindings expose one)
        val result = (Nd4jCpu.HalfVariablesSet) loop.executeStoredGraphHalf(null, id, ptrBuffers, ptrShapes, ptrIndices, map.size());

        val status = OpStatus.byNumber(result.status());
        if (status != OpStatus.ND4J_STATUS_OK)
            throw new ND4JIllegalStateException("Op execution failed: " + status);

        for (int e = 0; e < result.size(); e++) {
            val var = result.at(e);
            val nodeId = var.id();

            val shapeInfo = var.getNDArray().shapeInfo();
            val buffer = var.getNDArray().buffer();

            val rank = shapeInfo.get(0);
            val jshape = new int[rank * 2 + 4];
            for (int i = 0; i < jshape.length; i++) {
                jshape[i] = shapeInfo.get(i);
            }

            val shapeOf = Shape.shapeOf(jshape);
            val stridesOf = Shape.stridesOf(jshape);
            val order = Shape.order(jshape);

            val array = Nd4j.create(shapeOf, stridesOf, 0, order);
            Pointer.memcpy(array.data().addressPointer(), buffer, ArrayUtil.prod(shapeOf) * Nd4j.sizeOfDataType());

            newMap.put(keySet.get(nodeId), array);
        }
        loop.deleteVariablesSetHalf(result);
    }

    return newMap;
}
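For context, a minimal calling sketch is shown below. The graph id (119L) and the input name ("input") are hypothetical placeholders, a graph is assumed to have been stored with the executioner under that id beforehand, and the executioner interface is assumed to expose executeGraph as overridden above.

// Hypothetical usage: feed one named input into a previously stored graph
// and read back the named outputs. The id 119L and key "input" are placeholders.
Map<String, INDArray> inputs = new LinkedHashMap<>();
inputs.put("input", Nd4j.create(new int[] {2, 2}));

Map<String, INDArray> outputs = Nd4j.getExecutioner().executeGraph(119L, inputs);
for (Map.Entry<String, INDArray> entry : outputs.entrySet())
    System.out.println(entry.getKey() + " -> " + entry.getValue());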
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class CpuNDArrayFactory, method average.
/**
 * This method averages the input arrays and writes the result into the target array.
 *
 * @param target the array that receives the averaged values (passed to the native call; may be null)
 * @param arrays the arrays to average; all must be contiguous and of equal length
 * @return the target array
 */
@Override
public INDArray average(INDArray target, INDArray[] arrays) {
    if (arrays == null || arrays.length == 0)
        throw new RuntimeException("Input arrays are missing");

    if (arrays.length == 1)
        return target.assign(arrays[0]);

    long len = target != null ? target.lengthLong() : arrays[0].lengthLong();

    // Collect host pointers to every input buffer for the native call
    PointerPointer dataPointers = new PointerPointer(arrays.length);
    for (int i = 0; i < arrays.length; i++) {
        Nd4j.getCompressor().autoDecompress(arrays[i]);

        if (arrays[i].elementWiseStride() != 1)
            throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");

        if (arrays[i].lengthLong() != len)
            throw new ND4JIllegalStateException("All arrays should have equal length for averaging");

        dataPointers.put(i, arrays[i].data().addressPointer());
    }

    if (arrays[0].data().dataType() == DataBuffer.Type.DOUBLE) {
        nativeOps.averageDouble(null, dataPointers, target == null ? null : (DoublePointer) target.data().addressPointer(), arrays.length, len, true);
    } else if (arrays[0].data().dataType() == DataBuffer.Type.FLOAT) {
        nativeOps.averageFloat(null, dataPointers, target == null ? null : (FloatPointer) target.data().addressPointer(), arrays.length, len, true);
    } else {
        nativeOps.averageHalf(null, dataPointers, target == null ? null : (ShortPointer) target.data().addressPointer(), arrays.length, len, true);
    }

    return target;
}
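As a usage sketch, assuming Nd4j.averageAndPropagate is the public entry point that delegates to this backend method (the shapes are arbitrary):

// Three equal-length, contiguous arrays averaged into a target; with the
// propagate flag true (as in the native call above), the inputs are
// overwritten with the average as well.
INDArray a = Nd4j.ones(2, 2);
INDArray b = Nd4j.ones(2, 2).muli(2);
INDArray c = Nd4j.ones(2, 2).muli(3);

INDArray mean = Nd4j.create(2, 2);
Nd4j.averageAndPropagate(mean, new INDArray[] {a, b, c});
// mean now holds 2.0 in every cell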
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class CpuNDArrayFactory, method shuffle.
/**
 * Symmetric in-place shuffle of ndarrays along a specified set of dimensions.
 * Each array in the list should have its own dimension array at the same index of the dimensions list.
 *
 * @param arrays     the ndarrays to shuffle
 * @param rnd        the source of randomness for the permutation
 * @param dimensions the dimensions to shuffle along, one int[] per array (or a single shared one)
 */
@Override
public void shuffle(List<INDArray> arrays, Random rnd, List<int[]> dimensions) {
    if (dimensions == null || dimensions.size() == 0)
        throw new RuntimeException("Dimension can't be null or 0-length");

    if (arrays == null || arrays.size() == 0)
        throw new RuntimeException("No input arrays provided");

    if (dimensions.size() > 1 && arrays.size() != dimensions.size())
        throw new IllegalStateException("Number of dimensions do not match number of arrays to shuffle");

    // A TAD (tensor along dimension) is one sub-tensor; its length is the product
    // of the shuffled dimensions of the first array
    int tadLength = 1;
    for (int i = 0; i < dimensions.get(0).length; i++) {
        tadLength *= arrays.get(0).shape()[dimensions.get(0)[i]];
    }

    int numTads = arrays.get(0).length() / tadLength;

    // One random permutation, applied symmetrically to every array
    int[] map = ArrayUtil.buildInterleavedVector(rnd, numTads);

    PointerPointer dataPointers = new PointerPointer(arrays.size());
    PointerPointer shapePointers = new PointerPointer(arrays.size());
    PointerPointer tadPointers = new PointerPointer(arrays.size());
    PointerPointer offsetPointers = new PointerPointer(arrays.size());
    PointerPointer dummy = new PointerPointer(new Pointer[] { null });

    List<Pair<DataBuffer, DataBuffer>> list = new ArrayList<>();

    TADManager tadManager = Nd4j.getExecutioner().getTADManager();

    IntPointer ptrMap = new IntPointer(map);

    for (int i = 0; i < arrays.size(); i++) {
        INDArray array = arrays.get(i);
        Nd4j.getCompressor().autoDecompress(array);

        int[] dimension = dimensions.size() > 1 ? dimensions.get(i) : dimensions.get(0);

        Pair<DataBuffer, DataBuffer> tadBuffers = tadManager.getTADOnlyShapeInfo(array, dimension);
        list.add(tadBuffers);

        Pointer hostTadShapeInfo = tadBuffers.getFirst().addressPointer();

        DataBuffer offsets = tadBuffers.getSecond();
        // The null check must precede the length check (the original source had them reversed)
        if (offsets == null)
            throw new ND4JIllegalStateException("Offsets for shuffle can't be null");

        if (offsets.length() != numTads)
            throw new ND4JIllegalStateException("Can't symmetrically shuffle arrays with non-equal number of TADs");

        dataPointers.put(i, array.data().addressPointer());
        shapePointers.put(i, array.shapeInfoDataBuffer().addressPointer());
        offsetPointers.put(i, offsets.addressPointer());
        tadPointers.put(i, hostTadShapeInfo);
    }

    if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) {
        nativeOps.shuffleDouble(dummy, dataPointers, shapePointers, dataPointers, shapePointers, arrays.size(), ptrMap, tadPointers, offsetPointers);
    } else if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
        nativeOps.shuffleFloat(dummy, dataPointers, shapePointers, dataPointers, shapePointers, arrays.size(), ptrMap, tadPointers, offsetPointers);
    } else {
        // HALF precision shuffle is not implemented in this branch
    }

    // Touch the pointer containers so they stay strongly reachable until the native call has returned
    dataPointers.address();
    shapePointers.address();
    tadPointers.address();
    offsetPointers.address();
}
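A usage sketch follows, assuming the Nd4j.shuffle convenience overload that takes a list of arrays, a java.util.Random, and the dimensions; shuffling two parallel arrays along dimension 1 applies the same row permutation to both.

// Shuffle features and labels with one shared permutation so rows stay aligned.
INDArray features = Nd4j.linspace(1, 12, 12).reshape(4, 3);
INDArray labels = Nd4j.linspace(1, 4, 4).reshape(4, 1);

Nd4j.shuffle(Arrays.asList(features, labels), new Random(42), 1);
// each array yields 4 TADs along dimension 1 (row lengths 3 and 1);
// both arrays receive the identical row ordering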
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class CpuNDArrayFactory, method accumulate.
public INDArray accumulate(INDArray target, INDArray... arrays) {
    if (arrays == null || arrays.length == 0)
        throw new RuntimeException("Input arrays are missing");

    if (arrays.length == 1)
        return target.addi(arrays[0]);

    long len = target.lengthLong();

    // Collect host pointers to every input buffer for the native call
    PointerPointer dataPointers = new PointerPointer(arrays.length);
    for (int i = 0; i < arrays.length; i++) {
        Nd4j.getCompressor().autoDecompress(arrays[i]);

        if (arrays[i].elementWiseStride() != 1)
            throw new ND4JIllegalStateException("Native accumulation is applicable only to continuous INDArrays");

        if (arrays[i].lengthLong() != len)
            throw new ND4JIllegalStateException("All arrays should have equal length for accumulation");

        dataPointers.put(i, arrays[i].data().addressPointer());
    }

    if (target.data().dataType() == DataBuffer.Type.DOUBLE) {
        nativeOps.accumulateDouble(null, dataPointers, (DoublePointer) target.data().addressPointer(), arrays.length, len);
    } else if (target.data().dataType() == DataBuffer.Type.FLOAT) {
        nativeOps.accumulateFloat(null, dataPointers, (FloatPointer) target.data().addressPointer(), arrays.length, len);
    } else {
        nativeOps.accumulateHalf(null, dataPointers, (ShortPointer) target.data().addressPointer(), arrays.length, len);
    }

    return target;
}
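For context, a short usage sketch (Nd4j.accumulate is assumed to be the public entry point that delegates to this backend method):

// Element-wise sum of several arrays into a pre-allocated target.
INDArray sum = Nd4j.zeros(2, 2);
INDArray x = Nd4j.ones(2, 2);
INDArray y = Nd4j.ones(2, 2).muli(4);

Nd4j.accumulate(sum, x, y);
// sum now holds 5.0 in every cell; unlike averaging with propagation,
// the input arrays are left unmodified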
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class JCublasNDArrayFactory, method average.
@Override
public INDArray average(INDArray target, INDArray[] arrays) {
    if (arrays == null || arrays.length == 0)
        throw new RuntimeException("Input arrays are missing");

    if (arrays.length == 1)
        return target.assign(arrays[0]);

    // we do averaging on GPU only if ALL devices have p2p links
    if (nativeOps.isP2PAvailable() && CudaEnvironment.getInstance().getConfiguration().isCrossDeviceAccessAllowed()) {
        Nd4j.getExecutioner().push();

        long len = target != null ? target.lengthLong() : arrays[0].lengthLong();

        AtomicAllocator allocator = AtomicAllocator.getInstance();

        CudaContext context = allocator.getFlowController().prepareAction(target, arrays);

        PointerPointer extras = new PointerPointer(null, // not used
                context.getOldStream(), allocator.getDeviceIdPointer(), new CudaPointer(0));

        Pointer z = target == null ? null : AtomicAllocator.getInstance().getPointer(target, context);

        // Gather device-side addresses of all input buffers
        long[] xPointers = new long[arrays.length];
        for (int i = 0; i < arrays.length; i++) {
            if (arrays[i].elementWiseStride() != 1)
                throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");

            if (arrays[i].lengthLong() != len)
                throw new ND4JIllegalStateException("All arrays should have equal length for averaging");

            AllocationPoint point = allocator.getAllocationPoint(arrays[i]);
            xPointers[i] = point.getPointers().getDevicePointer().address();
            point.tickDeviceWrite();
        }

        // Push the pointer table itself to the device
        CudaDoubleDataBuffer tempX = new CudaDoubleDataBuffer(arrays.length);
        allocator.memcpyBlocking(tempX, new LongPointer(xPointers), xPointers.length * 8, 0);

        PointerPointer x = new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context));

        if (arrays[0].data().dataType() == DataBuffer.Type.DOUBLE) {
            nativeOps.averageDouble(extras, x, target == null ? null : (DoublePointer) z, arrays.length, len, true);
        } else if (arrays[0].data().dataType() == DataBuffer.Type.FLOAT) {
            nativeOps.averageFloat(extras, x, target == null ? null : (FloatPointer) z, arrays.length, len, true);
        } else {
            nativeOps.averageHalf(extras, x, target == null ? null : (ShortPointer) z, arrays.length, len, true);
        }

        allocator.getFlowController().registerAction(context, target, arrays);

        // Touch tempX so it stays reachable until the native call has completed
        tempX.address();

        return target;
    } else {
        // otherwise we do averaging on the CPU side; all device operations
        // are expected to be complete at this point
        long len = target == null ? arrays[0].lengthLong() : target.lengthLong();

        CudaContext context = (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext();

        PointerPointer dataPointers = new PointerPointer(arrays.length);
        PointerPointer extras = new PointerPointer(null, // not used
                context.getOldStream(), AtomicAllocator.getInstance().getDeviceIdPointer(), new CudaPointer(1));

        for (int i = 0; i < arrays.length; i++) {
            Nd4j.getCompressor().autoDecompress(arrays[i]);

            if (arrays[i].elementWiseStride() != 1)
                throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");

            if (arrays[i].lengthLong() != len)
                throw new ND4JIllegalStateException("All arrays should have equal length for averaging");

            dataPointers.put(i, AtomicAllocator.getInstance().getHostPointer(arrays[i]));
        }

        if (arrays[0].data().dataType() == DataBuffer.Type.DOUBLE) {
            nativeOps.averageDouble(extras, dataPointers, target == null ? null : (DoublePointer) AtomicAllocator.getInstance().getHostPointer(target), arrays.length, len, true);
        } else if (arrays[0].data().dataType() == DataBuffer.Type.FLOAT) {
            nativeOps.averageFloat(extras, dataPointers, target == null ? null : (FloatPointer) AtomicAllocator.getInstance().getHostPointer(target), arrays.length, len, true);
        } else {
            nativeOps.averageHalf(extras, dataPointers, target == null ? null : (ShortPointer) AtomicAllocator.getInstance().getHostPointer(target), arrays.length, len, true);
        }

        if (target != null)
            AtomicAllocator.getInstance().getAllocationPoint(target).tickHostWrite();

        // TODO: make propagation optional maybe?
        for (int i = 0; i < arrays.length; i++) {
            AtomicAllocator.getInstance().getAllocationPoint(arrays[i]).tickHostWrite();
        }

        return target;
    }
}
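The branch above takes the device-side path only when peer-to-peer access is both available and allowed by the configuration. As a configuration sketch, assuming CudaEnvironment's documented cross-device flag:

// Allow cross-device (P2P) access before executing ops, so multi-GPU
// averaging can stay on the device; assumes the CudaEnvironment config API.
CudaEnvironment.getInstance().getConfiguration().allowCrossDeviceAccess(true);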