Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class NativeOpExecutioner, method exec(Accumulation, int...).
@Override
public INDArray exec(Accumulation op, int... dimension) {
dimension = Shape.normalizeAxis(op.x().rank(), dimension);
validateDataType(Nd4j.dataType(), op);
if (extraz.get() == null)
extraz.set(new PointerPointer(32));
int[] maxShape = Shape.getMaxShape(op.x(), op.y());
for (int i = 0; i < dimension.length; i++) if (dimension[i] >= maxShape.length && dimension[i] != Integer.MAX_VALUE)
throw new ND4JIllegalStateException("Op target dimension " + Arrays.toString(dimension) + " contains an element higher than the rank of op.X: [" + op.x().rank() + "]");
for (int i = 0; i < dimension.length; i++) {
if (dimension[i] < 0)
dimension[i] += op.x().rank();
}
// do op along all dimensions
if (dimension.length == op.x().rank())
dimension = new int[] { Integer.MAX_VALUE };
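// Integer.MAX_VALUE is the sentinel for "reduce over the whole array", detected by Shape.wholeArrayDimension below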
int[] retShape;
if (Shape.wholeArrayDimension(dimension))
retShape = new int[] { 1, 1 };
else
retShape = ArrayUtil.removeIndex(maxShape, dimension);
// ensure vector is proper shape
if (retShape.length == 1) {
if (dimension[0] == 0)
retShape = new int[] { 1, retShape[0] };
else
retShape = new int[] { retShape[0], 1 };
} else if (retShape.length == 0) {
retShape = new int[] { 1, 1 };
}
if (op.x().isVector() && op.x().length() == ArrayUtil.prod(retShape) && ArrayUtil.prodLong(retShape) > 1 && op.y() == null)
return op.noOp();
/**
* This is the result array.
* We create it only if one was not provided beforehand.
*/
INDArray ret;
if (op.z() == null || op.z() == op.x()) {
if (op.isComplexAccumulation()) {
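// complex ("AllDistances") accumulation produces one value per (x TAD, y TAD) pair, hence an xT-by-yT result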
int xT = op.x().tensorssAlongDimension(dimension);
int yT = op.y().tensorssAlongDimension(dimension);
ret = Nd4j.create(xT, yT);
} else {
if (op.y() != null) {
// 2 options here: either pairwise, equal sizes - OR every X TAD vs. entirety of Y
if (op.x().lengthLong() == op.y().lengthLong()) {
// Pairwise
if (op.x().tensorssAlongDimension(dimension) != op.y().tensorssAlongDimension(dimension)) {
throw new ND4JIllegalStateException("Number of TADs along dimension don't match: (x shape = " + Arrays.toString(op.x().shape()) + ", y shape = " + Arrays.toString(op.y().shape()) + ", dimension = " + Arrays.toString(dimension) + ")");
}
} else {
// Every X TAD vs. entirety of Y
val xTADSize = op.x().lengthLong() / op.x().tensorssAlongDimension(dimension);
if (xTADSize != op.y().length()) {
throw new ND4JIllegalStateException("Size of TADs along dimension don't match for pairwise execution:" + " (x TAD size = " + xTADSize + ", y size = " + op.y().lengthLong());
}
}
}
if (op.x().data().dataType() == DataBuffer.Type.DOUBLE)
ret = Nd4j.valueArrayOf(retShape, op.zeroDouble());
else
ret = Nd4j.valueArrayOf(retShape, op.zeroFloat());
}
op.setZ(ret);
} else {
// compare length
if (!op.isComplexAccumulation() && op.z().lengthLong() != ArrayUtil.prodLong(retShape))
throw new ND4JIllegalStateException("Shape of target array for reduction [" + Arrays.toString(op.z().shape()) + "] doesn't match expected [" + Arrays.toString(retShape) + "]");
else if (op.isComplexAccumulation()) {
int xT = op.x().tensorssAlongDimension(dimension);
int yT = op.y().tensorssAlongDimension(dimension);
if (op.z().lengthLong() != xT * yT)
throw new ND4JIllegalStateException("Shape of target array for reduction [" + Arrays.toString(op.z().shape()) + "] doesn't match expected [" + (xT * yT) + "]");
}
if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) {
op.z().assign(op.zeroDouble());
} else {
op.z().assign(op.zeroFloat());
}
ret = op.z();
}
/**
* Returns the {@link Shape#createShapeInformation(int[], int[], int, int, char)}
* and the associated offsets for each {@link INDArray#tensorAlongDimension(int, int...)}
* The first item is the shape information. The second one is the offsets.
*/
Pair<DataBuffer, DataBuffer> tadBuffers = tadManager.getTADOnlyShapeInfo(op.x(), dimension);
Pair<DataBuffer, DataBuffer> yTadBuffers = null;
/**
* Note that we use addresses in libnd4j.
* A reinterpret cast in C recovers the pointer from the long
* we pass over JNI. This keeps the JNI overhead low.
*/
Pointer hostTadShapeInfo = tadBuffers.getFirst().addressPointer();
DataBuffer offsets = tadBuffers.getSecond();
Pointer hostTadOffsets = offsets == null ? null : offsets.addressPointer();
// check whether this is a TAD vs. TAD comparison or TAD vs. full array; if the latter, we take a slightly different route
boolean tvf = false;
if (op.y() != null) {
if (op.x().tensorAlongDimension(0, dimension).lengthLong() == op.y().lengthLong()) {
tvf = true;
}
}
if (op.isComplexAccumulation()) {
yTadBuffers = tadManager.getTADOnlyShapeInfo(op.y(), dimension);
if (op.x().tensorAlongDimension(0, dimension).lengthLong() != op.y().tensorAlongDimension(0, dimension).lengthLong())
throw new ND4JIllegalStateException("Impossible to issue AllDistances operation: TAD lengths mismatch along given dimension");
}
/**
* This is a pointer to a pointer in c.
*/
// FIXME: we need something better than the 3rd element being non-null here...
PointerPointer dummy = extraz.get().put(hostTadShapeInfo, hostTadOffsets, tvf ? hostTadOffsets : null);
long st = profilingHookIn(op, tadBuffers.getFirst());
/**
* Note: because dimension arrays don't change,
* we use a {@link ConstantHandler} which knows how to reserve memory
* for immutable buffers holding the dimensions.
* This gives us a pointer which is passed around in libnd4j.
*/
Pointer dimensionAddress = constantHandler.getConstantBuffer(dimension).addressPointer();
if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) {
if (op instanceof Variance) {
if (ret.isScalar()) {
ret.putScalar(0, loop.execSummaryStatsScalarDouble(dummy, op.opNum(), (DoublePointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (DoublePointer) getPointerForExtraArgs(op), true));
} else {
Variance var = (Variance) op;
loop.execSummaryStatsDouble(dummy, op.opNum(), (DoublePointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (DoublePointer) getPointerForExtraArgs(op), (DoublePointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), (IntPointer) dimensionAddress, dimension.length, var.isBiasCorrected());
}
} else if (op.y() != null && op.getOpType() == Op.Type.REDUCE3) {
// pairwise reduction, e.g. a similarity measure between two arrays
if (op.isComplexAccumulation()) {
loop.execReduce3AllDouble(dummy, op.opNum(), (DoublePointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (DoublePointer) getPointerForExtraArgs(op), (DoublePointer) op.y().data().addressPointer(), (IntPointer) op.y().shapeInfoDataBuffer().addressPointer(), (DoublePointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), (IntPointer) dimensionAddress, dimension.length, (IntPointer) tadBuffers.getFirst().addressPointer(), new LongPointerWrapper(tadBuffers.getSecond().addressPointer()), (IntPointer) yTadBuffers.getFirst().addressPointer(), new LongPointerWrapper(yTadBuffers.getSecond().addressPointer()));
} else if (ret.isScalar()) {
ret.putScalar(0, loop.execReduce3ScalarDouble(dummy, op.opNum(), (DoublePointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (DoublePointer) getPointerForExtraArgs(op), (DoublePointer) op.y().data().addressPointer(), (IntPointer) op.y().shapeInfoDataBuffer().addressPointer()));
} else {
loop.execReduce3Double(dummy, op.opNum(), (DoublePointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (DoublePointer) getPointerForExtraArgs(op), (DoublePointer) op.y().data().addressPointer(), (IntPointer) op.y().shapeInfoDataBuffer().addressPointer(), (DoublePointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), (IntPointer) dimensionAddress, dimension.length);
}
} else {
if (ret.isScalar()) {
ret.putScalar(0, loop.execReduceScalarDouble(dummy, op.opNum(), (DoublePointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (DoublePointer) getPointerForExtraArgs(op)));
} else {
loop.execReduceDouble(dummy, op.opNum(), (DoublePointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (DoublePointer) getPointerForExtraArgs(op), (DoublePointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), (IntPointer) dimensionAddress, dimension.length);
}
}
} else {
if (op instanceof Variance) {
Variance variance = (Variance) op;
if (ret.isScalar()) {
ret.putScalar(0, loop.execSummaryStatsScalarFloat(dummy, op.opNum(), (FloatPointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (FloatPointer) getPointerForExtraArgs(op), variance.isBiasCorrected()));
} else {
loop.execSummaryStatsFloat(dummy, op.opNum(), (FloatPointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (FloatPointer) getPointerForExtraArgs(op), (FloatPointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), (IntPointer) dimensionAddress, dimension.length, variance.isBiasCorrected());
}
} else if (op.y() != null && op.getOpType() == Op.Type.REDUCE3) {
if (op.isComplexAccumulation()) {
loop.execReduce3AllFloat(dummy, op.opNum(), (FloatPointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (FloatPointer) getPointerForExtraArgs(op), (FloatPointer) op.y().data().addressPointer(), (IntPointer) op.y().shapeInfoDataBuffer().addressPointer(), (FloatPointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), (IntPointer) dimensionAddress, dimension.length, (IntPointer) tadBuffers.getFirst().addressPointer(), new LongPointerWrapper(tadBuffers.getSecond().addressPointer()), (IntPointer) yTadBuffers.getFirst().addressPointer(), new LongPointerWrapper(yTadBuffers.getSecond().addressPointer()));
} else if (ret.isScalar()) {
ret.putScalar(0, loop.execReduce3ScalarFloat(dummy, op.opNum(), (FloatPointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (FloatPointer) getPointerForExtraArgs(op), (FloatPointer) op.y().data().addressPointer(), (IntPointer) op.y().shapeInfoDataBuffer().addressPointer()));
} else {
loop.execReduce3Float(dummy, op.opNum(), (FloatPointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (FloatPointer) getPointerForExtraArgs(op), (FloatPointer) op.y().data().addressPointer(), (IntPointer) op.y().shapeInfoDataBuffer().addressPointer(), (FloatPointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), (IntPointer) dimensionAddress, dimension.length);
}
} else {
if (ret.isScalar()) {
ret.putScalar(0, loop.execReduceScalarFloat(dummy, op.opNum(), (FloatPointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (FloatPointer) getPointerForExtraArgs(op)));
} else {
loop.execReduceFloat(dummy, op.opNum(), (FloatPointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (FloatPointer) getPointerForExtraArgs(op), (FloatPointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), (IntPointer) dimensionAddress, dimension.length);
}
}
}
return ret;
}
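For context, a minimal usage sketch of this reduction path. It assumes the Sum accumulation op from org.nd4j.linalg.api.ops.impl.accum and the standard Nd4j factory; the exact result shape (row vs. column vector) follows the retShape fix-up above.
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.impl.accum.Sum;
import org.nd4j.linalg.factory.Nd4j;
public class AccumulationUsageSketch {
    public static void main(String[] args) {
        // 3x4 matrix of ones
        INDArray x = Nd4j.ones(3, 4);
        // reduce along dimension 1: one sum per row, routed through exec(Accumulation, int...)
        INDArray rowSums = Nd4j.getExecutioner().exec(new Sum(x), 1);
        System.out.println(rowSums); // expected: three values of 4.0
    }
}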
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class NativeOpExecutioner, method bitmapEncode.
@Override
public long bitmapEncode(INDArray indArray, INDArray target, double threshold) {
long length = indArray.lengthLong();
long tLen = target.data().length();
if (tLen != (length / 16 + 5))
throw new ND4JIllegalStateException("Length of target array should be " + (length / 16 + 5));
if (target.data().dataType() != DataBuffer.Type.INT)
throw new ND4JIllegalStateException("Target array should have INT dataType");
DataBuffer buffer = target.data();
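// 4-int header written below: [0] = length, [1] = length, [2] = float bits of the threshold, [3] = encoding format id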
buffer.put(0, (int) length);
buffer.put(1, (int) length);
buffer.put(2, Float.floatToIntBits((float) threshold));
// format id
buffer.put(3, ThresholdCompression.BITMAP_ENCODING);
long affected = 0;
if (indArray.data().dataType() == DataBuffer.Type.FLOAT) {
affected = loop.encodeBitmapFloat(null, (FloatPointer) indArray.data().addressPointer(), length, (IntPointer) buffer.addressPointer(), (float) threshold);
} else if (indArray.data().dataType() == DataBuffer.Type.DOUBLE) {
affected = loop.encodeBitmapDouble(null, (DoublePointer) indArray.data().addressPointer(), length, (IntPointer) buffer.addressPointer(), (float) threshold);
} else
throw new UnsupportedOperationException("HALF precision isn't supported on CPU yet");
return affected;
}
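A minimal sketch of calling this method. The target sizing follows the length / 16 + 5 check above; constructing the INT-typed target through the data-buffer factory is an assumption about the surrounding Nd4j API, not something shown in this excerpt.
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
public class BitmapEncodeUsageSketch {
    public static void main(String[] args) {
        INDArray source = Nd4j.rand(1, 1024).subi(0.5);
        // 4 header ints plus one bitmap int per 16 source elements
        int tLen = (int) (source.lengthLong() / 16 + 5);
        INDArray target = Nd4j.create(Nd4j.getDataBufferFactory().createInt(tLen));
        long updated = Nd4j.getExecutioner().bitmapEncode(source, target, 1e-3);
        System.out.println("elements above threshold: " + updated);
    }
}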
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class NativeOpExecutioner, method exec(CustomOp).
/**
* This method executes the given CustomOp.
*
* PLEASE NOTE: You're responsible for input/output validation.
* @param op the CustomOp to execute
*/
public void exec(@NonNull CustomOp op) {
if (op.numOutputArguments() == 0 && !op.isInplaceCall())
throw new ND4JIllegalStateException("Op name " + op.opName() + " failed to execute. You can't execute non-inplace CustomOp without outputs being specified");
val name = op.opName().toLowerCase();
val hash = op.opHash();
val inputShapes = getInputShapes(op.numInputArguments());
val inputBuffers = getInputBuffers(op.numInputArguments());
int cnt = 0;
val inputArgs = op.inputArguments();
for (val in : inputArgs) {
if (in == null) {
throw new NullPointerException("Input argument is null");
}
inputBuffers.put(cnt, in.data().addressPointer());
inputShapes.put(cnt++, in.shapeInfoDataBuffer().addressPointer());
}
val outputArgs = op.outputArguments();
for (int i = 0; i < outputArgs.length; i++) {
if (outputArgs[i] == null)
throw new ND4JIllegalStateException("Op output arguments must not be null!");
}
val outputShapes = getOutputShapes(op.numOutputArguments());
val outputBuffers = getOutputBuffers(op.numOutputArguments());
cnt = 0;
for (val out : outputArgs) {
outputBuffers.put(cnt, out.data().addressPointer());
outputShapes.put(cnt++, out.shapeInfoDataBuffer().addressPointer());
}
val iArgs = op.numIArguments() > 0 ? new IntPointer(op.numIArguments()) : null;
cnt = 0;
val iArgs1 = op.iArgs();
for (val i : iArgs1) iArgs.put(cnt++, i);
if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
val tArgs = op.numTArguments() > 0 ? new FloatPointer(op.numTArguments()) : null;
val tArgs1 = op.tArgs();
cnt = 0;
for (val t : tArgs1) tArgs.put(cnt++, (float) t);
val status = OpStatus.byNumber(loop.execCustomOpFloat(null, hash, inputBuffers, inputShapes, op.numInputArguments(), outputBuffers, outputShapes, op.numOutputArguments(), tArgs, op.numTArguments(), iArgs, op.numIArguments(), op.isInplaceCall()));
if (status != OpStatus.ND4J_STATUS_OK)
throw new ND4JIllegalStateException("Op execution failed: " + status);
} else if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) {
val tArgs = op.numTArguments() > 0 ? getDoublePointerFrom(tArgsPointer, op.numTArguments()) : null;
val tArgs1 = op.tArgs();
cnt = 0;
for (val t : tArgs1) tArgs.put(cnt++, t);
val t = op.numInputArguments();
OpStatus status = OpStatus.ND4J_STATUS_OK;
try {
status = OpStatus.byNumber(loop.execCustomOpDouble(null, hash, inputBuffers, inputShapes, op.numInputArguments(), outputBuffers, outputShapes, op.numOutputArguments(), tArgs, op.numTArguments(), iArgs, op.numIArguments(), op.isInplaceCall()));
} catch (Exception e) {
log.error("Failed to execute. Please see above message (printed out from c++) for a possible cause of error.");
throw e;
}
} else if (Nd4j.dataType() == DataBuffer.Type.HALF) {
val tArgs = op.numTArguments() > 0 ? getShortPointerFrom(halfArgsPointer, op.numTArguments()) : null;
cnt = 0;
val tArgs1 = op.tArgs();
for (val t : tArgs1) tArgs.put(cnt++, ArrayUtil.toHalf(t));
val status = OpStatus.byNumber(loop.execCustomOpHalf(null, hash, inputBuffers, inputShapes, op.numInputArguments(), outputBuffers, outputShapes, op.numOutputArguments(), tArgs, op.numTArguments(), iArgs, op.numIArguments(), op.isInplaceCall()));
if (status != OpStatus.ND4J_STATUS_OK)
throw new ND4JIllegalStateException("Op execution failed: " + status);
}
}
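A minimal sketch of building and running a CustomOp through this path. It assumes the DynamicCustomOp builder API and that an "add" op is registered in libnd4j; treat those names as assumptions rather than guarantees.
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.DynamicCustomOp;
import org.nd4j.linalg.factory.Nd4j;
public class CustomOpUsageSketch {
    public static void main(String[] args) {
        INDArray a = Nd4j.ones(2, 2);
        INDArray b = Nd4j.ones(2, 2);
        INDArray out = Nd4j.create(2, 2);
        // an output (or an in-place call) is mandatory, otherwise exec throws ND4JIllegalStateException
        DynamicCustomOp op = DynamicCustomOp.builder("add")
                .addInputs(a, b)
                .addOutputs(out)
                .build();
        Nd4j.getExecutioner().exec(op);
        System.out.println(out); // expected: all values equal to 2.0
    }
}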
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class NativeOpExecutioner, method exec(ScalarOp).
private void exec(ScalarOp op) {
if (op.x() instanceof IComplexNDArray || executionMode() == ExecutionMode.JAVA) {
super.exec(op);
} else {
long st = profilingHookIn(op);
validateDataType(Nd4j.dataType(), op);
if (op.x().lengthLong() != op.z().lengthLong())
throw new ND4JIllegalStateException("op.X length should be equal to op.Z length: [" + Arrays.toString(op.x().shapeInfoDataBuffer().asInt()) + "] != [" + Arrays.toString(op.z().shapeInfoDataBuffer().asInt()) + "]");
if (op.getDimension() != null) {
invoke(op, op.getDimension());
return;
}
if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) {
if (op.x().elementWiseStride() >= 1 && !op.isExecSpecial() && op.z().elementWiseStride() >= 1 && !op.isExecSpecial()) {
loop.execScalarDouble(null, op.opNum(), (DoublePointer) op.x().data().addressPointer(), op.x().elementWiseStride(), (DoublePointer) op.z().data().addressPointer(), op.z().elementWiseStride(), op.scalar().doubleValue(), (DoublePointer) getPointerForExtraArgs(op), op.n());
} else
loop.execScalarDouble(null, op.opNum(), (DoublePointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (DoublePointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), op.scalar().doubleValue(), (DoublePointer) getPointerForExtraArgs(op));
} else {
if (op.x().elementWiseStride() >= 1 && !op.isExecSpecial() && op.z().elementWiseStride() >= 1 && !op.isExecSpecial()) {
loop.execScalarFloat(null, op.opNum(), (FloatPointer) op.x().data().addressPointer(), op.x().elementWiseStride(), (FloatPointer) op.z().data().addressPointer(), op.z().elementWiseStride(), op.scalar().floatValue(), (FloatPointer) getPointerForExtraArgs(op), op.n());
} else
loop.execScalarFloat(null, op.opNum(), (FloatPointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (FloatPointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), op.scalar().floatValue(), (FloatPointer) getPointerForExtraArgs(op));
}
profilingHookOut(op, st);
}
}
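Note that this overload is private and is normally reached through the public exec(Op) dispatch. A minimal sketch, assuming the ScalarAdd(INDArray, Number) constructor, which applies the op in place on x:
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.impl.scalar.ScalarAdd;
import org.nd4j.linalg.factory.Nd4j;
public class ScalarOpUsageSketch {
    public static void main(String[] args) {
        INDArray x = Nd4j.zeros(2, 3);
        // adds 5.0 to every element; x is both input and output here
        Nd4j.getExecutioner().exec(new ScalarAdd(x, 5.0));
        System.out.println(x); // expected: all values equal to 5.0
    }
}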
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class NativeOpExecutioner, method thresholdDecode.
@Override
public INDArray thresholdDecode(INDArray encoded, INDArray target) {
DataBuffer buffer = encoded.data();
if (buffer.dataType() != DataBuffer.Type.INT)
throw new ND4JIllegalStateException("thresholdEncoded array should have dataType of INT");
long compressedLength = buffer.getInt(0);
long originalLength = buffer.getInt(1);
float threshold = buffer.getInt(2);
if (target.lengthLong() != originalLength)
throw new ND4JIllegalStateException("originalLength [" + originalLength + "] stored in encoded array doesn't match target length [" + target.lengthLong() + "]");
DataBuffer.TypeEx typeDst = AbstractCompressor.getBufferTypeEx(target.data());
loop.convertTypes(null, DataBuffer.TypeEx.THRESHOLD.ordinal(), buffer.addressPointer(), target.length(), typeDst.ordinal(), target.data().addressPointer());
return target;
}
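A minimal round-trip sketch. It assumes the companion thresholdEncode method on the same executioner, which produces the INT-typed encoded array this method expects and is assumed to modify its input in place, hence the dup().
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
public class ThresholdDecodeUsageSketch {
    public static void main(String[] args) {
        INDArray source = Nd4j.rand(1, 100).subi(0.5);
        // encode keeps only elements whose magnitude exceeds the threshold
        INDArray encoded = Nd4j.getExecutioner().thresholdEncode(source.dup(), 1e-3);
        // decode writes the kept values back into a zeroed target of the original length
        INDArray decoded = Nd4j.getExecutioner().thresholdDecode(encoded, Nd4j.zeros(1, 100));
        System.out.println(decoded);
    }
}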