Use of org.nd4j.linalg.exception.ND4JIllegalStateException in the project nd4j by deeplearning4j:
the class NativeOpExecutioner, method exec.
// Executes a broadcast op (op.x() combined with op.y(), result written to op.z())
// along the requested dimensions via the native (JNI) backend. Returns op.z().
@Override
public INDArray exec(BroadcastOp op, int... dimension) {
long st = profilingHookIn(op);
// A null dimension array means "all dimensions"; Integer.MAX_VALUE is the sentinel for that.
if (dimension == null)
dimension = new int[] { Integer.MAX_VALUE };
// Map negative axes to their positive equivalents relative to op.x()'s rank.
dimension = Shape.normalizeAxis(op.x().rank(), dimension);
validateDataType(Nd4j.dataType(), op);
// Reject any out-of-range axis (the MAX_VALUE "all dims" sentinel is exempt).
for (int i = 0; i < dimension.length; i++) if (dimension[i] >= op.x().rank() && dimension[i] != Integer.MAX_VALUE)
throw new ND4JIllegalStateException("Op target dimension " + Arrays.toString(dimension) + " contains element that higher then rank of op.X: [" + op.x().rank() + "]");
/**
 * Returns the {@link Shape#createShapeInformation(int[], int[], int, int, char)}
 * and the associated offsets for each {@link INDArray#tensorAlongDimension(int, int...)}
 * The first item is the shape information. The second one is the offsets.
 */
Pair<DataBuffer, DataBuffer> tadBuffers = tadManager.getTADOnlyShapeInfo(op.x(), dimension);
Pointer hostTadShapeInfo = tadBuffers.getFirst().addressPointer();
Pointer hostTadOffsets = tadBuffers.getSecond().addressPointer();
Pointer devTadShapeInfoZ = null;
Pointer devTadOffsetsZ = null;
// if (!Arrays.equals(op.x().shape(),op.z().shape()) || !Arrays.equals(op.x().stride(),op.z().stride()) || op.x().ordering() != op.z().ordering()) {
// that's the place where we're going to have second TAD in place
// Separate TAD info is computed for op.z(), since z may differ from x in shape/stride/ordering.
Pair<DataBuffer, DataBuffer> tadBuffersZ = tadManager.getTADOnlyShapeInfo(op.z(), dimension);
devTadShapeInfoZ = tadBuffersZ.getFirst().addressPointer();
devTadOffsetsZ = tadBuffersZ.getSecond().addressPointer();
// Lazily allocate the (apparently thread-local) scratch PointerPointer used to pass
// the four TAD pointers down to native code.
if (extraz.get() == null)
extraz.set(new PointerPointer(32));
PointerPointer dummy = extraz.get().put(hostTadShapeInfo, hostTadOffsets, devTadShapeInfoZ, devTadOffsetsZ);
// The dimension array is handed to native code through a cached constant buffer.
Pointer dimensionAddress = constantHandler.getConstantBuffer(dimension).addressPointer();
// Dispatch to the double- or float-precision native kernel based on x's data type.
if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) {
loop.execBroadcastDouble(dummy, op.opNum(), (DoublePointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (DoublePointer) op.y().data().addressPointer(), (IntPointer) op.y().shapeInfoDataBuffer().addressPointer(), (DoublePointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), (IntPointer) dimensionAddress, dimension.length);
} else {
loop.execBroadcastFloat(dummy, op.opNum(), (FloatPointer) op.x().data().addressPointer(), (IntPointer) op.x().shapeInfoDataBuffer().addressPointer(), (FloatPointer) op.y().data().addressPointer(), (IntPointer) op.y().shapeInfoDataBuffer().addressPointer(), (FloatPointer) op.z().data().addressPointer(), (IntPointer) op.z().shapeInfoDataBuffer().addressPointer(), (IntPointer) dimensionAddress, dimension.length);
}
return op.z();
}
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in the project nd4j by deeplearning4j:
the class DifferentialFunction, method resolvePropertiesFromSameDiffBeforeExecution.
/**
 * Resolve properties and arguments right before execution of
 * this operation.
 *
 * For each property SameDiff still needs to resolve for this function, the
 * backing variable's array is looked up and its value is pushed into the
 * corresponding declared field (int[], double[], int, or double), unless the
 * property is already present in this function's current property map.
 *
 * @throws ND4JIllegalStateException if a required variable has no array yet
 */
public void resolvePropertiesFromSameDiffBeforeExecution() {
    val properties = sameDiff.propertiesToResolveForFunction(this);
    val fields = DifferentialFunctionClassHolder.getInstance().getFieldsForFunction(this);
    val currentFields = this.propertiesForFunction();
    for (val property : properties) {
        // just skip if this function declares no matching field for the property
        if (!fields.containsKey(property))
            continue;
        val var = sameDiff.getVarNameForFieldAndFunction(this, property);
        val fieldType = fields.get(property);
        val varArr = sameDiff.getArrForVarName(var);
        // already defined - do not overwrite
        if (currentFields.containsKey(property)) {
            continue;
        }
        /**
         * Possible cause:
         * Might be related to output name alignment.
         */
        if (varArr == null) {
            throw new ND4JIllegalStateException("Unable to set null array!");
        }
        // BUGFIX: the original compared the Field object itself (fieldType.equals(...))
        // against the Class in the three else-if branches below, which is always false,
        // so double[]/int/double properties were never resolved. All branches must
        // inspect the field's declared type via fieldType.getType().
        if (fieldType.getType().equals(int[].class)) {
            setValueFor(fieldType, varArr.data().asInt());
        } else if (fieldType.getType().equals(double[].class)) {
            setValueFor(fieldType, varArr.data().asDouble());
        } else if (fieldType.getType().equals(int.class)) {
            setValueFor(fieldType, varArr.getInt(0));
        } else if (fieldType.getType().equals(double.class)) {
            setValueFor(fieldType, varArr.getDouble(0));
        }
    }
}
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in the project nd4j by deeplearning4j:
the class NativeGraphExecutioner, method executeGraph.
/**
 * This method executes given graph and returns results
 *
 * @param sd the SameDiff instance whose graph is serialized and executed natively
 * @param configuration execution configuration serialized into the FlatBuffers graph
 * @return one INDArray per variable reported back by the native FlatResult
 * @throws ND4JIllegalStateException if native execution fails or a result
 *         variable cannot be matched to a known SameDiff variable
 */
@Override
public INDArray[] executeGraph(SameDiff sd, ExecutorConfiguration configuration) {
Map<Integer, Node> intermediate = new HashMap<>();
// Serialize the graph to FlatBuffers and hand it to the native executor.
ByteBuffer buffer = convertToFlatBuffers(sd, configuration, intermediate);
BytePointer bPtr = new BytePointer(buffer);
log.info("Buffer length: {}", buffer.limit());
Pointer res = NativeOpsHolder.getInstance().getDeviceNativeOps().executeFlatGraphFloat(null, bPtr);
if (res == null)
throw new ND4JIllegalStateException("Graph execution failed");
// FIXME: this is BAD
// NOTE(review): the 1 MB page size below is a hard-coded upper bound on the result
// buffer; results larger than this would be truncated - confirm against native side.
PagedPointer pagedPointer = new PagedPointer(res, 1024 * 1024L);
FlatResult fr = FlatResult.getRootAsFlatResult(pagedPointer.asBytePointer().asByteBuffer());
log.info("VarMap: {}", sd.variableMap());
INDArray[] results = new INDArray[fr.variablesLength()];
// Decode each returned variable and re-associate its array with the SameDiff variable.
for (int e = 0; e < fr.variablesLength(); e++) {
FlatVariable var = fr.variables(e);
log.info("Var received: id: [{}:{}/<{}>];", var.id().first(), var.id().second(), var.name());
FlatArray ndarray = var.ndarray();
INDArray val = Nd4j.createFromFlatArray(ndarray);
results[e] = val;
if (var.name() != null && sd.variableMap().containsKey(var.name())) {
// log.info("VarName: {}; Exists: {}; NDArrayInfo: {};", var.opName(), sd.variableMap().containsKey(var.opName()), sd.getVertexToArray().containsKey(var.opName()));
// log.info("storing: {}; array: {}", var.name(), val);
sd.associateArrayWithVariable(val, sd.variableMap().get(var.name()));
} else {
// log.info("Original id: {}; out: {}; out2: {}", original, sd.getVertexIdxToInfo().get(original), graph.getVariableForVertex(original));
// NOTE(review): this re-queries the map that containsKey() just reported false for
// (it can only succeed when var.name() is null maps to an entry) - looks suspicious,
// verify the intended lookup key.
if (sd.variableMap().get(var.name()) != null) {
sd.associateArrayWithVariable(val, sd.getVariable(var.name()));
} else {
throw new ND4JIllegalStateException("Unknown variable received as result: [" + var.name() + "]");
}
}
}
return results;
}
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in the project nd4j by deeplearning4j:
the class TensorflowDescriptorParser, method opDescs.
/**
 * Get the op descriptors for tensorflow
 *
 * Reads the bundled "ops.proto" text-format OpList from the classpath and
 * indexes each OpDef by its name.
 *
 * @return the op descriptors for tensorflow, keyed by op name
 * @throws Exception if the classpath resource cannot be opened
 * @throws ND4JIllegalStateException if the descriptors cannot be read or parsed
 */
public static Map<String, OpDef> opDescs() throws Exception {
    InputStream contents = new ClassPathResource("ops.proto").getInputStream();
    try (BufferedInputStream bis2 = new BufferedInputStream(contents);
                    BufferedReader reader = new BufferedReader(new InputStreamReader(bis2))) {
        org.tensorflow.framework.OpList.Builder builder = org.tensorflow.framework.OpList.newBuilder();
        StringBuilder str = new StringBuilder();
        String line = null;
        while ((line = reader.readLine()) != null) {
            // NOTE(review): lines are concatenated without any separator; TextFormat
            // tokenization appears to tolerate this for ops.proto, but confirm no
            // string literal in the resource spans a line break.
            str.append(line);
        }
        TextFormat.getParser().merge(str.toString(), builder);
        List<OpDef> list = builder.getOpList();
        Map<String, OpDef> map = new HashMap<>();
        for (OpDef opDef : list) {
            map.put(opDef.getName(), opDef);
        }
        return map;
    } catch (Exception e2) {
        // BUGFIX: previously the exception was only printed (printStackTrace) and
        // the ND4JIllegalStateException thrown afterwards dropped the cause;
        // chain it so parse failures remain diagnosable.
        throw new ND4JIllegalStateException("Unable to load tensorflow descriptors!", e2);
    }
}
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in the project nd4j by deeplearning4j:
the class TFGraphMapper, method mapTensorProto.
/**
 * Maps a TensorFlow TensorProto into an INDArray.
 *
 * Handles DT_INT8/16/32, DT_FLOAT, DT_DOUBLE and DT_INT64. For each dtype the
 * value may arrive as (a) a single inline scalar, (b) an inline repeated value
 * list, or (c) a packed binary tensorContent buffer; each case is decoded
 * separately below. Integer tensors are widened to floating point (see FIXME).
 *
 * @param tfTensor the TensorFlow tensor to convert
 * @return the decoded INDArray
 * @throws ND4JIllegalStateException if tensorContent is empty (typically an
 *         unfrozen graph) or no decoding branch matches
 * @throws UnsupportedOperationException for unhandled dtypes
 */
public INDArray mapTensorProto(TensorProto tfTensor) {
    // building shape first
    int dims = tfTensor.getTensorShape().getDimCount();
    int[] arrayShape = null;
    List<Integer> dimensions = new ArrayList<>();
    for (int e = 0; e < dims; e++) {
        // TODO: eventually we want long shapes :(
        int dim = (int) tfTensor.getTensorShape().getDim(e).getSize();
        dimensions.add(dim);
    }
    arrayShape = Ints.toArray(dimensions);
    if (tfTensor.getDtype() == DataType.DT_INT32 || tfTensor.getDtype() == DataType.DT_INT16 || tfTensor.getDtype() == DataType.DT_INT8) {
        // valueOf
        if (tfTensor.getIntValCount() == 1 || ArrayUtil.prod(arrayShape) == 1) {
            // straight zero case
            if (tfTensor.getIntValCount() < 1)
                return Nd4j.trueScalar(0.0);
            // should be scalar otherwise
            int val = tfTensor.getIntVal(0);
            if (arrayShape == null || arrayShape.length == 0)
                arrayShape = new int[] {};
            INDArray array = Nd4j.valueArrayOf(arrayShape, (double) val);
            return array;
        } else if (tfTensor.getIntValCount() > 0) {
            // BUGFIX: this branch previously tested getInt64ValCount() while looping
            // over getIntVal/getIntValCount, so inline int32 value lists were never
            // decoded here; the condition must match the getter actually used.
            double[] jArray = new double[tfTensor.getIntValCount()];
            for (int e = 0; e < tfTensor.getIntValCount(); e++) {
                jArray[e] = (double) tfTensor.getIntVal(e);
            }
            // TF arrays are always C
            INDArray array = Nd4j.create(jArray, arrayShape, 0, 'c');
            return array;
        } else {
            // FIXME: INT bytebuffers should be converted to floating point
            // throw new UnsupportedOperationException("To be implemented yet");
            // binary representation
            val bb = tfTensor.getTensorContent().asReadOnlyByteBuffer();
            val fb = bb.order(ByteOrder.nativeOrder()).asIntBuffer();
            val fa = new float[fb.capacity()];
            for (int e = 0; e < fb.capacity(); e++) fa[e] = (float) fb.get(e);
            if (fa.length == 0)
                throw new ND4JIllegalStateException("Can't find Tensor values! Probably you've forgot to freeze graph before saving?");
            if (fa.length == 1)
                return Nd4j.trueScalar(fa[0]);
            if (arrayShape.length == 1)
                return Nd4j.trueVector(fa);
            val array = Nd4j.create(fa, arrayShape, 'c', 0);
            // log.debug("Data: {}", Arrays.toString(array.data().asFloat()));
            return array;
        }
    } else if (tfTensor.getDtype() == DataType.DT_FLOAT) {
        if (tfTensor.getFloatValCount() == 1 || ArrayUtil.prod(arrayShape) == 1) {
            // straight zero case
            // NOTE(review): this is the only branch returning Nd4j.scalar(0.0) instead
            // of Nd4j.trueScalar(0.0) like its siblings - confirm whether intentional.
            if (tfTensor.getFloatValCount() < 1)
                return Nd4j.scalar(0.0);
            float val = tfTensor.getFloatVal(0);
            if (arrayShape == null || arrayShape.length == 0)
                arrayShape = new int[] {};
            INDArray array = Nd4j.valueArrayOf(arrayShape, (double) val);
            return array;
        } else if (tfTensor.getFloatValCount() > 0) {
            float[] jArray = new float[tfTensor.getFloatValCount()];
            for (int e = 0; e < tfTensor.getFloatValCount(); e++) {
                jArray[e] = tfTensor.getFloatVal(e);
            }
            // FIXME: we're missing float[] signature
            INDArray array = Nd4j.create(Nd4j.createBuffer(jArray), arrayShape, 'c');
            return array;
        } else if (tfTensor.getTensorContent().size() > 0) {
            // binary representation
            val bb = tfTensor.getTensorContent().asReadOnlyByteBuffer();
            val fb = bb.order(ByteOrder.nativeOrder()).asFloatBuffer();
            val fa = new float[fb.capacity()];
            for (int e = 0; e < fb.capacity(); e++) fa[e] = fb.get(e);
            if (fa.length == 0)
                throw new ND4JIllegalStateException("Can't find Tensor values! Probably you've forgot to freeze graph before saving?");
            if (fa.length == 1)
                return Nd4j.trueScalar(fa[0]);
            if (arrayShape.length == 1)
                return Nd4j.trueVector(fa);
            val array = Nd4j.create(fa, arrayShape, 'c', 0);
            return array;
        }
    } else if (tfTensor.getDtype() == DataType.DT_DOUBLE) {
        if (tfTensor.getDoubleValCount() == 1 || ArrayUtil.prod(arrayShape) == 1) {
            // straight zero case
            if (tfTensor.getDoubleValCount() < 1)
                return Nd4j.trueScalar(0.0);
            double val = tfTensor.getDoubleVal(0);
            INDArray array = Nd4j.trueScalar(val);
            return array;
        } else if (tfTensor.getDoubleValCount() > 0) {
            double[] jArray = new double[tfTensor.getDoubleValCount()];
            for (int e = 0; e < tfTensor.getDoubleValCount(); e++) {
                jArray[e] = tfTensor.getDoubleVal(e);
            }
            // TF arrays are always C
            INDArray array = Nd4j.create(jArray, arrayShape, 0, 'c');
            return array;
        } else if (tfTensor.getTensorContent().size() > 0) {
            // binary representation
            // DataBuffer buffer = Nd4j.createBuffer(tfTensor.getTensorContent().asReadOnlyByteBuffer(), DataBuffer.Type.FLOAT, (int) length);
            // INDArray array = Nd4j.createArrayFromShapeBuffer(buffer, Nd4j.getShapeInfoProvider().createShapeInformation(arrayShape, 'c'));
            val bb = tfTensor.getTensorContent().asReadOnlyByteBuffer();
            val fb = bb.order(ByteOrder.nativeOrder()).asDoubleBuffer();
            val da = new double[fb.capacity()];
            for (int e = 0; e < fb.capacity(); e++) da[e] = fb.get(e);
            if (da.length == 0)
                throw new ND4JIllegalStateException("Can't find Tensor values! Probably you've forgot to freeze graph before saving?");
            if (da.length == 1)
                return Nd4j.trueScalar(da[0]);
            if (arrayShape.length == 1)
                return Nd4j.trueVector(da);
            val array = Nd4j.create(da, arrayShape, 0, 'c');
            return array;
        }
    } else if (tfTensor.getDtype() == DataType.DT_INT64) {
        if (tfTensor.getInt64ValCount() == 1 || ArrayUtil.prod(arrayShape) == 1) {
            // straight zero case
            // BUGFIX: this previously tested getDoubleValCount() inside the INT64
            // branch; it must inspect the int64 value list.
            if (tfTensor.getInt64ValCount() < 1)
                return Nd4j.trueScalar(0.0);
            double val = (double) tfTensor.getInt64Val(0);
            INDArray array = Nd4j.trueScalar(val);
            return array;
        } else if (tfTensor.getInt64ValCount() > 0) {
            double[] jArray = new double[tfTensor.getInt64ValCount()];
            for (int e = 0; e < tfTensor.getInt64ValCount(); e++) {
                jArray[e] = (double) tfTensor.getInt64Val(e);
            }
            // TF arrays are always C
            INDArray array = Nd4j.create(jArray, arrayShape, 0, 'c');
            return array;
        } else if (tfTensor.getTensorContent().size() > 0) {
            // throw new UnsupportedOperationException("To be implemented yet");
            // Mapping INT bytebuffers should be converted to floating point
            val bb = tfTensor.getTensorContent().asReadOnlyByteBuffer();
            val lb = bb.order(ByteOrder.nativeOrder()).asLongBuffer();
            val fa = new float[lb.capacity()];
            for (int e = 0; e < lb.capacity(); e++) fa[e] = (float) lb.get(e);
            if (fa.length == 0)
                throw new ND4JIllegalStateException("Can't find Tensor values! Probably you've forgot to freeze graph before saving?");
            if (fa.length == 1)
                return Nd4j.trueScalar(fa[0]);
            if (arrayShape.length == 1)
                return Nd4j.trueVector(fa);
            val array = Nd4j.create(fa, arrayShape, 'c', 0);
            // log.debug("Data: {}", Arrays.toString(array.data().asFloat()));
            return array;
        }
    } else {
        throw new UnsupportedOperationException("Unknown dataType found: [" + tfTensor.getDtype() + "]");
    }
    throw new ND4JIllegalStateException("Invalid method state");
}
Aggregations