Usage of org.nd4j.linalg.exception.ND4JIllegalStateException in the nd4j project (deeplearning4j):
class CudaExecutioner, method invoke.
/**
 * Executes an {@link IndexAccumulation} op (e.g. argmax/argmin) on the GPU.
 * <p>
 * When {@code dimension} is {@code null} or {@code {Integer.MAX_VALUE}}, the reduction is
 * performed over the whole array and the result is a scalar; otherwise the reduction runs
 * along the given dimensions via TAD (tensor-along-dimension) buffers.
 * <p>
 * NOTE(review): {@code dimension} is sorted in place on the non-scalar path, mutating the
 * caller's array — preserved here since callers may rely on it, but worth confirming.
 *
 * @param op        the index-accumulation op to execute; its z buffer receives the result
 * @param dimension dimensions to reduce along, or null / {Integer.MAX_VALUE} for full reduction
 * @return always {@code null} (kept for signature compatibility)
 * @throws ND4JIllegalStateException if any dimension exceeds the rank of op.x()
 */
protected CudaContext invoke(IndexAccumulation op, int[] dimension) {
    long st = profilingHookIn(op);

    // Whole-array reduction produces a scalar; allocate a fresh scalar z when z is
    // missing or aliases x (we must not overwrite the input).
    if (dimension == null || (dimension.length == 1 && dimension[0] == Integer.MAX_VALUE)) {
        if (op.z() == op.x() || op.z() == null) {
            op.setZ(Nd4j.scalar(0.0));
        }
    }

    checkForCompression(op);
    validateDataType(Nd4j.dataType(), op);

    if (extraz.get() == null)
        extraz.set(new PointerPointer(32));

    if (CudaEnvironment.getInstance().getConfiguration().isDebug())
        lastOp.set(op.opName());

    // FIX: removed stray "CudaEnvironment.getInstance().getConfiguration().enableDebug(true);"
    // — it force-enabled debug mode globally on EVERY invocation (leftover debugging
    // statement that also defeated the isDebug() check above).

    // FIX: dimension may legitimately be null here (whole-array reduction, see above),
    // so guard the validation loop against NPE.
    if (dimension != null) {
        for (int i = 0; i < dimension.length; i++) {
            if (dimension[i] >= op.x().rank() && dimension[i] != Integer.MAX_VALUE)
                throw new ND4JIllegalStateException("Op target dimension " + Arrays.toString(dimension)
                        + " contains element that higher then rank of op.X: [" + op.x().rank() + "]");
        }
    }

    // Prepare the CUDA stream/context; a scalar z needs no device-side write tracking.
    CudaContext context = AtomicAllocator.getInstance().getFlowController()
            .prepareAction(op.z().isScalar() ? null : op.z(), op.x(), op.y());

    Pointer x = AtomicAllocator.getInstance().getPointer(op.x(), context);
    Pointer xShapeInfo = AtomicAllocator.getInstance().getPointer(op.x().shapeInfoDataBuffer(), context);
    Pointer extraArgs = op.extraArgs() != null
            ? AtomicAllocator.getInstance().getPointer(op.extraArgsDataBuff(), context) : null;

    Pointer hostYShapeInfo = op.y() == null ? null
            : AddressRetriever.retrieveHostPointer(op.y().shapeInfoDataBuffer());
    Pointer hostZShapeInfo = op.z() == null ? null
            : AddressRetriever.retrieveHostPointer(op.z().shapeInfoDataBuffer());

    // TAD buffers are always built; for the null-dimension case we use {0} as a placeholder.
    int[] fdimension = dimension;
    if (fdimension == null)
        fdimension = new int[] { 0 };

    Pair<DataBuffer, DataBuffer> tadBuffers = tadManager.getTADOnlyShapeInfo(op.x(), fdimension);
    Pointer hostTadShapeInfo = AddressRetriever.retrieveHostPointer(tadBuffers.getFirst());
    Pointer devTadShapeInfo = AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context);

    DataBuffer offsets = tadBuffers.getSecond();
    Pointer devTadOffsets = offsets == null ? null : AtomicAllocator.getInstance().getPointer(offsets, context);

    // Pointer bundle handed to the native side; slot order is fixed by the native API.
    PointerPointer xShapeInfoHostPointer = extraz.get().put(
            AddressRetriever.retrieveHostPointer(op.x().shapeInfoDataBuffer()), context.getOldStream(),
            AtomicAllocator.getInstance().getDeviceIdPointer(), context.getBufferAllocation(),
            context.getBufferReduction(), context.getBufferScalar(), context.getBufferSpecial(),
            hostYShapeInfo, hostZShapeInfo, hostTadShapeInfo, devTadShapeInfo, devTadOffsets);

    if (op.z().isScalar() || dimension == null || dimension[0] == Integer.MAX_VALUE) {
        // Scalar path: native call returns the index directly; mirror it into z.
        if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) {
            double result = nativeOps.execIndexReduceScalarDouble(xShapeInfoHostPointer, op.opNum(),
                    (DoublePointer) x, (IntPointer) xShapeInfo, (DoublePointer) extraArgs);
            op.setFinalResult((int) result);
            op.z().assign(result);
        } else if (op.x().data().dataType() == DataBuffer.Type.FLOAT) {
            float result = nativeOps.execIndexReduceScalarFloat(xShapeInfoHostPointer, op.opNum(),
                    (FloatPointer) x, (IntPointer) xShapeInfo, (FloatPointer) extraArgs);
            op.setFinalResult((int) result);
            op.z().assign(result);
        } else {
            float result = nativeOps.execIndexReduceScalarHalf(xShapeInfoHostPointer, op.opNum(),
                    (ShortPointer) x, (IntPointer) xShapeInfo, (ShortPointer) extraArgs);
            op.setFinalResult((int) result);
            op.z().assign(result);
        }
    } else {
        // Dimensional path: native call writes results into z along the given dimensions.
        Arrays.sort(dimension);

        Pointer z = AtomicAllocator.getInstance().getPointer(op.z(), context);
        Pointer zShapeInfo = AtomicAllocator.getInstance().getPointer(op.z().shapeInfoDataBuffer(), context);
        Pointer dimensionPointer = AtomicAllocator.getInstance()
                .getPointer(AtomicAllocator.getInstance().getConstantBuffer(dimension), context);

        if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) {
            nativeOps.execIndexReduceDouble(xShapeInfoHostPointer, op.opNum(), (DoublePointer) x,
                    (IntPointer) xShapeInfo, (DoublePointer) extraArgs, (DoublePointer) z,
                    (IntPointer) zShapeInfo, (IntPointer) dimensionPointer, dimension.length);
        } else if (op.x().data().dataType() == DataBuffer.Type.FLOAT) {
            nativeOps.execIndexReduceFloat(xShapeInfoHostPointer, op.opNum(), (FloatPointer) x,
                    (IntPointer) xShapeInfo, (FloatPointer) extraArgs, (FloatPointer) z,
                    (IntPointer) zShapeInfo, (IntPointer) dimensionPointer, dimension.length);
        } else {
            nativeOps.execIndexReduceHalf(xShapeInfoHostPointer, op.opNum(), (ShortPointer) x,
                    (IntPointer) xShapeInfo, (ShortPointer) extraArgs, (ShortPointer) z,
                    (IntPointer) zShapeInfo, (IntPointer) dimensionPointer, dimension.length);
        }
    }

    AtomicAllocator.getInstance().registerAction(context, null, op.x(), op.y());

    profilingHookOut(op, st);

    return null;
}
Usage of org.nd4j.linalg.exception.ND4JIllegalStateException in the nd4j project (deeplearning4j):
class CudaExecutioner, method bitmapDecode.
/**
 * Decodes a bitmap-compressed gradient buffer into {@code target} on the GPU,
 * dispatching to the native decode routine matching the target's data type.
 *
 * @param encoded the bitmap-encoded source buffer
 * @param target  the array to decode into; also the return value
 * @return {@code target}, now holding the decoded values
 * @throws ND4JIllegalStateException if the target's data type is not FLOAT/DOUBLE/HALF
 */
@Override
public INDArray bitmapDecode(INDArray encoded, INDArray target) {
    AtomicAllocator allocator = AtomicAllocator.getInstance();
    CudaContext context = allocator.getFlowController().prepareAction(target);

    if (extraz.get() == null)
        extraz.set(new PointerPointer(32));

    // Extras bundle consumed by the native decode call; slot order is fixed.
    PointerPointer extras = extraz.get().put(allocator.getHostPointer(target), context.getOldStream(),
            context.getBufferScalar(), context.getBufferReduction());

    DataBuffer.Type dataType = target.data().dataType();
    switch (dataType) {
        case FLOAT:
            nativeOps.decodeBitmapFloat(extras, allocator.getPointer(encoded.data(), context),
                    target.lengthLong(), (FloatPointer) allocator.getPointer(target, context));
            break;
        case DOUBLE:
            nativeOps.decodeBitmapDouble(extras, allocator.getPointer(encoded.data(), context),
                    target.lengthLong(), (DoublePointer) allocator.getPointer(target, context));
            break;
        case HALF:
            nativeOps.decodeBitmapHalf(extras, allocator.getPointer(encoded.data(), context),
                    target.lengthLong(), (ShortPointer) allocator.getPointer(target, context));
            break;
        default:
            throw new ND4JIllegalStateException("Unknown dataType " + target.data().dataType());
    }

    allocator.getFlowController().registerAction(context, target);

    return target;
}
Usage of org.nd4j.linalg.exception.ND4JIllegalStateException in the nd4j project (deeplearning4j):
class VoidParameterServer, method getRole.
/**
* This method checks for designated role, according to local IP addresses and configuration passed into method
*
* @param voidConfiguration
* @param localIPs
* @return
*/
/**
 * This method checks for designated role, according to local IP addresses and configuration passed into method.
 * <p>
 * Resolution order: SHARD if any shard address matches a local IP, then BACKUP if any
 * backup address matches; otherwise CLIENT, whose IP is resolved from the configured
 * network mask or the {@code DL4J_VOID_IP} environment variable.
 *
 * @param voidConfiguration configuration holding shard/backup addresses and network mask
 * @param localIPs          the IP addresses of this host
 * @return the role paired with the address (including port for the CLIENT case)
 * @throws ND4JIllegalStateException if no usable IP can be determined for a CLIENT node
 */
protected Pair<NodeRole, String> getRole(@NonNull VoidConfiguration voidConfiguration, @NonNull Collection<String> localIPs) {
    NodeRole result = NodeRole.CLIENT;

    // Shard addresses may carry a ":port" suffix — strip it before comparing to local IPs.
    for (String ip : voidConfiguration.getShardAddresses()) {
        String cleansed = ip.replaceAll(":.*", "");
        if (localIPs.contains(cleansed))
            return Pair.create(NodeRole.SHARD, ip);
    }

    if (voidConfiguration.getBackupAddresses() != null)
        for (String ip : voidConfiguration.getBackupAddresses()) {
            String cleansed = ip.replaceAll(":.*", "");
            if (localIPs.contains(cleansed))
                return Pair.create(NodeRole.BACKUP, ip);
        }

    String sparkIp = null;

    // FIX: dropped the redundant "sparkIp == null &&" guard — sparkIp is always null here.
    if (voidConfiguration.getNetworkMask() != null) {
        NetworkOrganizer organizer = new NetworkOrganizer(voidConfiguration.getNetworkMask());
        sparkIp = organizer.getMatchingAddress();
    }

    // last resort here...
    if (sparkIp == null)
        sparkIp = System.getenv("DL4J_VOID_IP");

    log.info("Got [{}] as sparkIp", sparkIp);

    if (sparkIp == null)
        // FIX: corrected "communcation" typo in the user-facing error message.
        throw new ND4JIllegalStateException("Can't get IP address for UDP communication");

    // local IP from pair is used for shard only, so we don't care
    return Pair.create(result, sparkIp + ":" + voidConfiguration.getUnicastPort());
}
Usage of org.nd4j.linalg.exception.ND4JIllegalStateException in the nd4j project (deeplearning4j):
class VoidConfiguration, method validateNetmask.
/**
 * Sanity-checks the configured IPv4 network mask and normalizes it in place.
 * A null mask is accepted as "not configured". The last octet is forced to
 * "0/24" unless a CIDR suffix is present and the octet already starts with 0.
 *
 * @throws ND4JIllegalStateException if the mask is malformed or an octet is out of range
 */
protected void validateNetmask() {
    // No mask configured — nothing to validate.
    if (networkMask == null)
        return;

    // micro-validation here
    String[] octets = networkMask.split("\\.");

    if (octets.length == 1 || networkMask.isEmpty())
        throw new ND4JIllegalStateException("Provided netmask doesn't look like a legit one. Proper format is: 192.168.1.0/24 or 10.0.0.0/8");

    // TODO: add support for IPv6 eventually here
    if (octets.length != 4)
        throw new ND4JIllegalStateException("4 octets expected here for network mask");

    // The first three octets must each be an integer within [0, 255]. The range
    // violation is signalled via a bare exception so the catch below maps both
    // parse failures and range failures to one uniform error message.
    for (int idx = 0; idx < 3; idx++) {
        try {
            int value = Integer.parseInt(octets[idx]);
            if (value < 0 || value > 255)
                throw new ND4JIllegalStateException();
        } catch (Exception e) {
            throw new ND4JIllegalStateException("All IP address octets should be in range of 0...255");
        }
    }

    if (Integer.parseInt(octets[0]) == 0)
        throw new ND4JIllegalStateException("First network mask octet should be non-zero. I.e. 10.0.0.0/8");

    // we enforce last octet to be 0/24 always
    if (!networkMask.contains("/") || !octets[3].startsWith("0"))
        octets[3] = "0/24";

    this.networkMask = String.join(".", octets);
}
Usage of org.nd4j.linalg.exception.ND4JIllegalStateException in the nd4j project (deeplearning4j):
class BasicWorkspaceTests, method testAllocation4.
/**
 * Verifies STRICT allocation with SpillPolicy.FAIL: allocations that fit the
 * workspace advance the host offset (8-byte aligned), an oversized allocation
 * throws ND4JIllegalStateException, and the offset is untouched by the failure.
 */
@Test
public void testAllocation4() throws Exception {
    WorkspaceConfiguration failConfig = WorkspaceConfiguration.builder()
            .initialSize(1024 * 1024)
            .maxSize(1024 * 1024)
            .overallocationLimit(0.1)
            .policyAllocation(AllocationPolicy.STRICT)
            .policyLearning(LearningPolicy.FIRST_LOOP)
            .policyMirroring(MirroringPolicy.FULL)
            .policySpill(SpillPolicy.FAIL)
            .build();

    Nd4jWorkspace workspace = (Nd4jWorkspace) Nd4j.getWorkspaceManager().createNewWorkspace(failConfig);
    Nd4j.getMemoryManager().setCurrentWorkspace(workspace);

    assertNotEquals(null, Nd4j.getMemoryManager().getCurrentWorkspace());
    assertEquals(0, workspace.getHostOffset());

    INDArray first = Nd4j.create(new int[] { 1, 5 }, 'c');

    // checking if allocation actually happened: 5 elements, padded to 8-byte alignment
    long reqMem = 5 * Nd4j.sizeOfDataType();
    long expectedOffset = reqMem + reqMem % 8;
    assertEquals(expectedOffset, workspace.getHostOffset());

    // An allocation far beyond the workspace capacity must fail with FAIL spill policy.
    boolean failed = false;
    try {
        INDArray huge = Nd4j.create(10000000);
    } catch (ND4JIllegalStateException e) {
        failed = true;
    }
    assertTrue(failed);

    // The failed allocation must not have moved the offset.
    assertEquals(expectedOffset, workspace.getHostOffset());

    INDArray second = Nd4j.create(new int[] { 1, 5 }, 'c');
    assertEquals(expectedOffset * 2, workspace.getHostOffset());
}
Aggregations