Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class JCublasNDArray, method leverageTo.
@Override
public INDArray leverageTo(String id) {
    if (!isAttached()) {
        // log.info("Skipping detached");
        return this;
    }

    if (!Nd4j.getWorkspaceManager().checkIfWorkspaceExists(id)) {
        // log.info("Skipping non-existent");
        return this;
    }

    MemoryWorkspace current = Nd4j.getMemoryManager().getCurrentWorkspace();
    MemoryWorkspace target = Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(id);

    if (current == target) {
        // log.info("Skipping equals A");
        return this;
    }

    if (this.data.getParentWorkspace() == target) {
        // log.info("Skipping equals B");
        return this;
    }

    Nd4j.getMemoryManager().setCurrentWorkspace(target);
    // log.info("Leveraging...");

    INDArray copy = null;
    if (!this.isView()) {
        Nd4j.getExecutioner().commit();

        // allocate the destination buffer inside the target workspace
        DataBuffer buffer = Nd4j.createBuffer(this.lengthLong(), false);

        AllocationPoint pointDst = AtomicAllocator.getInstance().getAllocationPoint(buffer);
        AllocationPoint pointSrc = AtomicAllocator.getInstance().getAllocationPoint(this.data);

        CudaContext context = AtomicAllocator.getInstance().getFlowController().prepareAction(pointDst, pointSrc);

        /*
        if (NativeOpsHolder.getInstance().getDeviceNativeOps().memsetAsync(pointDst.getDevicePointer(), 0, 1, 0, context.getOldStream()) == 0)
            throw new ND4JIllegalStateException("memsetAsync 1 failed");
        context.syncOldStream();

        if (NativeOpsHolder.getInstance().getDeviceNativeOps().memsetAsync(pointSrc.getDevicePointer(), 0, 1, 0, context.getOldStream()) == 0)
            throw new ND4JIllegalStateException("memsetAsync 2 failed");
        context.syncOldStream();
        */

        // copy device-to-device if the source data is current on the device, otherwise host-to-device
        if (pointSrc.isActualOnDeviceSide()) {
            if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getDevicePointer(), this.lengthLong() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyDeviceToDevice, context.getOldStream()) == 0)
                throw new ND4JIllegalStateException("memcpyAsync failed");
        } else {
            if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getHostPointer(), this.lengthLong() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyHostToDevice, context.getOldStream()) == 0)
                throw new ND4JIllegalStateException("memcpyAsync failed");
        }

        context.syncOldStream();

        copy = Nd4j.createArrayFromShapeBuffer(buffer, this.shapeInfoDataBuffer());

        // tag buffer as valid on device side
        pointDst.tickHostRead();
        pointDst.tickDeviceWrite();

        AtomicAllocator.getInstance().getFlowController().registerAction(context, pointDst, pointSrc);
    } else {
        copy = this.dup(this.ordering());
        Nd4j.getExecutioner().commit();
    }

    Nd4j.getMemoryManager().setCurrentWorkspace(current);

    return copy;
}
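For context, here is a minimal usage sketch of leverageTo (not taken from the nd4j sources; the workspace ids "WS_OUTER" and "WS_INNER" are hypothetical, and default workspace configuration is assumed). The call copies an array allocated in an inner workspace into an outer one, so it remains valid after the inner workspace closes:

import org.nd4j.linalg.api.memory.MemoryWorkspace;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class LeverageToExample {
    public static void main(String[] args) {
        INDArray leveraged;
        try (MemoryWorkspace outer = Nd4j.getWorkspaceManager().getAndActivateWorkspace("WS_OUTER")) {
            try (MemoryWorkspace inner = Nd4j.getWorkspaceManager().getAndActivateWorkspace("WS_INNER")) {
                // allocated inside WS_INNER
                INDArray scratch = Nd4j.rand(3, 3);
                // copied into WS_OUTER by the method shown above
                leveraged = scratch.leverageTo("WS_OUTER");
            }
            // WS_INNER may now reuse scratch's memory, but leveraged is still valid here
            System.out.println(leveraged);
        }
    }
}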
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class JCublasNDArray, method migrate.
/**
 * This method pulls this INDArray into the current workspace.
 *
 * PLEASE NOTE: if there is no current workspace, the INDArray is returned as is.
 *
 * @return a copy of this INDArray allocated in the current workspace, or this array itself when no workspace is active
 */
@Override
public INDArray migrate() {
    MemoryWorkspace current = Nd4j.getMemoryManager().getCurrentWorkspace();

    if (current == null)
        return this;

    INDArray copy = null;
    if (!this.isView()) {
        Nd4j.getExecutioner().commit();

        DataBuffer buffer = Nd4j.createBuffer(this.lengthLong(), false);

        AllocationPoint pointDst = AtomicAllocator.getInstance().getAllocationPoint(buffer);
        AllocationPoint pointSrc = AtomicAllocator.getInstance().getAllocationPoint(this.data);

        // CudaContext context = (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext();
        CudaContext context = AtomicAllocator.getInstance().getFlowController().prepareAction(pointDst, pointSrc);

        if (pointSrc.isActualOnDeviceSide()) {
            if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getDevicePointer(), this.lengthLong() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyDeviceToDevice, context.getOldStream()) == 0)
                throw new ND4JIllegalStateException("memcpyAsync failed");
        } else {
            if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getHostPointer(), this.lengthLong() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyHostToDevice, context.getOldStream()) == 0)
                throw new ND4JIllegalStateException("memcpyAsync failed");
        }

        context.syncOldStream();

        if (pointDst.getDeviceId() != Nd4j.getMemoryManager().getCurrentWorkspace().getDeviceId()) {
            // log.info("Swapping [{}] -> [{}]", pointDst.getDeviceId(), Nd4j.getMemoryManager().getCurrentWorkspace().getDeviceId());
            pointDst.setDeviceId(Nd4j.getMemoryManager().getCurrentWorkspace().getDeviceId());
        }

        copy = Nd4j.createArrayFromShapeBuffer(buffer, this.shapeInfoDataBuffer());

        // tag buffer as valid on device side
        pointDst.tickHostRead();
        pointDst.tickDeviceWrite();

        AtomicAllocator.getInstance().getFlowController().registerAction(context, pointDst, pointSrc);
    } else {
        copy = this.dup(this.ordering());
    }

    return copy;
}
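A minimal usage sketch of migrate (not part of the nd4j sources; the workspace id "WS_MIGRATE" is hypothetical). An array created outside any workspace is pulled into the currently active one:

import org.nd4j.linalg.api.memory.MemoryWorkspace;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class MigrateExample {
    public static void main(String[] args) {
        // created outside of any workspace, so it lives in regular (detached) memory
        INDArray detached = Nd4j.ones(3, 3);

        try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace("WS_MIGRATE")) {
            // migrate() copies the array into the currently active workspace
            INDArray attached = detached.migrate();
            System.out.println(attached.isAttached()); // true inside the workspace
        }
    }
}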
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class CudaGridExecutioner, method buildZ.
protected void buildZ(Accumulation op, int... dimension) {
    Arrays.sort(dimension);

    for (int i = 0; i < dimension.length; i++) {
        if (dimension[i] < 0)
            dimension[i] += op.x().rank();
    }

    // do op along all dimensions
    if (dimension.length == op.x().rank())
        dimension = new int[] { Integer.MAX_VALUE };

    int[] retShape = Shape.wholeArrayDimension(dimension) ? new int[] { 1, 1 }
            : ArrayUtil.removeIndex(op.x().shape(), dimension);

    // ensure vector is proper shape
    if (retShape.length == 1) {
        if (dimension[0] == 0)
            retShape = new int[] { 1, retShape[0] };
        else
            retShape = new int[] { retShape[0], 1 };
    } else if (retShape.length == 0) {
        retShape = new int[] { 1, 1 };
    }

    /*
    if (op.x().isVector() && op.x().length() == ArrayUtil.prod(retShape))
        return op.noOp();
    */

    INDArray ret = null;
    if (op.z() == null || op.z() == op.x()) {
        if (op.isComplexAccumulation()) {
            int xT = op.x().tensorssAlongDimension(dimension);
            int yT = op.y().tensorssAlongDimension(dimension);
            ret = Nd4j.create(xT, yT);
        } else {
            if (Math.abs(op.zeroDouble()) < Nd4j.EPS_THRESHOLD) {
                ret = Nd4j.zeros(retShape);
            } else {
                ret = Nd4j.valueArrayOf(retShape, op.zeroDouble());
            }
        }
        op.setZ(ret);
    } else {
        // compare length
        if (op.z().lengthLong() != ArrayUtil.prodLong(retShape))
            throw new ND4JIllegalStateException("Shape of target array for reduction [" + Arrays.toString(op.z().shape()) + "] doesn't match expected [" + Arrays.toString(retShape) + "]");

        if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) {
            op.z().assign(op.zeroDouble());
        } else if (op.x().data().dataType() == DataBuffer.Type.FLOAT) {
            op.z().assign(op.zeroFloat());
        } else if (op.x().data().dataType() == DataBuffer.Type.HALF) {
            op.z().assign(op.zeroHalf());
        }

        ret = op.z();
    }
}
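To illustrate the result shapes this method prepares, here is a small sketch using public reduce calls (the shapes shown assume the always-2D conventions of this nd4j version; the snippet is an illustration, not part of CudaGridExecutioner):

import java.util.Arrays;

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class ReductionShapeExample {
    public static void main(String[] args) {
        INDArray x = Nd4j.rand(4, 5);

        // reducing along dimension 0 removes it and keeps a row vector: expected shape [1, 5]
        System.out.println(Arrays.toString(x.sum(0).shape()));

        // reducing along dimension 1 keeps a column vector: expected shape [4, 1]
        System.out.println(Arrays.toString(x.sum(1).shape()));

        // reducing along all dimensions collapses to the [1, 1] scalar shape
        System.out.println(Arrays.toString(x.sum(0, 1).shape()));
    }
}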
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class NetworkOrganizer, method getIntersections.
/**
 * This method returns the specified number of IPs by mapping the known addresses into a binary tree of network prefixes.
 *
 * @param numShards number of IP addresses to return
 * @param primary   collection of already-selected primary addresses to exclude, or null if none were selected yet
 * @return a shuffled list of up to numShards candidate IP addresses
 */
protected List<String> getIntersections(int numShards, Collection<String> primary) {
    /*
     * Since each IP address can be represented as a 4-byte sequence, one byte per octet, in leading order,
     * we use that representation to build the tree.
     */
    if (primary == null) {
        for (NetworkInformation information : informationCollection) {
            for (String ip : information.getIpAddresses()) {
                // first we get the binary representation for each IP
                String octet = convertIpToOctets(ip);

                // then we map each of them into a virtual "tree", to find the most popular networks within the cluster
                tree.map(octet);
            }
        }

        // now we get the most "popular" class A network from the tree
        String octetA = tree.getHottestNetworkA();

        List<String> candidates = new ArrayList<>();
        AtomicInteger matchCount = new AtomicInteger(0);

        for (NetworkInformation node : informationCollection) {
            for (String ip : node.getIpAddresses()) {
                String octet = convertIpToOctets(ip);

                // counting matches
                if (octet.startsWith(octetA)) {
                    matchCount.incrementAndGet();
                    candidates.add(ip);
                    break;
                }
            }
        }

        // TODO: improve this; we should iterate over the popular networks instead of only the single top class A network
        if (matchCount.get() != informationCollection.size())
            throw new ND4JIllegalStateException("Mismatching A class");

        Collections.shuffle(candidates);

        return new ArrayList<>(candidates.subList(0, Math.min(numShards, candidates.size())));
    } else {
        // if primary isn't null, we expect the network to be already filtered
        String octetA = tree.getHottestNetworkA();

        List<String> candidates = new ArrayList<>();

        for (NetworkInformation node : informationCollection) {
            for (String ip : node.getIpAddresses()) {
                String octet = convertIpToOctets(ip);

                // counting matches
                if (octet.startsWith(octetA) && !primary.contains(ip)) {
                    candidates.add(ip);
                    break;
                }
            }
        }

        Collections.shuffle(candidates);

        return new ArrayList<>(candidates.subList(0, Math.min(numShards, candidates.size())));
    }
}
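The selection above boils down to matching binary prefixes of IP addresses. Here is a self-contained sketch of that idea using plain string operations (toBinary is a stand-in for convertIpToOctets and is not the NetworkOrganizer API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class PrefixMatchExample {
    // convert a dotted-quad IP into its 32-character binary representation
    static String toBinary(String ip) {
        StringBuilder sb = new StringBuilder();
        for (String part : ip.split("\\.")) {
            String bits = Integer.toBinaryString(Integer.parseInt(part));
            sb.append("00000000".substring(bits.length())).append(bits);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // the class A network 10.x.x.x corresponds to the first 8 bits of 10.0.0.0
        String prefixA = toBinary("10.0.0.0").substring(0, 8);

        List<String> candidates = new ArrayList<>();
        for (String ip : Arrays.asList("10.0.1.5", "10.0.2.7", "192.168.0.3")) {
            if (toBinary(ip).startsWith(prefixA))
                candidates.add(ip);
        }
        System.out.println(candidates); // [10.0.1.5, 10.0.2.7]
    }
}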
Use of org.nd4j.linalg.exception.ND4JIllegalStateException in project nd4j by deeplearning4j.
The class NetworkOrganizer, method toBinaryOctet.
protected static String toBinaryOctet(@NonNull Integer value) {
    if (value < 0 || value > 255)
        throw new ND4JIllegalStateException("IP octets can't hold values below 0 or above 255");

    String octetBase = Integer.toBinaryString(value);
    StringBuilder builder = new StringBuilder();

    // left-pad with zeros to a fixed width of 8 bits
    for (int i = 0; i < 8 - octetBase.length(); i++) {
        builder.append("0");
    }
    builder.append(octetBase);

    return builder.toString();
}
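For reference, the padding loop above produces the same result as a standard-library one-liner; a quick sketch showing expected outputs for a few octet values:

public class BinaryOctetExample {
    public static void main(String[] args) {
        // equivalent zero-padded formatting using java.lang only, for comparison
        System.out.println(String.format("%8s", Integer.toBinaryString(192)).replace(' ', '0')); // 11000000
        System.out.println(String.format("%8s", Integer.toBinaryString(10)).replace(' ', '0'));  // 00001010
        System.out.println(String.format("%8s", Integer.toBinaryString(255)).replace(' ', '0')); // 11111111
    }
}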