Use of com.simiacryptus.mindseye.lang.DeltaSet in the MindsEye project by SimiaCryptus: the evalAndFree method of the ImgCropLayer class.
/**
 * Evaluates this crop layer on the GPU, consuming the input results.
 * Crops the single 3D input (width x height x bands) down to (sizeX x sizeY),
 * returning the input unchanged when no crop is needed. Follows this codebase's
 * manual reference-counting convention: every TensorList/CudaTensor consumed
 * here is freeRef'd exactly once on each path.
 *
 * @param inObj exactly one input Result (asserted below)
 * @return the cropped Result, or the input itself when dimensions already match
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
// Fall back to the CPU-compatible implementation when CUDA is unavailable.
if (!CudaSystem.isEnabled())
return getCompatibilityLayer().evalAndFree(inObj);
assert 1 == inObj.length;
final Result input = inObj[0];
final TensorList inputData = input.getData();
assert 3 == inputData.getDimensions().length;
final int length = inputData.length();
@Nonnull int[] dimIn = inputData.getDimensions();
// No-op crop: pass the input straight through (caller keeps ownership).
if (dimIn[0] == sizeX && dimIn[1] == sizeY) {
return input;
}
// Output keeps the band count (index 2) and takes the configured width/height.
@Nonnull final int[] dimOut = Arrays.copyOf(dimIn, 3);
dimOut[0] = sizeX;
dimOut[1] = sizeY;
final TensorList outputData = CudaSystem.run(gpu -> {
@Nullable final CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
// Consume the input data reference now that it has been materialized on-device.
inputData.freeRef();
// 'dirty' is true when the output is a pure shrink on both axes, meaning every
// output element will be overwritten by copy() — NOTE(review): presumably this
// lets copy() skip zero-initializing the destination; confirm against copy().
boolean dirty = dimOut[0] <= dimIn[0] && dimOut[1] <= dimIn[1];
assert dimOut[0] > 0;
assert dimOut[1] > 0;
assert dimOut[2] > 0;
CudaTensor cudaTensor = copy(gpu, inputTensor, length, dimIn, dimOut, dirty, precision);
// Release the temporary on-device view of the input.
Stream.<ReferenceCounting>of(inputTensor).forEach(ReferenceCounting::freeRef);
return CudaTensorList.wrap(cudaTensor, length, dimOut, precision);
}, inputData);
return new Result(outputData, (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
// Backward pass: validate the gradient's shape before un-cropping it.
if (!Arrays.equals(delta.getDimensions(), outputData.getDimensions())) {
throw new AssertionError(Arrays.toString(delta.getDimensions()) + " != " + Arrays.toString(outputData.getDimensions()));
}
if (delta.length() != outputData.length()) {
throw new AssertionError(delta.length() + " != " + outputData.length());
}
assert delta.length() == length;
if (input.isAlive()) {
final TensorList passbackTensorList = CudaSystem.run(gpu -> {
@Nullable final CudaTensor errorPtr = gpu.getTensor(delta, precision, MemoryType.Device, false);
// Consume the delta reference once it is resident on-device.
delta.freeRef();
// Reverse of the forward 'dirty' test: true when un-cropping covers the
// whole destination — NOTE(review): confirm semantics against copy().
boolean dirty = dimOut[0] >= dimIn[0] && dimOut[1] >= dimIn[1];
// Note the swapped dimension arguments: gradient flows dimOut -> dimIn.
CudaTensor cudaTensor = copy(gpu, errorPtr, length, dimOut, dimIn, dirty, precision);
Stream.<ReferenceCounting>of(errorPtr).forEach(ReferenceCounting::freeRef);
return CudaTensorList.wrap(cudaTensor, length, dimIn, precision);
}, delta);
input.accumulate(buffer, passbackTensorList);
} else {
// Dead input: still required to consume the delta reference.
delta.freeRef();
}
}) {
@Override
public void accumulate(final DeltaSet<Layer> buffer, final TensorList delta) {
getAccumulator().accept(buffer, delta);
}
@Override
protected void _free() {
// Release the held references to the input results.
Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
}
@Override
public boolean isAlive() {
return Arrays.stream(inObj).anyMatch(x -> x.isAlive());
}
};
}
Use of com.simiacryptus.mindseye.lang.DeltaSet in the MindsEye project by SimiaCryptus: the evalAndFree method of the ImgTileAssemblyLayer class.
/**
 * Assembles a grid of image tiles (rows x columns inputs, in row-major order)
 * into one output tensor on the GPU, consuming the input results.
 * Tiles within a row may differ in height; each row's height is the maximum
 * tile height in that row, and rows are stacked vertically.
 *
 * @param inObj one Result per tile, ordered row-major; a single input is
 *              returned unchanged
 * @return the assembled Result
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
// Fall back to the CPU-compatible implementation when CUDA is unavailable.
if (!CudaSystem.isEnabled())
return getCompatibilityLayer().evalAndFree(inObj);
// Trivial 1x1 grid: nothing to assemble.
if (1 == inObj.length) {
return inObj[0];
}
int[] inputDimensions = inObj[0].getData().getDimensions();
assert 3 == inputDimensions.length;
final int length = inObj[0].getData().length();
int[] outputDims = getOutputDims(inObj);
final TensorList outputData = CudaSystem.run(gpu -> {
assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
assert outputDims[0] > 0;
assert outputDims[1] > 0;
assert outputDims[2] > 0;
// One contiguous buffer for the whole assembled batch.
@Nonnull final CudaMemory outputBuffer = gpu.allocate((long) length * outputDims[2] * outputDims[1] * outputDims[0] * precision.size, MemoryType.Managed.normalize(), false);
int totalWidth = 0;
int totalHeight = 0;
int inputIndex = 0;
// First build the per-tile copy descriptors, then execute them (possibly in parallel).
List<CopyParams> copies = new ArrayList<>();
for (int row = 0; row < rows; row++) {
int positionX = 0;
int rowHeight = 0;
for (int col = 0; col < columns; col++) {
int[] tileDimensions = inObj[inputIndex].getData().getDimensions();
// A row is as tall as its tallest tile.
rowHeight = Math.max(rowHeight, tileDimensions[1]);
copies.add(new CopyParams(gpu, inObj, outputBuffer, length, outputDims, tileDimensions, inputIndex, positionX, totalHeight));
positionX += tileDimensions[0];
inputIndex += 1;
assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
}
totalHeight += rowHeight;
totalWidth = Math.max(totalWidth, positionX);
}
assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
Stream<CopyParams> stream = copies.stream();
// Parallelize the tile copies unless globally or locally disabled.
if (!CoreSettings.INSTANCE.isSingleThreaded() && parallel)
stream = stream.parallel();
stream.forEach(this::copy);
// All tile data has been copied into outputBuffer; release the input TensorLists.
Arrays.stream(inObj).forEach(r -> r.getData().freeRef());
CudaDevice.CudaTensorDescriptor descriptor = gpu.newTensorDescriptor(precision, length, outputDims[2], outputDims[1], outputDims[0]);
CudaTensor ptr = CudaTensor.wrap(outputBuffer, descriptor, precision);
return CudaTensorList.wrap(ptr, length, outputDims, precision);
}, Arrays.stream(inObj).map(Result::getData).toArray());
return new Result(outputData, (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList error) -> {
// Backward pass: slice the assembled gradient back into per-tile gradients.
if (!Arrays.equals(error.getDimensions(), outputData.getDimensions())) {
throw new AssertionError(Arrays.toString(error.getDimensions()) + " != " + Arrays.toString(outputData.getDimensions()));
}
if (error.length() != outputData.length()) {
throw new AssertionError(error.length() + " != " + outputData.length());
}
assert error.length() == length;
int totalHeight = 0;
int inputIndex = 0;
List<BackpropParams> tasks = new ArrayList<>();
// Mirror the forward tiling walk to recover each tile's position in the grid.
for (int row = 0; row < rows; row++) {
int positionX = 0;
int rowHeight = 0;
for (int col = 0; col < columns; col++) {
Result in = inObj[inputIndex];
int[] tileDimensions = in.getData().getDimensions();
rowHeight = Math.max(rowHeight, tileDimensions[1]);
// Only backprop into tiles that participate in the gradient.
if (inObj[inputIndex].isAlive()) {
tasks.add(new BackpropParams(inObj, buffer, error, outputDims, tileDimensions, length, positionX, totalHeight, inputIndex));
}
positionX += tileDimensions[0];
inputIndex += 1;
}
totalHeight += rowHeight;
}
Stream<BackpropParams> stream = tasks.stream();
if (!CoreSettings.INSTANCE.isSingleThreaded() && parallel)
stream = stream.parallel();
stream.forEach(this::backprop);
}) {
@Override
protected void _free() {
// Release the held references to the input results.
Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
}
@Override
public boolean isAlive() {
return Arrays.stream(inObj).anyMatch(x -> x.isAlive());
}
};
}
Use of com.simiacryptus.mindseye.lang.DeltaSet in the MindsEye project by SimiaCryptus: the evalAndFree method of the ImgTileSelectLayer class.
/**
 * Selects a (sizeY x sizeX) view of the single 3D input at (positionX, positionY)
 * on the GPU, consuming the input results. The backward pass places the gradient
 * back at the negated offset.
 *
 * NOTE(review): unlike the sibling layers, this lambda appears to pass sizeY as
 * the width slot and sizeX as the height slot (see the dimension check and the
 * getViewDimensions call) — this is internally consistent here, but confirm the
 * intended X/Y convention against the class's field documentation.
 *
 * @param inObj exactly one input Result (asserted below)
 * @return the selected-view Result, or the input itself when dimensions already match
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
// Fall back to the CPU-compatible implementation when CUDA is unavailable.
if (!CudaSystem.isEnabled())
return getCompatibilityLayer().evalAndFree(inObj);
assert 1 == inObj.length;
final Result input = inObj[0];
final TensorList inputData = input.getData();
assert 3 == inputData.getDimensions().length;
final int length = inputData.length();
@Nonnull int[] dimIn = inputData.getDimensions();
// No-op selection: pass the input straight through.
if (dimIn[0] == sizeY && dimIn[1] == sizeX) {
return input;
}
@Nonnull final int[] dimOut = getViewDimensions(dimIn, new int[] { sizeY, sizeX, dimIn[2] }, new int[] { positionX, positionY, 0 });
final TensorList outputData = CudaSystem.run(gpu -> {
assert dimOut[0] > 0;
assert dimOut[1] > 0;
assert dimOut[2] > 0;
// 'dirty' when the view exactly covers the input in both axes —
// NOTE(review): presumably lets copy() skip zero-fill; confirm against copy().
boolean dirty = dimOut[0] == dimIn[0] && dimOut[1] == dimIn[1];
CudaTensor cudaTensor = copy(gpu, inputData, dimIn, dimOut, precision, this.positionX, this.positionY, dirty);
return CudaTensorList.wrap(cudaTensor, length, dimOut, precision);
}, inputData);
int[] outputDimensions = outputData.getDimensions();
assert length == outputData.length();
return new Result(outputData, (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList error) -> {
// Backward pass: validate the gradient's shape before scattering it back.
if (!Arrays.equals(error.getDimensions(), outputDimensions)) {
throw new AssertionError(Arrays.toString(error.getDimensions()) + " != " + Arrays.toString(outputDimensions));
}
if (error.length() != length) {
throw new AssertionError(error.length() + " != " + length);
}
assert error.length() == inputData.length();
if (input.isAlive()) {
final TensorList passbackTensorList = CudaSystem.run(gpu -> {
boolean dirty = dimOut[0] >= dimIn[0] && dimOut[1] >= dimIn[1];
// Negated offsets place the gradient back where the view was taken from.
CudaTensor cudaTensor = copy(gpu, error, dimOut, dimIn, precision, -this.positionX, -this.positionY, dirty);
return CudaTensorList.wrap(cudaTensor, length, dimIn, precision);
}, error);
input.accumulate(buffer, passbackTensorList);
}
// NOTE(review): no error.freeRef() on either path here, unlike ImgCropLayer's
// backward lambda — verify whether the accumulate contract consumes it.
}) {
@Override
protected void _free() {
// Release the held references to the input results.
Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
}
@Override
public boolean isAlive() {
return Arrays.stream(inObj).anyMatch(x -> x.isAlive());
}
};
}
Use of com.simiacryptus.mindseye.lang.DeltaSet in the MindsEye project by SimiaCryptus: the evalAndFree method of the ImgTileSubnetLayer class.
/**
 * Applies the inner layer independently to overlapping (width x height) tiles
 * of the single input (stepped by strideX/strideY), then reassembles the tile
 * outputs with an ImgTileAssemblyLayer. Gradients from all tiles are summed
 * into one shared 'passback' buffer and accumulated into the input once the
 * last tile's backward pass completes.
 *
 * @param inObj exactly one input Result (asserted below)
 * @return the reassembled Result; or the inner layer's direct output when the
 *         input fits in a single tile
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
assert 1 == inObj.length;
Result input = inObj[0];
TensorList inputData = input.getData();
@Nonnull final int[] inputDims = inputData.getDimensions();
assert 3 == inputDims.length;
int bands = inputDims[2];
int length = inputData.length();
// Shared gradient accumulator, sized to the full input; zero-initialized (true).
CudaTensor passback = CudaSystem.run(gpu -> {
return CudaTensor.wrap(gpu.allocate(inputData.getElements() * precision.size, MemoryType.Managed, true), gpu.newTensorDescriptor(precision, length, inputDims[2], inputDims[1], inputDims[0]), precision);
});
try {
// Counts completed tile backprops so the last one triggers input.accumulate.
AtomicInteger counter = new AtomicInteger(0);
// Ceil-divide so partial tiles at the right/bottom edges still get covered.
int cols = (int) (Math.ceil((inputDims[0] - width) * 1.0 / strideX) + 1);
int rows = (int) (Math.ceil((inputDims[1] - height) * 1.0 / strideY) + 1);
// Single tile covers the whole input: delegate directly to the inner layer.
// (passback is released by the finally block.)
if (cols == 1 && rows == 1)
return getInner().evalAndFree(inObj);
ArrayList<CudaTensor> tiles = new ArrayList<>();
int[] tileDimensions = { width, height, bands };
Result[][] tileResults = new Result[rows][];
for (int row = 0; row < rows; row++) {
tileResults[row] = new Result[cols];
for (int col = 0; col < cols; col++) {
int positionX = col * strideX;
int positionY = row * strideY;
assert positionX >= 0;
assert positionY >= 0;
assert positionX < inputDims[0];
assert positionY < inputDims[1];
// Extract this tile from the input on the GPU.
CudaTensor tile = CudaSystem.run(gpu -> {
return ImgTileSelectLayer.copy(gpu, inputData, inputData.getDimensions(), tileDimensions, precision, positionX, positionY, true);
});
// Each tile Result holds its own reference to the shared passback buffer,
// released by that Result's _free below.
passback.addRef();
tileResults[row][col] = getInner().evalAndFree(new Result(CudaTensorList.wrap(tile, length, tileDimensions, precision), (DeltaSet<Layer> ctx, TensorList delta) -> {
// Scatter this tile's gradient into the shared buffer at its position.
CudaSystem.run(gpu -> {
ImgTileSelectLayer.copy(gpu, delta, tileDimensions, -positionX, -positionY, precision, passback).freeRef();
});
// The last tile to backprop flushes the accumulated gradient to the input.
if (counter.incrementAndGet() >= rows * cols) {
counter.set(0);
input.accumulate(ctx, CudaTensorList.create(passback, length, inputDims, precision));
}
}) {
@Override
protected void _free() {
super._free();
passback.freeRef();
}
});
}
}
// All tiles extracted; release the input data reference.
inputData.freeRef();
logger.debug(String.format("Broke input %s into %s rows, %s cols", Arrays.toString(inputDims), rows, cols));
// Reassemble the per-tile outputs into one tensor (row-major order).
Result result = new ImgTileAssemblyLayer(cols, rows).setParallel(parallel).setPrecision(precision).evalAndFree(Arrays.stream(tileResults).flatMap(Arrays::stream).toArray(i -> new Result[i]));
return new Result(result.getData(), (ctx, delta) -> {
result.accumulate(ctx, delta);
}) {
@Override
public void accumulate(final DeltaSet<Layer> buffer, final TensorList delta) {
getAccumulator().accept(buffer, delta);
}
@Override
protected void _free() {
super._free();
result.freeRef();
input.freeRef();
}
};
} finally {
// Drop this method's own reference; tile Results retain theirs via addRef above.
passback.freeRef();
}
}
Use of com.simiacryptus.mindseye.lang.DeltaSet in the MindsEye project by SimiaCryptus: the evalAndFree method of the NProductLayer class.
/**
 * Computes the element-wise product of two or more equal-size 3D inputs on the
 * GPU via cuDNN's op-tensor multiply, consuming the input results. The backward
 * pass for each input is the product of the delta with all OTHER inputs
 * (standard product rule), again computed by pairwise cuDNN multiplies.
 *
 * @param inObj two or more input Results with element-count-equal dimensions
 * @return the element-wise product Result
 * @throws IllegalArgumentException on fewer than two inputs, non-3D dimensions,
 *         or mismatched element counts
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
// Fall back to the CPU-compatible implementation when CUDA is unavailable.
if (!CudaSystem.isEnabled())
return getCompatibilityLayer().evalAndFree(inObj);
if (inObj.length <= 1) {
throw new IllegalArgumentException("inObj.length=" + inObj.length);
}
@Nonnull final int[] dimensions = inObj[0].getData().getDimensions();
final int length = inObj[0].getData().length();
if (3 != dimensions.length) {
throw new IllegalArgumentException("dimensions=" + Arrays.toString(dimensions));
}
// All inputs must have the same total element count as the first.
for (int i = 1; i < inObj.length; i++) {
TensorList data = inObj[i].getData();
if (Tensor.length(dimensions) != Tensor.length(data.getDimensions())) {
throw new IllegalArgumentException(Arrays.toString(dimensions) + " != " + Arrays.toString(data.getDimensions()));
}
}
return new Result(CudaSystem.run(gpu -> {
@Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
@Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
// Fold all inputs pairwise: ((in0 * in1) * in2) * ... ; each operand is
// addRef'd here and freeRef'd inside the reduce step.
@Nonnull final TensorList result1 = Arrays.stream(inObj).map(x -> {
TensorList data = x.getData();
data.addRef();
return data;
}).reduce((l, r) -> {
@Nullable final CudaTensor lPtr = gpu.getTensor(l, precision, MemoryType.Device, false);
@Nullable final CudaTensor rPtr = gpu.getTensor(r, precision, MemoryType.Device, false);
// assert lPtr.memory.size == rPtr.memory.size;
@Nonnull final CudaMemory outputPtr = gpu.allocate((long) outputDescriptor.nStride * length * precision.size, MemoryType.Device, true);
CudaMemory lPtrMemory = lPtr.getMemory(gpu);
CudaMemory rPtrMemory = rPtr.getMemory(gpu);
// out = 1.0 * l * r + 0.0 * out (cudnnOpTensor with CUDNN_OP_TENSOR_MUL).
CudaSystem.handle(JCudnn.cudnnOpTensor(gpu.handle, opDescriptor.getPtr(), precision.getPointer(1.0), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(1.0), rPtr.descriptor.getPtr(), rPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
lPtrMemory.dirty();
rPtrMemory.dirty();
outputPtr.dirty();
lPtrMemory.freeRef();
rPtrMemory.freeRef();
// Consume both operands' references (the addRef above, or the previous
// reduce step's wrapped result).
Arrays.stream(new ReferenceCounting[] { lPtr, rPtr, l, r }).forEach(ReferenceCounting::freeRef);
// The descriptor is shared across reduce steps; each wrap takes a reference.
outputDescriptor.addRef();
return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, dimensions, precision);
}).get();
Arrays.stream(new ReferenceCounting[] { opDescriptor, outputDescriptor }).forEach(ReferenceCounting::freeRef);
return result1;
}, Arrays.stream(inObj).map(Result::getData).toArray()), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
// Product rule: d/d(in_i) = delta * prod(in_j for j != i).
for (int index = 0; index < inObj.length; index++) {
final Result input = inObj[index];
if (input.isAlive()) {
final int _index = index;
// Substitute delta for the current input, then multiply everything together.
@Nonnull TensorList data = IntStream.range(0, inObj.length).mapToObj(i -> {
TensorList tensorList = i == _index ? delta : inObj[i].getData();
tensorList.addRef();
return tensorList;
}).reduce((l, r) -> {
return CudaSystem.run(gpu -> {
@Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
@Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
@Nullable final CudaTensor lPtr = gpu.getTensor(l, precision, MemoryType.Device, false);
@Nullable final CudaTensor rPtr = gpu.getTensor(r, precision, MemoryType.Device, false);
// assert lPtr.memory.size == rPtr.memory.size;
@Nonnull final CudaMemory outputPtr = gpu.allocate((long) outputDescriptor.nStride * length * precision.size, MemoryType.Device, true);
CudaMemory lPtrMemory = lPtr.getMemory(gpu);
CudaMemory rPtrMemory = rPtr.getMemory(gpu);
CudaSystem.handle(JCudnn.cudnnOpTensor(gpu.handle, opDescriptor.getPtr(), precision.getPointer(1.0), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(1.0), rPtr.descriptor.getPtr(), rPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
lPtrMemory.dirty();
rPtrMemory.dirty();
outputPtr.dirty();
lPtrMemory.freeRef();
rPtrMemory.freeRef();
// Unlike the forward pass, descriptors are created fresh per step, so the
// outputDescriptor reference transfers directly into the wrapped tensor.
Stream.of(lPtr, rPtr, opDescriptor, l, r).forEach(ReferenceCounting::freeRef);
return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, dimensions, precision);
}, l, r);
}).get();
input.accumulate(buffer, data);
}
}
// Consume the caller's delta reference (per-step references were addRef'd above).
delta.freeRef();
}) {
@Override
public final void accumulate(DeltaSet<Layer> buffer, TensorList delta) {
getAccumulator().accept(buffer, delta);
}
@Override
protected void _free() {
// Release both the Result references and the data references retained
// for the backward pass.
Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
for (int i = 0; i < inObj.length; i++) {
inObj[i].getData().freeRef();
}
}
@Override
public boolean isAlive() {
for (@Nonnull final Result element : inObj) if (element.isAlive()) {
return true;
}
return false;
}
};
}
Aggregations