Use of com.simiacryptus.mindseye.lang.cudnn.CudaTensor in project MindsEye by SimiaCryptus.
In the class ActivationLayer, method evalAndFree:
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  // Fall back to the CPU-compatible implementation when CUDA is unavailable.
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  // assert Arrays.stream(inObj).flatMapToDouble(input -> input.data.stream().flatMapToDouble(x -> Arrays.stream(x.getData()))).allMatch(v -> Double.isFinite(v));
  final Result inputResult = inObj[0];
  final TensorList inputData = inputResult.getData();
  @Nonnull final int[] inputSize = inputData.getDimensions();
  @Nonnull final int[] outputSize = inputSize;
  final int length = inputData.length();
  final int inputDims = Tensor.length(inputSize);
  try {
    final CudaTensor outPtr = CudaSystem.run(gpu -> {
      @Nullable final CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
      final CudaTensor outputTensor;
      if (1 == inputData.currentRefCount() && 1 == inputTensor.currentRefCount() && (!inputResult.isAlive() || mode == Mode.RELU.id)) {
        // Sole owner of the input buffer: run the activation in place.
        inputTensor.addRef();
        outputTensor = inputTensor;
      } else {
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, inputSize[2], inputSize[1], inputSize[0], inputSize[2] * inputSize[1] * inputSize[0], inputSize[1] * inputSize[0], inputSize[0], 1);
        @Nonnull final CudaMemory outputData = gpu.allocate((long) precision.size * inputDims * length, MemoryType.Managed.normalize(), true);
        outputTensor = CudaTensor.wrap(outputData, outputDescriptor, precision);
      }
      @Nonnull final CudaResource<cudnnActivationDescriptor> activationDesc = gpu.newActivationDescriptor(mode, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, 0);
      try {
        CudaMemory memory = inputTensor.getMemory(gpu);
        CudaMemory tensorMemory = outputTensor.getMemory(gpu);
        // Forward pass: outputTensor = activation(inputTensor).
        CudaSystem.handle(gpu.cudnnActivationForward(activationDesc.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), memory.getPtr(), precision.getPointer(0.0), outputTensor.descriptor.getPtr(), tensorMemory.getPtr()));
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
        memory.dirty();
        tensorMemory.dirty();
        tensorMemory.freeRef();
        memory.freeRef();
        return outputTensor;
      } catch (@Nonnull final Throwable e) {
        throw new ComponentException("Error applying " + Arrays.toString(inputSize), e);
      } finally {
        activationDesc.freeRef();
        inputTensor.freeRef();
      }
    }, inputData);
    return new Result(CudaTensorList.create(outPtr, length, outputSize, precision), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
      if (inputResult.isAlive()) {
        final TensorList data = CudaSystem.run(gpu -> {
          @Nullable CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, true);
          @Nullable CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
          assert length == delta.length();
          CudaTensor localOut = outPtr.getDense(gpu);
          delta.freeRef();
          CudaTensor passbackTensor;
          // if (sameStrides(deltaTensor.descriptor, inputTensor.descriptor)) {
          //   passbackTensor = deltaTensor;
          //   passbackTensor.addRef();
          // } else {
          //   passbackTensor = deltaTensor.getDense(gpu);
          //   inputTensor = inputTensor.getDenseAndFree(gpu);
          // }
          passbackTensor = CudaTensor.wrap(gpu.allocate((long) Tensor.length(inputSize) * length * precision.size, MemoryType.Managed.normalize(), false), gpu.newTensorDescriptor(precision, length, inputSize[2], inputSize[1], inputSize[0], inputSize[2] * inputSize[1] * inputSize[0], inputSize[1] * inputSize[0], inputSize[0], 1), precision);
          @Nonnull final CudaResource<cudnnActivationDescriptor> activationDesc = gpu.newActivationDescriptor(mode, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, 0);
          try {
            CudaMemory localOutMemory = localOut.getMemory(gpu);
            CudaMemory deltaTensorMemory = deltaTensor.getMemory(gpu);
            CudaMemory inputTensorMemory = inputTensor.getMemory(gpu);
            CudaMemory passbackTensorMemory = passbackTensor.getMemory(gpu);
            // Backward pass: cuDNN needs the forward output, the incoming delta, and the original input.
            CudaSystem.handle(gpu.cudnnActivationBackward(activationDesc.getPtr(), precision.getPointer(1.0), localOut.descriptor.getPtr(), localOutMemory.getPtr(), deltaTensor.descriptor.getPtr(), deltaTensorMemory.getPtr(), inputTensor.descriptor.getPtr(), inputTensorMemory.getPtr(), precision.getPointer(0.0), passbackTensor.descriptor.getPtr(), passbackTensorMemory.getPtr()));
            assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
            localOutMemory.dirty();
            deltaTensorMemory.dirty();
            inputTensorMemory.dirty();
            passbackTensorMemory.dirty();
            localOutMemory.freeRef();
            deltaTensorMemory.freeRef();
            inputTensorMemory.freeRef();
            passbackTensorMemory.freeRef();
          } catch (@Nonnull final Throwable e) {
            throw new ComponentException("Error applying " + Arrays.toString(inputSize), e);
          } finally {
            localOut.freeRef();
            inputTensor.freeRef();
            deltaTensor.freeRef();
            activationDesc.freeRef();
          }
          return CudaTensorList.wrap(passbackTensor, length, inputSize, precision);
        }, delta);
        inputResult.accumulate(buffer, data);
      } else {
        delta.freeRef();
      }
    }) {
      @Override
      public final void accumulate(DeltaSet<Layer> buffer, TensorList delta) {
        getAccumulator().accept(buffer, delta);
      }

      @Override
      protected void _free() {
        inputData.freeRef();
        outPtr.freeRef();
        inputResult.freeRef();
      }

      @Override
      public boolean isAlive() {
        return inputResult.isAlive() || !isFrozen();
      }
    };
  } catch (@Nonnull final Throwable e) {
    throw new ComponentException("Error applying image res " + Arrays.toString(inputSize), e);
  }
}
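For orientation, here is a minimal caller-side sketch. It is hedged: the single-argument ActivationLayer(Mode) constructor and the ConstantResult.singleResultArray test helper are assumptions not shown in the snippet above; only evalAndFree, Tensor, and the reference-counting contract appear there.

// A hedged usage sketch, not verbatim MindsEye API: the constructor and the
// ConstantResult helper are assumed; evalAndFree consumes the refs of its inputs.
final ActivationLayer relu = new ActivationLayer(ActivationLayer.Mode.RELU); // assumed constructor
final Tensor image = new Tensor(8, 8, 3).setByCoord(c -> Math.random() - 0.5);
final Result out = relu.evalAndFree(ConstantResult.singleResultArray(new Tensor[][] { { image } })); // assumed helper
out.getData().stream().forEach(t -> { /* inspect activations */ t.freeRef(); });
out.freeRef();
relu.freeRef();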
Use of com.simiacryptus.mindseye.lang.cudnn.CudaTensor in project MindsEye by SimiaCryptus.
In the class BandAvgReducerLayer, method evalAndFree:
@Nullable
@Override
public Result evalAndFree(final Result... inObj) {
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  final Result input = inObj[0];
  TensorList inputData = input.getData();
  @Nonnull final int[] inputSize = inputData.getDimensions();
  int length = inputData.length();
  final int bands = inputSize[2];
  CudaTensorList result = CudaSystem.run(gpu -> {
    CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
    // One output value per band per example.
    @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, bands, 1, 1);
    long size = (long) precision.size * outputDescriptor.nStride * length;
    @Nonnull final CudaMemory outputPtr = gpu.allocate(size, MemoryType.Managed, true);
    CudaResource<cudnnReduceTensorDescriptor> reduceTensorDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_AVG, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
    CudaMemory inputMemory = inputTensor.getMemory(gpu);
    @Nonnull final CudaMemory workspacePtr = gpu.allocate(inputMemory.size, MemoryType.Device, true);
    @Nonnull final CudaMemory indexPtr = gpu.allocate(12 * length, MemoryType.Device, false);
    // Average-reduce each band's pixels down to a single value.
    gpu.cudnnReduceTensor(reduceTensorDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(alpha), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr());
    outputPtr.dirty();
    inputMemory.dirty();
    Stream.of(inputMemory, inputTensor, reduceTensorDescriptor, workspacePtr, indexPtr, inputData).forEach(ReferenceCounting::freeRef);
    return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, new int[] { 1, 1, bands }, precision);
  });
  int pixels = inputSize[0] * inputSize[1];
  return new Result(result, (DeltaSet<Layer> ctx, TensorList delta) -> {
    // Backward pass on the CPU: spread each band's gradient uniformly over its pixels.
    TensorList passback = TensorArray.wrap(delta.stream().map(x -> {
      Tensor tensor = new Tensor(inputSize[0], inputSize[1], inputSize[2]).setByCoord(c -> x.get(c.getCoords()[2]) * alpha / pixels);
      x.freeRef();
      return tensor;
    }).toArray(i -> new Tensor[i]));
    // An alternative GPU implementation, left disabled in the source:
    // passback = CudaSystem.run(gpu -> {
    //   CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
    //   @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, inputSize[2], inputSize[1], inputSize[0]);
    //   @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
    //   CudaMemory deltaMemory = deltaTensor.getMemory(gpu);
    //   @Nonnull final CudaDevice.CudaTensorDescriptor inputDescriptor = gpu.newTensorDescriptor(precision, 1, 1, inputSize[1], inputSize[0]);
    //   for (int batch = 0; batch < length; batch++) {
    //     Tensor tensor = delta.get(batch);
    //     for (int band = 0; band < bands; band++) {
    //       int i = batch * bands + band;
    //       CudaMemory img = outputPtr.withByteOffset(precision.size * i * outputDescriptor.cStride);
    //       CudaMemory val = deltaMemory.withByteOffset(precision.size * i);
    //       gpu.cudnnSetTensor(inputDescriptor.getPtr(), img.getPtr(), precision.getPointer(tensor.get(band) / outputDescriptor.cStride));
    //       img.freeRef();
    //       val.freeRef();
    //       outputPtr.dirty().synchronize();
    //     }
    //   }
    //   Stream.of(deltaMemory, deltaTensor, inputDescriptor).forEach(ReferenceCounting::freeRef);
    //   return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, inputSize, precision);
    // });
    input.accumulate(ctx, passback);
  }) {
    @Override
    protected void _free() {
      super._free();
      input.freeRef();
    }
  };
}
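The backward pass above makes the layer's semantics explicit: each output band is alpha times the mean over that band's pixels. A plain-Java restatement of the forward semantics for clarity (a sketch, not the GPU path; Tensor.coordStream(boolean) and Tensor.get(Coordinate) are assumed from the MindsEye Tensor API):

// Reference semantics: out[b] = alpha * average over all pixels (x, y) of in(x, y, b).
static double[] bandAverages(final Tensor in, final double alpha) {
  final int[] dims = in.getDimensions(); // {width, height, bands}
  final int pixels = dims[0] * dims[1];
  final double[] sums = new double[dims[2]];
  // Accumulate each band, then scale by alpha / pixels.
  in.coordStream(false).forEach(c -> sums[c.getCoords()[2]] += in.get(c));
  for (int band = 0; band < sums.length; band++) sums[band] *= alpha / pixels;
  return sums;
}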
Use of com.simiacryptus.mindseye.lang.cudnn.CudaTensor in project MindsEye by SimiaCryptus.
In the class BinarySumLayer, method evalAndFree:
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  if (inObj.length == 1) {
    if (rightFactor != 1)
      throw new IllegalStateException();
    if (leftFactor != 1)
      throw new IllegalStateException();
    return inObj[0];
  }
  if (inObj.length > 2) {
    if (rightFactor != 1)
      throw new IllegalStateException();
    if (leftFactor != 1)
      throw new IllegalStateException();
    // Fold more than two inputs pairwise: sum(a, b, c) = sum(sum(a, b), c).
    return Arrays.stream(inObj).reduce((a, b) -> evalAndFree(a, b)).get();
  }
  assert (inObj.length == 2);
  final TensorList leftData = inObj[0].getData();
  final TensorList rightData = inObj[1].getData();
  int[] leftDimensions = leftData.getDimensions();
  if (3 < leftDimensions.length) {
    throw new IllegalArgumentException("dimensions=" + Arrays.toString(leftDimensions));
  }
  // Pad missing trailing dimensions so the data is always treated as width x height x bands.
  @Nonnull final int[] dimensions = { leftDimensions.length < 1 ? 0 : leftDimensions[0], leftDimensions.length < 2 ? 1 : leftDimensions[1], leftDimensions.length < 3 ? 1 : leftDimensions[2] };
  final int length = leftData.length();
  if (length != rightData.length())
    throw new IllegalArgumentException();
  if (3 != dimensions.length) {
    throw new IllegalArgumentException("dimensions=" + Arrays.toString(dimensions));
  }
  for (int i = 1; i < inObj.length; i++) {
    if (Tensor.length(dimensions) != Tensor.length(inObj[i].getData().getDimensions())) {
      throw new IllegalArgumentException(Arrays.toString(dimensions) + " != " + Arrays.toString(inObj[i].getData().getDimensions()));
    }
  }
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  return new Result(CudaSystem.run(gpu -> {
    // Forward pass: output = leftFactor * left + rightFactor * right, via cudnnOpTensor.
    @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_ADD, precision);
    @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
    // .getDenseAndFree(gpu); //.moveTo(gpu.getDeviceNumber());
    @Nullable final CudaTensor lPtr = gpu.getTensor(leftData, precision, MemoryType.Device, false);
    // .getDenseAndFree(gpu); //.moveTo(gpu.getDeviceNumber());
    @Nullable final CudaTensor rPtr = gpu.getTensor(rightData, precision, MemoryType.Device, false);
    @Nonnull final CudaMemory outputPtr = gpu.allocate(precision.size * Tensor.length(dimensions) * length, MemoryType.Managed, true);
    CudaMemory lPtrMemory = lPtr.getMemory(gpu);
    CudaMemory rPtrMemory = rPtr.getMemory(gpu);
    gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(leftFactor), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(rightFactor), rPtr.descriptor.getPtr(), rPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr());
    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
    lPtrMemory.dirty();
    rPtrMemory.dirty();
    outputPtr.dirty();
    rPtrMemory.freeRef();
    lPtrMemory.freeRef();
    CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, outputDescriptor, precision);
    Stream.<ReferenceCounting>of(opDescriptor, lPtr, rPtr).forEach(ReferenceCounting::freeRef);
    return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
  }, leftData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    // Backward pass: each input receives the delta scaled by its own factor.
    Runnable a = () -> {
      if (inObj[0].isAlive()) {
        CudaTensorList tensorList = CudaSystem.run(gpu -> {
          @Nullable final CudaTensor lPtr = gpu.getTensor(delta, precision, MemoryType.Device, false);
          @Nonnull final CudaMemory passbackPtr = gpu.allocate(precision.size * Tensor.length(dimensions) * length, MemoryType.Managed.normalize(), true);
          @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
          CudaMemory lPtrMemory = lPtr.getMemory(gpu);
          gpu.cudnnTransformTensor(precision.getPointer(leftFactor), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(0.0), passbackDescriptor.getPtr(), passbackPtr.getPtr());
          assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
          passbackPtr.dirty();
          lPtrMemory.dirty();
          lPtrMemory.freeRef();
          CudaTensor cudaTensor = CudaTensor.wrap(passbackPtr, passbackDescriptor, precision);
          lPtr.freeRef();
          return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
        }, delta);
        inObj[0].accumulate(buffer, tensorList);
      }
    };
    Runnable b = () -> {
      if (inObj[1].isAlive()) {
        CudaTensorList tensorList = CudaSystem.run(gpu -> {
          @Nullable final CudaTensor lPtr = gpu.getTensor(delta, precision, MemoryType.Device, false);
          @Nonnull final CudaMemory outputPtr = gpu.allocate(precision.size * Tensor.length(dimensions) * length, MemoryType.Managed.normalize(), true);
          @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
          CudaMemory lPtrMemory = lPtr.getMemory(gpu);
          gpu.cudnnTransformTensor(precision.getPointer(rightFactor), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(0.0), passbackDescriptor.getPtr(), outputPtr.getPtr());
          outputPtr.dirty();
          lPtrMemory.dirty();
          lPtrMemory.freeRef();
          CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, passbackDescriptor, precision);
          lPtr.freeRef();
          return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
        }, delta);
        inObj[1].accumulate(buffer, tensorList);
      }
    };
    if (CoreSettings.INSTANCE.isSingleThreaded())
      TestUtil.runAllSerial(a, b);
    else
      TestUtil.runAllParallel(a, b);
  }) {
    @Override
    protected void _free() {
      Arrays.stream(inObj).forEach(x -> x.freeRef());
      leftData.freeRef();
      rightData.freeRef();
    }

    @Override
    public boolean isAlive() {
      for (@Nonnull final Result element : inObj) {
        if (element.isAlive()) {
          return true;
        }
      }
      return false;
    }
  };
}
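Since the forward pass computes leftFactor * left + rightFactor * right, the layer doubles as subtraction or averaging. A hedged sketch (the two-argument factor constructor is an assumption not shown above; a and b stand for live Results from upstream layers):

// out = 0.5 * a + 0.5 * b, i.e. an elementwise average of two inputs.
final BinarySumLayer average = new BinarySumLayer(0.5, 0.5); // assumed constructor
final Result out = average.evalAndFree(a, b);
// With factors (1.0, -1.0) the same layer computes a - b. With more than two
// inputs both factors must be 1, as enforced at the top of evalAndFree.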
Use of com.simiacryptus.mindseye.lang.cudnn.CudaTensor in project MindsEye by SimiaCryptus.
In the class ImgTileSelectLayer, method copy:
/**
 * Copies a tile of the input tensor into the given output buffer, returning the result as a CudaTensor.
 *
 * @param gpu              the gpu
 * @param input            the input
 * @param inputDimensions  the input dimensions
 * @param outputDimensions the output dimensions
 * @param positionX        the position x
 * @param positionY        the position y
 * @param precision        the precision
 * @param outputPtr        the output ptr
 * @return the cuda tensor
 */
public static CudaTensor copy(final CudnnHandle gpu, @Nonnull final TensorList input, final int[] inputDimensions, final int[] outputDimensions, final int positionX, final int positionY, final Precision precision, final CudaMemory outputPtr) {
  final int length = input.length();
  if (3 != inputDimensions.length)
    throw new IllegalArgumentException("inputDimensions.length");
  if (3 != outputDimensions.length)
    throw new IllegalArgumentException("outputDimensions.length");
  int bands = inputDimensions[2];
  if (bands != outputDimensions[2])
    throw new IllegalArgumentException(String.format("%d != %d", bands, outputDimensions[2]));
  // log.info(String.format("offset=%d,%d", offsetX, offsetY));
  @Nonnull final int[] viewDim = getViewDimensions(inputDimensions, outputDimensions, new int[] { positionX, positionY, 0 });
  @Nullable final CudaTensor inputTensor = gpu.getTensor(input, precision, MemoryType.Device, false);
  // A negative position means the tile extends past the input's edge: read from the
  // start of the source and write at an offset into the destination. A positive
  // position is the reverse.
  int sourceOffset = 0;
  int destinationOffset = 0;
  if (positionX < 0) {
    destinationOffset += Math.abs(positionX);
  } else {
    sourceOffset += Math.abs(positionX);
  }
  if (positionY < 0) {
    destinationOffset += outputDimensions[0] * Math.abs(positionY);
  } else {
    sourceOffset += inputTensor.descriptor.hStride * Math.abs(positionY);
  }
  assert sourceOffset >= 0;
  assert destinationOffset >= 0;
  assert sourceOffset + Tensor.length(viewDim) <= Tensor.length(inputDimensions);
  assert destinationOffset + Tensor.length(viewDim) <= Tensor.length(outputDimensions);
  @Nonnull final CudaDevice.CudaTensorDescriptor sourceViewDescriptor = gpu.newTensorDescriptor(precision, length, viewDim[2], viewDim[1], viewDim[0], inputTensor.descriptor.nStride, inputTensor.descriptor.cStride, inputTensor.descriptor.hStride, inputTensor.descriptor.wStride);
  CudaMemory inputTensorMemory = inputTensor.getMemory(gpu);
  try {
    if (Arrays.equals(viewDim, outputDimensions)) {
      // The view covers the whole output: no copy needed, just re-describe the input memory.
      assert sourceOffset >= 0;
      assert destinationOffset == 0;
      return CudaTensor.wrap(inputTensorMemory.withByteOffset(sourceOffset * precision.size), sourceViewDescriptor, precision);
    }
    @Nonnull final CudaDevice.CudaTensorDescriptor destinationViewDescriptor = gpu.newTensorDescriptor(precision, length, viewDim[2], viewDim[1], viewDim[0], outputDimensions[2] * outputDimensions[1] * outputDimensions[0], outputDimensions[1] * outputDimensions[0], outputDimensions[0], 1);
    CudaSystem.handle(gpu.cudnnTransformTensor(precision.getPointer(1.0), sourceViewDescriptor.getPtr(), inputTensorMemory.getPtr().withByteOffset(sourceOffset * precision.size), precision.getPointer(1.0), destinationViewDescriptor.getPtr(), outputPtr.getPtr().withByteOffset(destinationOffset * precision.size)));
    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
    outputPtr.dirty();
    inputTensorMemory.dirty();
    Stream.<ReferenceCounting>of(sourceViewDescriptor, destinationViewDescriptor).forEach(ReferenceCounting::freeRef);
    @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor = gpu.newTensorDescriptor(precision, length, outputDimensions[2], outputDimensions[1], outputDimensions[0], outputDimensions[2] * outputDimensions[1] * outputDimensions[0], outputDimensions[1] * outputDimensions[0], outputDimensions[0], 1);
    inputTensor.freeRef();
    return CudaTensor.wrap(outputPtr, passbackDescriptor, precision);
  } finally {
    inputTensorMemory.freeRef();
  }
}
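The sourceOffset/destinationOffset arithmetic is the heart of copy(). A standalone restatement with concrete numbers (a sketch derived from the branches above; hStride is the input's row stride):

// Offset rule from copy(): negative positions shift the write into the destination,
// positive positions shift the read into the source.
static int[] tileOffsets(final int positionX, final int positionY, final int outputWidth, final int hStride) {
  int sourceOffset = 0;
  int destinationOffset = 0;
  if (positionX < 0) destinationOffset += -positionX; else sourceOffset += positionX;
  if (positionY < 0) destinationOffset += outputWidth * -positionY; else sourceOffset += hStride * positionY;
  return new int[] { sourceOffset, destinationOffset };
}
// Example: selecting a tile at (2, 3) from an 8-pixel-wide input (hStride = 8) gives
// tileOffsets(2, 3, 4, 8) == {2 + 8 * 3, 0} == {26, 0}: read 26 elements in, write from element 0.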
Use of com.simiacryptus.mindseye.lang.cudnn.CudaTensor in project MindsEye by SimiaCryptus.
In the class PoolingLayer, method evalAndFree:
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  final int poolDims = 2;
  @Nonnull final int[] windowSize = { windowX, windowY };
  @Nonnull final int[] padding = { paddingX, paddingY };
  @Nonnull final int[] stride = { strideX, strideY };
  final Result input = inObj[0];
  final TensorList inputData = input.getData();
  @Nonnull final int[] inputSize = inputData.getDimensions();
  final int length = inputData.length();
  final int inputDims = Tensor.length(inputSize);
  @Nonnull final int[] outputSize = new int[4];
  final CudaTensor outputData = CudaSystem.run(gpu -> {
    try {
      gpu.initThread();
      @Nonnull final CudaResource<cudnnPoolingDescriptor> poolingDesc = gpu.createPoolingDescriptor(mode.id, poolDims, windowSize, padding, stride);
      @Nullable final CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
      // Let cuDNN compute the output dimensions (N, C, H, W) for this pooling configuration.
      CudaSystem.handle(CudaSystem.cudnnGetPoolingNdForwardOutputDim(poolingDesc.getPtr(), inputTensor.descriptor.getPtr(), 4, outputSize));
      assert inputSize[2] == outputSize[1];
      @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, outputSize[0], outputSize[1], outputSize[2], outputSize[3], outputSize[1] * outputSize[2] * outputSize[3], outputSize[2] * outputSize[3], outputSize[3], 1);
      @Nonnull final CudaMemory outputTensor = gpu.allocate((long) precision.size * Tensor.length(outputSize), MemoryType.Managed.normalize(), true);
      CudaMemory inputDataMemory = inputTensor.getMemory(gpu);
      CudaSystem.handle(gpu.cudnnPoolingForward(poolingDesc.getPtr(), precision.getPointer(alpha), inputTensor.descriptor.getPtr(), inputDataMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputTensor.getPtr()));
      assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
      inputDataMemory.dirty();
      outputTensor.dirty();
      Stream.<ReferenceCounting>of(inputTensor, poolingDesc, inputDataMemory).forEach(ReferenceCounting::freeRef);
      return CudaTensor.wrap(outputTensor, outputDescriptor, precision);
    } catch (@Nonnull final Throwable e) {
      throw new ComponentException("Error", e);
    }
  }, inputData);
  return new Result(CudaTensorList.create(outputData, length, new int[] { outputSize[3], outputSize[2], outputSize[1] }, precision), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList error) -> {
    assert error.length() == inputData.length();
    if (input.isAlive()) {
      TensorList data = CudaSystem.run(gpu -> {
        @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor = gpu.newTensorDescriptor(precision, length, inputSize[2], inputSize[1], inputSize[0], inputSize[2] * inputSize[1] * inputSize[0], inputSize[1] * inputSize[0], inputSize[0], 1);
        @Nonnull final CudaResource<cudnnPoolingDescriptor> poolingDesc = gpu.createPoolingDescriptor(mode.id, poolDims, windowSize, padding, stride);
        @Nullable final CudaTensor inputTensor;
        synchronized (gpu) {
          inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, true);
        }
        @Nullable final CudaTensor errorPtr;
        synchronized (gpu) {
          errorPtr = gpu.getTensor(error, precision, MemoryType.Device, true);
        }
        @Nonnull final CudaMemory passbackBuffer = gpu.allocate((long) inputDims * precision.size * length, MemoryType.Managed.normalize(), true);
        CudaMemory outputDataMemory = outputData.getMemory(gpu);
        CudaMemory errorPtrMemory = errorPtr.getMemory(gpu);
        CudaMemory inputDataMemory = inputTensor.getMemory(gpu);
        // cudnnPoolingBackward needs the forward output, the error, and the original input.
        CudaSystem.handle(gpu.cudnnPoolingBackward(poolingDesc.getPtr(), precision.getPointer(this.alpha), outputData.descriptor.getPtr(), outputDataMemory.getPtr(), errorPtr.descriptor.getPtr(), errorPtrMemory.getPtr(), inputTensor.descriptor.getPtr(), inputDataMemory.getPtr(), precision.getPointer(0.0), passbackDescriptor.getPtr(), passbackBuffer.getPtr()));
        outputDataMemory.dirty();
        errorPtrMemory.dirty();
        inputDataMemory.dirty();
        passbackBuffer.dirty();
        Stream.<ReferenceCounting>of(errorPtr, inputTensor, poolingDesc, outputDataMemory, errorPtrMemory, inputDataMemory).forEach(ReferenceCounting::freeRef);
        return CudaTensorList.wrap(CudaTensor.wrap(passbackBuffer, passbackDescriptor, precision), length, inputSize, precision);
      }, error);
      input.accumulate(buffer, data);
    }
  }) {
    @Override
    protected void _free() {
      Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
      inputData.freeRef();
      outputData.freeRef();
    }

    @Override
    public boolean isAlive() {
      return input.isAlive() || !isFrozen();
    }
  };
}
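The outputSize array filled by cudnnGetPoolingNdForwardOutputDim follows the standard cuDNN pooling formula; a one-dimensional restatement for sanity checks (a sketch, not a MindsEye API):

// Standard cuDNN pooling output size per spatial dimension (integer division truncates).
static int pooledSize(final int inputSize, final int windowSize, final int padding, final int stride) {
  return 1 + (inputSize + 2 * padding - windowSize) / stride;
}
// Example: pooledSize(224, 2, 0, 2) == 112, so 2x2 pooling with stride 2 halves a 224x224 image.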