Use of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus:
the class ImgConcatLayer, method evalAndFree.
/**
 * CUDA/cuDNN forward/backward implementation of image concatenation along the band
 * (channel) axis.
 * <p>
 * Forward: allocates a single output buffer and uses {@code cudnnTransformTensor} to
 * copy each input's bands into that input's channel offset within the output.
 * Backward: slices the delta tensor apart by band offset and accumulates each slice
 * into the corresponding live input. When {@code maxBands > 0}, output bands beyond
 * the limit are truncated and inputs falling entirely past the limit are skipped.
 *
 * @param inObj the inputs; all must be 3-dimensional with identical width, height,
 *              and batch length (band counts may differ)
 * @return the concatenated result, or the compatibility layer's result when CUDA is
 *         disabled
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  // Fall back to the pure-Java implementation when no CUDA device is enabled.
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  // assert Arrays.stream(this.bias).allMatch(Double::isFinite);
  // assert Arrays.stream(inObj).flatMapToDouble(input->input.data.stream().flatMapToDouble(x-> Arrays.stream(x.getData()))).allMatch(v->Double.isFinite(v));
  int[] dimensions = inObj[0].getData().getDimensions();
  assert 3 == dimensions.length;
  @Nonnull final int[] outputDimensions = Arrays.copyOf(dimensions, dimensions.length);
  final int length = inObj[0].getData().length();
  // Every input must match the first input's width, height, and batch length.
  assert Arrays.stream(inObj).allMatch(x -> {
    @Nonnull int[] d = x.getData().getDimensions();
    return 3 == d.length && d[0] == outputDimensions[0] && d[1] == outputDimensions[1] && x.getData().length() == length;
  });
  // Output band count = sum of all input band counts, clamped to maxBands when set.
  outputDimensions[2] = Arrays.stream(inObj).mapToInt(x -> x.getData().getDimensions()[2]).sum();
  if (0 < maxBands && outputDimensions[2] > maxBands) {
    outputDimensions[2] = maxBands;
  }
  return new Result(CudaSystem.run(gpu -> {
    final long outputSize = ((long) length * outputDimensions[2] * outputDimensions[1] * outputDimensions[0] * precision.size);
    @Nonnull final CudaMemory cudaOutput = gpu.allocate(outputSize, MemoryType.Managed.normalize(), true);
    IntStream stream = IntStream.range(0, inObj.length);
    // if (!CoreSettings.INSTANCE.isConservative() && parallel) stream = stream.parallel();
    stream.forEach(i -> {
      assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
      final TensorList input = inObj[i].getData();
      @Nonnull final int[] inputDimensions = input.getDimensions();
      assert inputDimensions[0] == outputDimensions[0];
      assert inputDimensions[1] == outputDimensions[1];
      // Channel offset of this input within the concatenated output.
      int bandOffset = IntStream.range(0, i).map(j -> inObj[j].getData().getDimensions()[2]).sum();
      if (maxBands > 0)
        bandOffset = Math.min(bandOffset, maxBands);
      // Number of this input's bands that still fit under the maxBands cap;
      // may be <= 0 for inputs entirely past the cap, which are skipped below.
      int inputBands = inputDimensions[2];
      if (maxBands > 0)
        inputBands = Math.min(inputBands, maxBands - bandOffset);
      if (inputBands > 0) {
        @Nullable final CudaTensor cudaInput = gpu.getTensor(input, precision, MemoryType.Device, false);
        assert inputBands > 0;
        assert maxBands <= 0 || inputBands <= maxBands;
        assert inputBands <= inputDimensions[2];
        // Output view covering only this input's bands; strides are those of the
        // full packed output tensor so the copy lands at the right channel offset.
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(//
        precision, //
        length, //
        inputBands, //
        outputDimensions[1], //
        outputDimensions[0], //
        outputDimensions[2] * outputDimensions[1] * outputDimensions[0], //
        outputDimensions[1] * outputDimensions[0], //
        outputDimensions[0], 1);
        // Input view limited to inputBands channels, using the input's own strides.
        @Nonnull final CudaDevice.CudaTensorDescriptor inputDescriptor = gpu.newTensorDescriptor(//
        precision, //
        length, //
        inputBands, //
        inputDimensions[1], //
        inputDimensions[0], //
        cudaInput.descriptor.nStride, //
        cudaInput.descriptor.cStride, //
        cudaInput.descriptor.hStride, cudaInput.descriptor.wStride);
        // Byte offset of this input's first band within the output buffer.
        int byteOffset = outputDescriptor.cStride * bandOffset * precision.size;
        CudaMemory cudaInputMemory = cudaInput.getMemory(gpu);
        // alpha=1, beta=0: plain strided copy of the input slice into the output view.
        gpu.cudnnTransformTensor(precision.getPointer(1.0), inputDescriptor.getPtr(), cudaInputMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), cudaOutput.getPtr().withByteOffset(byteOffset));
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
        // dirty() marks device memory as modified (CudaMemory bookkeeping);
        // NOTE(review): the input is only read here — marking it dirty mirrors the
        // other layers in this file, but the necessity is unclear; confirm semantics.
        cudaInputMemory.dirty();
        cudaOutput.dirty();
        cudaInputMemory.freeRef();
        Stream.<ReferenceCounting>of(cudaInput, outputDescriptor, inputDescriptor).forEach(ReferenceCounting::freeRef);
      }
    });
    CudaDevice.CudaTensorDescriptor outDesc = gpu.newTensorDescriptor(precision, length, outputDimensions[2], outputDimensions[1], outputDimensions[0]);
    // wrap() takes ownership of cudaOutput and outDesc.
    return CudaTensorList.wrap(CudaTensor.wrap(cudaOutput, outDesc, precision), length, outputDimensions, precision);
  }, Arrays.stream(inObj).map(Result::getData).toArray()), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    // Backward: route each input's band-slice of the delta back to that input.
    assert delta.getDimensions()[0] == outputDimensions[0];
    assert delta.getDimensions()[1] == outputDimensions[1];
    assert delta.getDimensions()[2] == outputDimensions[2];
    if (!Arrays.equals(delta.getDimensions(), outputDimensions)) {
      throw new AssertionError(Arrays.toString(delta.getDimensions()) + " != " + Arrays.toString(outputDimensions));
    }
    // outputBuffer.freeRef();
    // assert error.stream().flatMapToDouble(x-> Arrays.stream(x.getData())).allMatch(Double::isFinite);
    @Nonnull IntStream stream = IntStream.range(0, inObj.length);
    if (!CoreSettings.INSTANCE.isSingleThreaded() && parallel)
      stream = stream.parallel();
    stream.forEach(i -> {
      final Result input = inObj[i];
      // NOTE(review): "inputDimentions" is a typo for "inputDimensions" (local-only name).
      int[] inputDimentions = input.getData().getDimensions();
      assert 3 == inputDimentions.length;
      assert delta.length() == input.getData().length();
      assert inputDimentions[0] == outputDimensions[0];
      assert inputDimentions[1] == outputDimensions[1];
      int bandOffset = IntStream.range(0, i).map(j -> inObj[j].getData().getDimensions()[2]).sum();
      // Bands of this input that actually contributed to the (possibly truncated) output.
      int inputBands = maxBands <= 0 ? inputDimentions[2] : Math.min(inputDimentions[2], maxBands - bandOffset);
      if (inputBands > 0 && input.isAlive()) {
        assert inputBands <= inputDimentions[2];
        assert inputBands <= outputDimensions[2];
        final TensorList passbackTensorList = CudaSystem.run(gpu -> {
          final CudaTensor result;
          // Presumably guards concurrent getTensor calls from the parallel stream
          // above — TODO confirm against CudnnHandle's thread-safety contract.
          synchronized (gpu) {
            result = gpu.getTensor(delta, precision, MemoryType.Device, true);
          }
          @Nullable final CudaTensor cudaDelta = result;
          CudaMemory cudaDeltaMemory = cudaDelta.getMemory(gpu);
          try {
            if (inputDimentions[2] == inputBands) {
              // Fast path: the full input band range survived truncation — return a
              // zero-copy strided view into the delta at this input's channel offset.
              @Nonnull final CudaDevice.CudaTensorDescriptor viewDescriptor = gpu.newTensorDescriptor(//
              precision, //
              length, //
              inputDimentions[2], //
              inputDimentions[1], //
              inputDimentions[0], //
              cudaDelta.descriptor.nStride, //
              cudaDelta.descriptor.cStride, //
              cudaDelta.descriptor.hStride, cudaDelta.descriptor.wStride);
              int byteOffset = cudaDelta.descriptor.cStride * bandOffset * precision.size;
              CudaMemory ptr = cudaDeltaMemory.withByteOffset(byteOffset);
              CudaTensor cudaTensor = CudaTensor.wrap(ptr, viewDescriptor, precision);
              Stream.<ReferenceCounting>of(cudaDelta).forEach(ReferenceCounting::freeRef);
              return CudaTensorList.wrap(cudaTensor, length, inputDimentions, precision);
            } else {
              // Truncated path: only the first inputBands channels received gradient.
              // Copy them into a buffer shaped like the full input.
              // Source view: first inputBands channels of the delta (delta strides).
              @Nonnull final CudaDevice.CudaTensorDescriptor passbackTransferDescriptor = gpu.newTensorDescriptor(//
              precision, //
              length, //
              inputBands, //
              inputDimentions[1], //
              inputDimentions[0], //
              inputDimentions[2] * inputDimentions[1] * inputDimentions[0], //
              inputDimentions[1] * inputDimentions[0], //
              inputDimentions[0], 1);
              // Full-input descriptor for the passback tensor returned to the caller.
              @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor = gpu.newTensorDescriptor(//
              precision, //
              length, //
              inputDimentions[2], //
              inputDimentions[1], //
              inputDimentions[0], //
              inputDimentions[2] * inputDimentions[1] * inputDimentions[0], //
              inputDimentions[1] * inputDimentions[0], //
              inputDimentions[0], 1);
              @Nonnull final CudaDevice.CudaTensorDescriptor deltaViewDescriptor = gpu.newTensorDescriptor(//
              precision, //
              length, //
              inputBands, //
              inputDimentions[1], //
              inputDimentions[0], //
              cudaDelta.descriptor.nStride, //
              cudaDelta.descriptor.cStride, //
              cudaDelta.descriptor.hStride, cudaDelta.descriptor.wStride);
              // NOTE(review): in this branch inputBands != inputDimentions[2], so the
              // final allocate() flag is always false; assumed the allocation is
              // zeroed so untouched trailing bands carry zero gradient — TODO confirm.
              @Nonnull final CudaMemory cudaBackprop = gpu.allocate((long) passbackDescriptor.nStride * length * precision.size, MemoryType.Managed.normalize(), inputBands == inputDimentions[2]);
              int byteOffset = cudaDelta.descriptor.cStride * bandOffset * precision.size;
              gpu.cudnnTransformTensor(precision.getPointer(1.0), deltaViewDescriptor.getPtr(), cudaDeltaMemory.getPtr().withByteOffset(byteOffset), precision.getPointer(0.0), passbackTransferDescriptor.getPtr(), cudaBackprop.getPtr());
              cudaBackprop.dirty();
              cudaDeltaMemory.dirty();
              Stream.<ReferenceCounting>of(cudaDelta, deltaViewDescriptor, passbackTransferDescriptor).forEach(ReferenceCounting::freeRef);
              return CudaTensorList.wrap(CudaTensor.wrap(cudaBackprop, passbackDescriptor, precision), length, inputDimentions, precision);
            }
          } finally {
            // Always release the local memory handle, whichever path returned.
            cudaDeltaMemory.freeRef();
          }
        });
        input.accumulate(buffer, passbackTensorList);
      }
      // assert passbackTensorList.stream().flatMapToDouble(x-> Arrays.stream(x.getData())).allMatch(v->Double.isFinite(v));
    });
  }) {
    @Override
    protected void _free() {
      // evalAndFree contract: this Result owns both the input handles and their data.
      for (@Nonnull Result result : inObj) {
        result.freeRef();
        result.getData().freeRef();
      }
    }

    @Override
    public boolean isAlive() {
      // A gradient is needed if any upstream input wants one.
      return Arrays.stream(inObj).anyMatch(x -> x.isAlive());
    }
  };
}
Use of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus:
the class ImgTileAssemblyLayer, method copy.
/**
 * Copies a clipped view of {@code source} into {@code destination} at tile position
 * (positionX, positionY) using a single {@code cudnnTransformTensor} call.
 * <p>
 * Both alpha and beta are 1.0, so the copy ACCUMULATES into the destination rather
 * than overwriting it — overlapping tile regions are summed.
 *
 * @param gpu                   the gpu (cuDNN handle to execute the copy on)
 * @param length                the batch length
 * @param sourceDimensions      the source dimensions as [width, height, bands]
 * @param source                the input buffer
 * @param destinationDimensions the destination dimensions as [width, height, bands]
 * @param destination           the output buffer
 * @param positionX             the position x of the tile within the destination
 * @param positionY             the position y of the tile within the destination
 * @return the dimensions of the copied view
 * @throws IllegalArgumentException if either dimension array is not 3-D or the band
 *                                  counts differ
 */
public int[] copy(@Nonnull CudnnHandle gpu, int length, @Nonnull int[] sourceDimensions, @Nonnull CudaTensor source, @Nonnull int[] destinationDimensions, @Nonnull CudaMemory destination, int positionX, int positionY) {
  if (3 != sourceDimensions.length)
    throw new IllegalArgumentException("inputDimensions.length");
  if (3 != destinationDimensions.length)
    throw new IllegalArgumentException("dimOut.length");
  int bands = sourceDimensions[2];
  if (bands != destinationDimensions[2])
    throw new IllegalArgumentException(String.format("%d != %d", bands, destinationDimensions[2]));
  // log.info(String.format("offset=%d,%d", offsetX, offsetY));
  // The overlapping region between source and destination for this tile position.
  @Nonnull final int[] viewDim = getViewDimensions(sourceDimensions, destinationDimensions, new int[] { positionX, positionY, 0 });
  // Source view keeps the source tensor's own strides.
  @Nonnull final CudaDevice.CudaTensorDescriptor sourceViewDescriptor = gpu.newTensorDescriptor(//
  precision, //
  length, //
  viewDim[2], //
  viewDim[1], //
  viewDim[0], //
  source.descriptor.nStride, //
  source.descriptor.cStride, //
  source.descriptor.hStride, source.descriptor.wStride);
  // Destination view uses packed strides derived from the full destination image.
  @Nonnull final CudaDevice.CudaTensorDescriptor destinationViewDescriptor = gpu.newTensorDescriptor(//
  precision, //
  length, //
  viewDim[2], //
  viewDim[1], //
  viewDim[0], //
  destinationDimensions[2] * destinationDimensions[1] * destinationDimensions[0], //
  destinationDimensions[1] * destinationDimensions[0], //
  destinationDimensions[0], 1);
  // Element offsets (scaled to bytes by precision.size at the call site below).
  // A positive position shifts the write location within the destination; a
  // negative position clips the source instead.
  int sourceOffset = 0;
  int destinationOffset = 0;
  if (positionX > 0) {
    destinationOffset += Math.abs(positionX);
  } else {
    sourceOffset += source.descriptor.wStride * Math.abs(positionX);
  }
  if (positionY > 0) {
    destinationOffset += destinationDimensions[0] * Math.abs((positionY));
  } else {
    sourceOffset += source.descriptor.hStride * (Math.abs(positionY));
  }
  assert sourceOffset >= 0;
  assert destinationOffset >= 0;
  assert sourceOffset + Tensor.length(viewDim) <= (source.descriptor.nStride * length);
  assert destinationOffset + Tensor.length(viewDim) <= Tensor.length(destinationDimensions);
  CudaMemory sourceMemory = source.getMemory(gpu);
  // beta = 1.0: accumulate into the destination rather than overwrite.
  CudaSystem.handle(gpu.cudnnTransformTensor(precision.getPointer(1.0), sourceViewDescriptor.getPtr(), sourceMemory.getPtr().withByteOffset(sourceOffset * precision.size), precision.getPointer(1.0), destinationViewDescriptor.getPtr(), destination.getPtr().withByteOffset(destinationOffset * precision.size)));
  assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
  sourceMemory.dirty();
  destination.dirty();
  sourceMemory.freeRef();
  // Descriptors are local; release them before returning.
  Arrays.stream(new ReferenceCounting[] { sourceViewDescriptor, destinationViewDescriptor }).forEach(ReferenceCounting::freeRef);
  return viewDim;
}
Use of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus:
the class ImgTileAssemblyLayer, method evalAndFree.
/**
 * CUDA forward/backward for assembling a grid of image tiles (rows x columns inputs,
 * row-major order) into one larger image. Per-tile copies are delegated to
 * {@code copy(CopyParams)} and gradient extraction to {@code backprop(BackpropParams)}.
 *
 * @param inObj one Result per tile, in row-major grid order
 * @return the assembled result; a single input is passed through unchanged
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  // Fall back to the pure-Java implementation when no CUDA device is enabled.
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  // Degenerate 1x1 grid: ownership passes straight through to the caller.
  if (1 == inObj.length) {
    return inObj[0];
  }
  int[] inputDimensions = inObj[0].getData().getDimensions();
  assert 3 == inputDimensions.length;
  final int length = inObj[0].getData().length();
  int[] outputDims = getOutputDims(inObj);
  final TensorList outputData = CudaSystem.run(gpu -> {
    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
    assert outputDims[0] > 0;
    assert outputDims[1] > 0;
    assert outputDims[2] > 0;
    @Nonnull final CudaMemory outputBuffer = gpu.allocate((long) length * outputDims[2] * outputDims[1] * outputDims[0] * precision.size, MemoryType.Managed.normalize(), false);
    // Walk the tile grid row-major, queuing one CopyParams per tile. Each row's
    // height is the max height of its tiles; x advances by each tile's width.
    int totalWidth = 0;
    int totalHeight = 0;
    int inputIndex = 0;
    List<CopyParams> copies = new ArrayList<>();
    for (int row = 0; row < rows; row++) {
      int positionX = 0;
      int rowHeight = 0;
      for (int col = 0; col < columns; col++) {
        int[] tileDimensions = inObj[inputIndex].getData().getDimensions();
        rowHeight = Math.max(rowHeight, tileDimensions[1]);
        copies.add(new CopyParams(gpu, inObj, outputBuffer, length, outputDims, tileDimensions, inputIndex, positionX, totalHeight));
        positionX += tileDimensions[0];
        inputIndex += 1;
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
      }
      totalHeight += rowHeight;
      totalWidth = Math.max(totalWidth, positionX);
    }
    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
    // Execute the queued tile copies, optionally in parallel.
    Stream<CopyParams> stream = copies.stream();
    if (!CoreSettings.INSTANCE.isSingleThreaded() && parallel)
      stream = stream.parallel();
    stream.forEach(this::copy);
    // Input data is consumed here; _free below releases only the Result handles.
    Arrays.stream(inObj).forEach(r -> r.getData().freeRef());
    CudaDevice.CudaTensorDescriptor descriptor = gpu.newTensorDescriptor(precision, length, outputDims[2], outputDims[1], outputDims[0]);
    CudaTensor ptr = CudaTensor.wrap(outputBuffer, descriptor, precision);
    return CudaTensorList.wrap(ptr, length, outputDims, precision);
  }, Arrays.stream(inObj).map(Result::getData).toArray());
  return new Result(outputData, (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList error) -> {
    if (!Arrays.equals(error.getDimensions(), outputData.getDimensions())) {
      throw new AssertionError(Arrays.toString(error.getDimensions()) + " != " + Arrays.toString(outputData.getDimensions()));
    }
    if (error.length() != outputData.length()) {
      throw new AssertionError(error.length() + " != " + outputData.length());
    }
    assert error.length() == length;
    // Re-walk the tile grid to recompute each tile's position, queuing a backprop
    // task only for inputs that still require gradients.
    // NOTE(review): getData() is called here even though the forward pass already
    // called getData().freeRef() — presumably the Result keeps its data valid until
    // the Result itself is freed; verify refcounting.
    int totalHeight = 0;
    int inputIndex = 0;
    List<BackpropParams> tasks = new ArrayList<>();
    for (int row = 0; row < rows; row++) {
      int positionX = 0;
      int rowHeight = 0;
      for (int col = 0; col < columns; col++) {
        Result in = inObj[inputIndex];
        int[] tileDimensions = in.getData().getDimensions();
        rowHeight = Math.max(rowHeight, tileDimensions[1]);
        if (inObj[inputIndex].isAlive()) {
          tasks.add(new BackpropParams(inObj, buffer, error, outputDims, tileDimensions, length, positionX, totalHeight, inputIndex));
        }
        positionX += tileDimensions[0];
        inputIndex += 1;
      }
      totalHeight += rowHeight;
    }
    Stream<BackpropParams> stream = tasks.stream();
    if (!CoreSettings.INSTANCE.isSingleThreaded() && parallel)
      stream = stream.parallel();
    stream.forEach(this::backprop);
  }) {
    @Override
    protected void _free() {
      // Data lists were already freed in the forward pass; release only the handles.
      Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
    }

    @Override
    public boolean isAlive() {
      // A gradient is needed if any upstream input wants one.
      return Arrays.stream(inObj).anyMatch(x -> x.isAlive());
    }
  };
}
Use of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus:
the class NProductLayer, method evalAndFree.
/**
 * CUDA implementation of the N-ary elementwise product: output = product of all inputs.
 * <p>
 * Forward: pairwise-reduces the inputs with cudnnOpTensor(CUDNN_OP_TENSOR_MUL).
 * Backward: the gradient for input i is the elementwise product of the delta with
 * every OTHER input, computed by substituting delta for input i in the same reduce.
 *
 * @param inObj two or more inputs; all must have the same total element count
 * @return the product result
 * @throws IllegalArgumentException on fewer than two inputs, a non-3D first input,
 *                                  or an element-count mismatch
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  // Fall back to the pure-Java implementation when no CUDA device is enabled.
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  if (inObj.length <= 1) {
    throw new IllegalArgumentException("inObj.length=" + inObj.length);
  }
  @Nonnull final int[] dimensions = inObj[0].getData().getDimensions();
  final int length = inObj[0].getData().length();
  if (3 != dimensions.length) {
    throw new IllegalArgumentException("dimensions=" + Arrays.toString(dimensions));
  }
  // Inputs must agree on total element count (exact shapes need not be identical).
  for (int i = 1; i < inObj.length; i++) {
    TensorList data = inObj[i].getData();
    if (Tensor.length(dimensions) != Tensor.length(data.getDimensions())) {
      throw new IllegalArgumentException(Arrays.toString(dimensions) + " != " + Arrays.toString(data.getDimensions()));
    }
  }
  return new Result(CudaSystem.run(gpu -> {
    @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
    @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
    // addRef each data list so the fold below may freeRef both operands per step.
    @Nonnull final TensorList result1 = Arrays.stream(inObj).map(x -> {
      TensorList data = x.getData();
      data.addRef();
      return data;
    }).reduce((l, r) -> {
      @Nullable final CudaTensor lPtr = gpu.getTensor(l, precision, MemoryType.Device, false);
      @Nullable final CudaTensor rPtr = gpu.getTensor(r, precision, MemoryType.Device, false);
      // assert lPtr.memory.size == rPtr.memory.size;
      @Nonnull final CudaMemory outputPtr = gpu.allocate((long) outputDescriptor.nStride * length * precision.size, MemoryType.Device, true);
      CudaMemory lPtrMemory = lPtr.getMemory(gpu);
      CudaMemory rPtrMemory = rPtr.getMemory(gpu);
      // out = (1.0 * l) op (1.0 * r) + 0.0 * out, where op = elementwise MUL.
      CudaSystem.handle(JCudnn.cudnnOpTensor(gpu.handle, opDescriptor.getPtr(), precision.getPointer(1.0), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(1.0), rPtr.descriptor.getPtr(), rPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
      lPtrMemory.dirty();
      rPtrMemory.dirty();
      outputPtr.dirty();
      lPtrMemory.freeRef();
      rPtrMemory.freeRef();
      Arrays.stream(new ReferenceCounting[] { lPtr, rPtr, l, r }).forEach(ReferenceCounting::freeRef);
      // CudaTensor.wrap consumes one descriptor reference per fold; addRef keeps the
      // shared descriptor alive for subsequent folds (final release below).
      outputDescriptor.addRef();
      return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, dimensions, precision);
    }).get();
    Arrays.stream(new ReferenceCounting[] { opDescriptor, outputDescriptor }).forEach(ReferenceCounting::freeRef);
    return result1;
  }, Arrays.stream(inObj).map(Result::getData).toArray()), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    // d(prod)/d(input_i) = delta * product of all inputs except i — computed by
    // swapping delta in at position i and reducing the list with elementwise MUL.
    for (int index = 0; index < inObj.length; index++) {
      final Result input = inObj[index];
      if (input.isAlive()) {
        final int _index = index;
        @Nonnull TensorList data = IntStream.range(0, inObj.length).mapToObj(i -> {
          TensorList tensorList = i == _index ? delta : inObj[i].getData();
          tensorList.addRef();
          return tensorList;
        }).reduce((l, r) -> {
          return CudaSystem.run(gpu -> {
            @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
            @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
            @Nullable final CudaTensor lPtr = gpu.getTensor(l, precision, MemoryType.Device, false);
            @Nullable final CudaTensor rPtr = gpu.getTensor(r, precision, MemoryType.Device, false);
            // assert lPtr.memory.size == rPtr.memory.size;
            @Nonnull final CudaMemory outputPtr = gpu.allocate((long) outputDescriptor.nStride * length * precision.size, MemoryType.Device, true);
            CudaMemory lPtrMemory = lPtr.getMemory(gpu);
            CudaMemory rPtrMemory = rPtr.getMemory(gpu);
            // Same MUL fold as the forward pass, one descriptor pair per step.
            CudaSystem.handle(JCudnn.cudnnOpTensor(gpu.handle, opDescriptor.getPtr(), precision.getPointer(1.0), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(1.0), rPtr.descriptor.getPtr(), rPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
            lPtrMemory.dirty();
            rPtrMemory.dirty();
            outputPtr.dirty();
            lPtrMemory.freeRef();
            rPtrMemory.freeRef();
            Stream.of(lPtr, rPtr, opDescriptor, l, r).forEach(ReferenceCounting::freeRef);
            return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, dimensions, precision);
          }, l, r);
        }).get();
        input.accumulate(buffer, data);
      }
    }
    delta.freeRef();
  }) {
    @Override
    public final void accumulate(DeltaSet<Layer> buffer, TensorList delta) {
      getAccumulator().accept(buffer, delta);
    }

    @Override
    protected void _free() {
      // evalAndFree contract: release both the input handles and their data lists.
      Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
      for (int i = 0; i < inObj.length; i++) {
        inObj[i].getData().freeRef();
      }
    }

    @Override
    public boolean isAlive() {
      // A gradient is needed if any upstream input wants one.
      for (@Nonnull final Result element : inObj) if (element.isAlive()) {
        return true;
      }
      return false;
    }
  };
}
Use of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus:
the class ProductLayer, method evalAndFree.
/**
 * CUDA implementation of the binary elementwise product: output = left * right
 * (cuDNN broadcasts the right operand when its shape is smaller than the left's).
 * <p>
 * Backward: d/d(left) = delta * right at full size; d/d(right) = delta * left,
 * reduced with cudnnReduceTensor(ADD) back to the right operand's shape when right
 * was broadcast.
 *
 * @param inObj exactly two inputs: [left, right]
 * @return the product result
 * @throws IllegalArgumentException unless exactly two inputs with a 3-D left operand
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  // Fall back to the pure-Java implementation when no CUDA device is enabled.
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  if (inObj.length != 2) {
    throw new IllegalArgumentException("inObj.length=" + inObj.length);
  }
  Result left = inObj[0];
  Result right = inObj[1];
  final TensorList leftData = left.getData();
  final TensorList rightData = right.getData();
  @Nonnull final int[] leftDimensions = leftData.getDimensions();
  @Nonnull final int[] rightDimensions = rightData.getDimensions();
  final int length = leftData.length();
  if (3 != leftDimensions.length) {
    throw new IllegalArgumentException("dimensions=" + Arrays.toString(leftDimensions));
  }
  return new Result(CudaSystem.run(gpu -> {
    @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
    // Output takes the left operand's (full-size) shape with packed strides.
    @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, leftDimensions[2], leftDimensions[1], leftDimensions[0], leftDimensions[2] * leftDimensions[1] * leftDimensions[0], leftDimensions[1] * leftDimensions[0], leftDimensions[0], 1);
    @Nullable final CudaTensor lPtr = gpu.getTensor(leftData, precision, MemoryType.Device, false);
    @Nullable final CudaTensor rPtr = gpu.getTensor(rightData, precision, MemoryType.Device, false);
    // assert lPtr.size == rPtr.size;
    @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
    CudaMemory lPtrMemory = lPtr.getMemory(gpu);
    CudaMemory rPtrMemory = rPtr.getMemory(gpu);
    // out = (1.0 * left) MUL (1.0 * right) + 0.0 * out
    CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(1.0), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(1.0), rPtr.descriptor.getPtr(), rPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
    lPtrMemory.dirty();
    rPtrMemory.dirty();
    outputPtr.dirty();
    lPtrMemory.freeRef();
    rPtrMemory.freeRef();
    rPtr.freeRef();
    lPtr.freeRef();
    opDescriptor.freeRef();
    CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, outputDescriptor, precision);
    return CudaTensorList.wrap(cudaTensor, length, leftDimensions, precision);
  }, leftData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    // d/d(left) = delta * right, already at the left operand's full shape.
    if (left.isAlive()) {
      @Nonnull TensorList data = CudaSystem.run(gpu -> {
        @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, leftDimensions[2], leftDimensions[1], leftDimensions[0], leftDimensions[2] * leftDimensions[1] * leftDimensions[0], leftDimensions[1] * leftDimensions[0], leftDimensions[0], 1);
        @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, false);
        @Nullable final CudaTensor rightTensor = gpu.getTensor(right.getData(), precision, MemoryType.Device, false);
        // assert deltaTensor.size == rightTensor.size;
        @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
        CudaMemory deltaTensorMemory = deltaTensor.getMemory(gpu);
        CudaMemory rightTensorMemory = rightTensor.getMemory(gpu);
        CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), deltaTensorMemory.getPtr(), precision.getPointer(1.0), rightTensor.descriptor.getPtr(), rightTensorMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
        deltaTensorMemory.dirty();
        rightTensorMemory.dirty();
        outputPtr.dirty();
        deltaTensorMemory.freeRef();
        rightTensorMemory.freeRef();
        // NOTE(review): new CudaTensor(...) (not wrap) followed by outputPtr.freeRef()
        // below — presumably the constructor addRefs its arguments, so the local
        // reference is safe to drop here; verify against CudaTensor's contract.
        CudaTensor cudaTensor = new CudaTensor(outputPtr, outputDescriptor, precision);
        Arrays.stream(new ReferenceCounting[] { deltaTensor, rightTensor, opDescriptor, outputDescriptor }).forEach(ReferenceCounting::freeRef);
        outputPtr.freeRef();
        return CudaTensorList.wrap(cudaTensor, length, leftDimensions, precision);
      }, delta);
      left.accumulate(buffer, data);
    }
    // d/d(right) = delta * left; when right was broadcast, the full-size product is
    // then summed back down (cudnnReduceTensor ADD) to right's shape.
    if (right.isAlive()) {
      @Nonnull TensorList data = CudaSystem.run(gpu -> {
        @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
        // Full-size (left-shaped) descriptor for the pre-reduction product.
        @Nonnull final CudaDevice.CudaTensorDescriptor expandedDescriptor = gpu.newTensorDescriptor(precision, length, leftDimensions[2], leftDimensions[1], leftDimensions[0], leftDimensions[2] * leftDimensions[1] * leftDimensions[0], leftDimensions[1] * leftDimensions[0], leftDimensions[0], 1);
        @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, false);
        // delta's reference is consumed here (or in the else branch at the bottom).
        delta.freeRef();
        @Nullable final CudaTensor leftTensor = gpu.getTensor(left.getData(), precision, MemoryType.Device, false);
        // assert deltaTensor.size == rightTensor.size;
        @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * expandedDescriptor.nStride * length, MemoryType.Device, true);
        CudaMemory deltaTensorMemory = deltaTensor.getMemory(gpu);
        CudaMemory leftTensorMemory = leftTensor.getMemory(gpu);
        CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), deltaTensorMemory.getPtr(), precision.getPointer(1.0), leftTensor.descriptor.getPtr(), leftTensorMemory.getPtr(), precision.getPointer(0.0), expandedDescriptor.getPtr(), outputPtr.getPtr()));
        deltaTensorMemory.dirty();
        leftTensorMemory.dirty();
        outputPtr.dirty();
        if (Arrays.equals(rightDimensions, leftDimensions) && length == rightData.length()) {
          // No broadcast happened: the product is already right-shaped.
          deltaTensorMemory.freeRef();
          leftTensorMemory.freeRef();
          assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
          // NOTE(review): redundant — outputPtr.dirty() was already called just above.
          outputPtr.dirty();
          CudaTensor cudaTensor = new CudaTensor(outputPtr, expandedDescriptor, precision);
          Stream.of(deltaTensor, leftTensor, opDescriptor, expandedDescriptor, outputPtr).forEach(ReferenceCounting::freeRef);
          CudaTensorList tensorList = CudaTensorList.wrap(cudaTensor, length, rightDimensions, precision);
          return tensorList;
        } else {
          // Broadcast case: sum the full-size product down to right's dimensions.
          @Nonnull final CudaDevice.CudaTensorDescriptor reducedOutputDescriptor = gpu.newTensorDescriptor(precision, rightData.length(), rightDimensions[2], rightDimensions[1], rightDimensions[0], rightDimensions[2] * rightDimensions[1] * rightDimensions[0], rightDimensions[1] * rightDimensions[0], rightDimensions[0], 1);
          long size = (long) precision.size * reducedOutputDescriptor.nStride * rightData.length();
          @Nonnull final CudaMemory reducedOutputPtr = gpu.allocate(size, MemoryType.Managed, true);
          CudaResource<cudnnReduceTensorDescriptor> reduceTensorDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_ADD, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
          // Scratch buffers required by cudnnReduceTensor (workspace + indices).
          @Nonnull final CudaMemory workspacePtr = gpu.allocate(outputPtr.size, MemoryType.Device, true);
          @Nonnull final CudaMemory indexPtr = gpu.allocate(3, MemoryType.Device, false);
          // outputPtr.synchronize();
          gpu.cudnnReduceTensor(reduceTensorDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(1.0), expandedDescriptor.getPtr(), outputPtr.getPtr(), precision.getPointer(0.0), reducedOutputDescriptor.getPtr(), reducedOutputPtr.getPtr());
          reducedOutputPtr.dirty();
          workspacePtr.dirty();
          // NOTE(review): redundant — outputPtr was already marked dirty above and is
          // only read by the reduction.
          outputPtr.dirty();
          deltaTensorMemory.freeRef();
          leftTensorMemory.freeRef();
          CudaTensor cudaTensor = new CudaTensor(reducedOutputPtr, reducedOutputDescriptor, precision);
          Stream.of(deltaTensor, leftTensor, opDescriptor, expandedDescriptor, outputPtr, reducedOutputPtr, reducedOutputDescriptor, reduceTensorDescriptor, workspacePtr, indexPtr).forEach(ReferenceCounting::freeRef);
          CudaTensorList tensorList = CudaTensorList.wrap(cudaTensor, rightData.length(), rightDimensions, precision);
          return tensorList;
        }
      }, delta);
      right.accumulate(buffer, data);
    } else {
      // delta was not consumed by the right-gradient lambda; release it here.
      delta.freeRef();
    }
  }) {
    @Override
    public void accumulate(final DeltaSet<Layer> buffer, final TensorList delta) {
      getAccumulator().accept(buffer, delta);
    }

    @Override
    protected void _free() {
      // evalAndFree contract: release both data lists and both input handles.
      leftData.freeRef();
      rightData.freeRef();
      left.freeRef();
      right.freeRef();
    }

    @Override
    public boolean isAlive() {
      // A gradient is needed if either operand wants one.
      for (@Nonnull final Result element : inObj) if (element.isAlive()) {
        return true;
      }
      return false;
    }
  };
}
Aggregations