Usage of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus: class GramianLayer, method getFeedback.
/**
 * Computes the backward (feedback) signal of the Gramian layer on the GPU.
 * <p>
 * For each band, the incoming delta is read through two views of the
 * bands x bands Gramian delta: {@code viewDescriptor1} (channel stride equal to
 * the delta's own cStride, offset {@code band * bands} elements — presumably the
 * "row" slice) and {@code viewDescriptor2} (channel stride {@code cStride * bands},
 * offset {@code band} elements — presumably the "column" slice). Each view is
 * multiplied elementwise against the input activations into a scratch buffer,
 * which is then sum-reduced (scaled by {@code alpha / pixels}) into the per-band
 * slice of the output gradient.
 *
 * @param gpu         the gpu handle to run cuDNN operations on
 * @param inputTensor the forward-pass input tensor
 * @param deltaTensor the incoming gradient (bands x bands Gramian delta)
 * @return the feedback gradient with the same dimensions as the input
 */
@Nonnull
public CudaTensorList getFeedback(final CudnnHandle gpu, final CudaTensor inputTensor, final CudaTensor deltaTensor) {
  int pixels = inputTensor.descriptor.height * inputTensor.descriptor.width;
  CudaMemory inputMemory = inputTensor.getMemory(gpu);
  CudaMemory deltaMemory = deltaTensor.getMemory(gpu);
  @Nonnull final int[] inputDimensions = {inputTensor.descriptor.width, inputTensor.descriptor.height, inputTensor.descriptor.channels};
  final int length = inputTensor.descriptor.batchCount;
  final int bands = inputDimensions[2];
  // Scratch buffer holding one band's elementwise products before reduction.
  @Nullable final CudaMemory bufferMemory = gpu.allocate((long) inputTensor.descriptor.nStride * length * precision.size, MemoryType.Device, true);
  @Nonnull final CudaDevice.CudaTensorDescriptor bufferDescriptor = gpu.newTensorDescriptor(precision, length, bands, inputDimensions[1], inputDimensions[0], //
      inputDimensions[0] * inputDimensions[1] * bands, //
      inputDimensions[0] * inputDimensions[1], //
      inputDimensions[0], 1);
  @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, bands, inputDimensions[1], inputDimensions[0], //
      inputDimensions[0] * inputDimensions[1] * bands, //
      inputDimensions[0] * inputDimensions[1], //
      inputDimensions[0], 1);
  @Nullable final CudaMemory outputMemory = gpu.allocate((long) outputDescriptor.nStride * precision.size * length, MemoryType.Managed, true);
  // Workspace and 32-bit index buffers required by cudnnReduceTensor.
  @Nonnull final CudaMemory workspacePtr = gpu.allocate(Math.max(outputMemory.size, inputMemory.size), MemoryType.Device, true);
  // Cast to long before multiplying, consistent with getOutput.
  @Nonnull final CudaMemory indexPtr = gpu.allocate((long) 12 * length, MemoryType.Device, false);
  @Nonnull final CudaResource<cudnnOpTensorDescriptor> multiplyDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
  CudaResource<cudnnReduceTensorDescriptor> reduceAddDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_ADD, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
  // Describes a single band-slice of the (densely packed) output gradient.
  @Nonnull final CudaDevice.CudaTensorDescriptor bandDescriptor = gpu.newTensorDescriptor(precision, length, 1, inputDimensions[1], inputDimensions[0], inputDimensions[2] * inputDimensions[1] * inputDimensions[0], inputDimensions[1] * inputDimensions[0], inputDimensions[0], 1);
  @Nonnull final CudaDevice.CudaTensorDescriptor viewDescriptor1 = gpu.newTensorDescriptor(//
      precision, //
      length, //
      bands, //
      1, //
      1, //
      deltaTensor.descriptor.nStride, //
      deltaTensor.descriptor.cStride, //
      deltaTensor.descriptor.hStride, deltaTensor.descriptor.wStride);
  // Same view but striding by `bands` channels, i.e. the transposed slice of the Gramian delta.
  @Nonnull final CudaDevice.CudaTensorDescriptor viewDescriptor2 = gpu.newTensorDescriptor(//
      precision, //
      length, //
      bands, //
      1, //
      1, //
      deltaTensor.descriptor.nStride, //
      deltaTensor.descriptor.cStride * bands, //
      deltaTensor.descriptor.hStride, //
      deltaTensor.descriptor.wStride);
  IntStream.range(0, bands).forEach(band -> {
    // View 1: offset band*bands elements into the delta (paired with viewDescriptor1's cStride).
    CudaMemory deltaView1 = deltaMemory.withByteOffset(band * precision.size * bands);
    CudaSystem.handle(gpu.cudnnOpTensor(multiplyDescriptor.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(1.0), viewDescriptor1.getPtr(), deltaView1.getPtr(), precision.getPointer(0.0), bufferDescriptor.getPtr(), bufferMemory.getPtr()));
    inputMemory.dirty();
    deltaView1.dirty();
    bufferMemory.dirty();
    deltaView1.freeRef();
    // View 2: offset band elements, accumulated (beta=1.0) on top of the first product.
    CudaMemory deltaView2 = deltaMemory.withByteOffset(band * precision.size);
    CudaSystem.handle(gpu.cudnnOpTensor(multiplyDescriptor.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(1.0), viewDescriptor2.getPtr(), deltaView2.getPtr(), precision.getPointer(1.0), bufferDescriptor.getPtr(), bufferMemory.getPtr()));
    inputMemory.dirty();
    deltaView2.dirty();
    bufferMemory.dirty();
    deltaView2.freeRef();
    CudaMemory outputViewMem = outputMemory.withByteOffset(bandDescriptor.cStride * band * precision.size);
    // Sum-reduce the buffered products into this band's output slice, scaled by alpha/pixels.
    // FIX: check the cuDNN status, matching the cudnnReduceTensor call in getOutput.
    CudaSystem.handle(gpu.cudnnReduceTensor(reduceAddDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(alpha / pixels), bufferDescriptor.getPtr(), bufferMemory.getPtr(), precision.getPointer(0.0), bandDescriptor.getPtr(), outputViewMem.getPtr()));
    outputViewMem.dirty();
    bufferMemory.dirty();
    outputViewMem.freeRef();
  });
  // Ownership of outputMemory/outputDescriptor transfers into the wrapped tensor.
  CudaTensorList feedback = CudaTensorList.wrap(CudaTensor.wrap(outputMemory, outputDescriptor, precision), length, inputDimensions, precision);
  bandDescriptor.freeRef();
  viewDescriptor1.freeRef();
  viewDescriptor2.freeRef();
  workspacePtr.freeRef();
  indexPtr.freeRef();
  reduceAddDescriptor.freeRef();
  inputMemory.freeRef();
  multiplyDescriptor.freeRef();
  deltaMemory.freeRef();
  bufferMemory.freeRef();
  bufferDescriptor.freeRef();
  return feedback;
}
Usage of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus: class GramianLayer, method getOutput.
/**
 * Computes the forward Gramian output on the GPU.
 * <p>
 * For each input band, the full input tensor is multiplied elementwise against
 * a single-band view of the input (the band's activation plane), and the
 * products are sum-reduced over the spatial plane into one bands-length row of
 * the bands x bands output, scaled by {@code alpha / pixels}.
 *
 * @param gpu         the gpu handle to run cuDNN operations on
 * @param inputTensor the input tensor
 * @return the Gramian output with dimensions {1, 1, bands * bands}
 */
@Nonnull
public CudaTensorList getOutput(final CudnnHandle gpu, final CudaTensor inputTensor) {
  int pixels = inputTensor.descriptor.height * inputTensor.descriptor.width;
  @Nonnull final int[] inputDimensions = {inputTensor.descriptor.width, inputTensor.descriptor.height, inputTensor.descriptor.channels};
  final int length = inputTensor.descriptor.batchCount;
  final int bands = inputDimensions[2];
  @Nonnull final int[] outputDimensions = {1, 1, bands * bands};
  CudaMemory inputMemory = inputTensor.getMemory(gpu);
  // FIX: renamed local "ouputDescriptor" -> "outputDescriptor" (typo).
  @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, bands * bands, 1, 1, //
      bands * bands, //
      1, //
      1, 1);
  @Nullable final CudaMemory outputMemory = gpu.allocate((long) outputDescriptor.nStride * precision.size * length, MemoryType.Device, true);
  // Scratch buffer holding one band's elementwise products before reduction.
  @Nonnull final CudaDevice.CudaTensorDescriptor bufferDescriptor = gpu.newTensorDescriptor(precision, length, bands, inputDimensions[1], inputDimensions[0], //
      inputDimensions[0] * inputDimensions[1] * bands, //
      inputDimensions[0] * inputDimensions[1], //
      inputDimensions[0], 1);
  @Nullable final CudaMemory bufferMemory = gpu.allocate((long) bufferDescriptor.nStride * length * precision.size, MemoryType.Device, true);
  // Single-band view into the input, using the input tensor's own strides.
  @Nonnull final CudaDevice.CudaTensorDescriptor inputViewDescriptor = gpu.newTensorDescriptor(precision, length, 1, inputDimensions[1], inputDimensions[0], //
      inputTensor.descriptor.nStride, //
      inputTensor.descriptor.cStride, //
      inputTensor.descriptor.hStride, inputTensor.descriptor.wStride);
  CudaResource<cudnnReduceTensorDescriptor> reduceAddDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_ADD, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
  // One bands-length row of the bands x bands output (nStride = bands*bands).
  @Nonnull final CudaDevice.CudaTensorDescriptor outputViewDescriptor = gpu.newTensorDescriptor(precision, length, bands, 1, 1, bands * bands, 1, 1, 1);
  @Nonnull final CudaResource<cudnnOpTensorDescriptor> multiplyDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
  // Workspace and 32-bit index buffers required by cudnnReduceTensor.
  @Nonnull final CudaMemory workspacePtr = gpu.allocate(Math.max(outputMemory.size, inputMemory.size), MemoryType.Device, true);
  @Nonnull final CudaMemory indexPtr = gpu.allocate((long) 12 * length, MemoryType.Device, true);
  IntStream.range(0, inputDimensions[2]).forEach(band -> {
    CudaMemory inputView = inputMemory.withByteOffset(band * precision.size * inputTensor.descriptor.cStride);
    CudaSystem.handle(gpu.cudnnOpTensor(multiplyDescriptor.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(1.0), inputViewDescriptor.getPtr(), inputView.getPtr(), precision.getPointer(0.0), bufferDescriptor.getPtr(), bufferMemory.getPtr()));
    bufferMemory.dirty();
    inputView.dirty();
    inputMemory.dirty();
    inputView.freeRef();
    CudaMemory outputView = outputMemory.withByteOffset(band * precision.size * bands);
    CudaSystem.handle(gpu.cudnnReduceTensor(reduceAddDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(alpha / pixels), bufferDescriptor.getPtr(), bufferMemory.getPtr(), precision.getPointer(0.0), outputViewDescriptor.getPtr(), outputView.getPtr()));
    outputView.dirty();
    bufferMemory.dirty();
    outputView.freeRef();
  });
  outputMemory.dirty();
  bufferMemory.dirty();
  inputMemory.dirty();
  bufferMemory.freeRef();
  multiplyDescriptor.freeRef();
  inputMemory.freeRef();
  bufferDescriptor.freeRef();
  inputViewDescriptor.freeRef();
  outputViewDescriptor.freeRef();
  reduceAddDescriptor.freeRef();
  workspacePtr.freeRef();
  indexPtr.freeRef();
  // Ownership of outputMemory/outputDescriptor transfers into the wrapped tensor.
  return CudaTensorList.wrap(CudaTensor.wrap(outputMemory, outputDescriptor, precision), length, outputDimensions, precision);
}
Usage of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus: class ImgBandBiasLayer, method evalAndFree.
/**
 * Adds the per-band bias tensor to the single input on the GPU.
 * <p>
 * Forward: broadcasts the (1 x bands x h x w) bias over the batch via
 * cudnnOpTensor ADD. Backward: when not frozen, reduces the delta over batch
 * and spatial dimensions via cudnnConvolutionBackwardBias to produce the bias
 * gradient, then propagates the delta unchanged to the input.
 *
 * @param inObj exactly one input result
 * @return the biased output result
 * @throws IllegalArgumentException if inObj.length != 1 or the input is not 3-dimensional
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  if (inObj.length != 1) {
    throw new IllegalArgumentException("inObj.length=" + inObj.length);
  }
  Result input = inObj[0];
  final TensorList leftData = input.getData();
  @Nonnull final int[] inputDimensions = leftData.getDimensions();
  final int length = leftData.length();
  if (3 != inputDimensions.length) {
    throw new IllegalArgumentException("dimensions=" + Arrays.toString(inputDimensions));
  }
  // assert !right.isAlive();
  return new Result(CudaSystem.run(gpu -> {
    @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_ADD, precision);
    @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, inputDimensions[2], inputDimensions[1], inputDimensions[0], inputDimensions[2] * inputDimensions[1] * inputDimensions[0], inputDimensions[1] * inputDimensions[0], inputDimensions[0], 1);
    @Nullable final CudaTensor inputTensor = gpu.getTensor(leftData, precision, MemoryType.Device, false);
    // Upload the bias weights to device memory.
    CudaMemory biasMem = gpu.allocate(bias.length() * precision.size, MemoryType.Device, true).write(precision, bias.getData());
    int[] biasDim = bias.getDimensions();
    // Batch size 1 so the bias broadcasts over the batch dimension.
    CudaDevice.CudaTensorDescriptor biasDescriptor = gpu.newTensorDescriptor(precision, 1, biasDim[2], biasDim[1], biasDim[0], biasDim[2] * biasDim[1] * biasDim[0], biasDim[1] * biasDim[0], biasDim[0], 1);
    // assert lPtr.size == rPtr.size;
    @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Managed.normalize(), true);
    CudaMemory inputMemory = inputTensor.getMemory(gpu);
    CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(1.0), biasDescriptor.getPtr(), biasMem.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
    inputMemory.dirty();
    biasMem.dirty();
    outputPtr.dirty();
    inputMemory.freeRef();
    biasMem.freeRef();
    biasDescriptor.freeRef();
    inputTensor.freeRef();
    opDescriptor.freeRef();
    CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, outputDescriptor, precision);
    return CudaTensorList.wrap(cudaTensor, length, inputDimensions, precision);
  }, leftData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    if (!isFrozen()) {
      @Nonnull double[] biasDelta = CudaSystem.run(gpu -> {
        @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, false);
        CudaMemory biasMem = gpu.allocate(bias.length() * precision.size, MemoryType.Device, true).write(precision, bias.getData());
        int[] biasDim = bias.getDimensions();
        CudaDevice.CudaTensorDescriptor biasDescriptor = gpu.newTensorDescriptor(precision, 1, biasDim[2], biasDim[1], biasDim[0], biasDim[2] * biasDim[1] * biasDim[0], biasDim[1] * biasDim[0], biasDim[0], 1);
        CudaMemory deltaTensorMemory = deltaTensor.getMemory(gpu);
        // FIX: check the cuDNN status, matching every other cuDNN call in this file.
        CudaSystem.handle(gpu.cudnnConvolutionBackwardBias(precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), deltaTensorMemory.getPtr(), precision.getPointer(0.0), biasDescriptor.getPtr(), biasMem.getPtr()));
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
        biasMem.dirty();
        // Read the reduced gradient back to the host.
        double[] biasV = new double[bias.length()];
        biasMem.read(precision, biasV);
        Stream.<ReferenceCounting>of(biasMem, deltaTensorMemory, deltaTensor, biasDescriptor).forEach(ReferenceCounting::freeRef);
        return biasV;
      }, delta);
      buffer.get(ImgBandBiasLayer.this, bias).addInPlace(biasDelta).freeRef();
    }
    // Bias addition passes the delta through to the input unchanged.
    if (input.isAlive()) {
      input.accumulate(buffer, delta);
    } else {
      delta.freeRef();
    }
  }) {
    @Override
    public final void accumulate(DeltaSet<Layer> buffer, TensorList delta) {
      getAccumulator().accept(buffer, delta);
    }

    @Override
    protected void _free() {
      leftData.freeRef();
      input.freeRef();
    }

    @Override
    public boolean isAlive() {
      for (@Nonnull final Result element : inObj)
        if (element.isAlive()) {
          return true;
        }
      return false;
    }
  };
}
Usage of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus: class ImgLinearSubnetLayer, method evalAndFree.
/**
 * Evaluates the input by splitting it into band ranges ("legs"), running each
 * leg's inner layer on its band slice, and summing the leg outputs.
 * <p>
 * The backward pass of each leg copies its delta slice into a shared passback
 * tensor (guarded by {@code synchronized (passback)}); once all legs have
 * reported (tracked by {@code counter}), the assembled passback is accumulated
 * into the input.
 *
 * @param inObj exactly one input result with 3-dimensional data
 * @return the summed output of all legs
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  assert 1 == inObj.length;
  Result input = inObj[0];
  TensorList inputData = input.getData();
  @Nonnull final int[] inputDims = inputData.getDimensions();
  assert 3 == inputDims.length;
  int length = inputData.length();
  // The legs must exactly tile the band range [0, maxBand) with no gaps or overlaps.
  int maxBand = legs.stream().mapToInt(x -> x.toBand).max().getAsInt();
  assert maxBand == inputDims[2] : maxBand + " != " + inputDims[2];
  assert IntStream.range(0, maxBand).allMatch(i -> 1 == legs.stream().filter(x -> x.fromBand <= i && x.toBand > i).count());
  // Shared buffer into which each leg writes its slice of the input gradient.
  CudaTensor passback = CudaSystem.run(gpu -> {
    return CudaTensor.wrap(gpu.allocate(inputData.getElements() * precision.size, MemoryType.Device, true), gpu.newTensorDescriptor(precision, length, inputDims[2], inputDims[1], inputDims[0]), precision);
  });
  try {
    AtomicInteger counter = new AtomicInteger(0);
    SumInputsLayer sumInputsLayer = new SumInputsLayer();
    try {
      Result[] legResults = legs.stream().map(leg -> {
        passback.addRef();
        ImgBandSelectLayer imgBandSelectLayer = new ImgBandSelectLayer(leg.fromBand, leg.toBand);
        input.addRef();
        TensorList legData = imgBandSelectLayer.eval(input).getDataAndFree();
        imgBandSelectLayer.freeRef();
        return leg.inner.evalAndFree(new Result(legData, (DeltaSet<Layer> ctx, TensorList delta) -> {
          int[] outputDimensions = delta.getDimensions();
          int[] inputDimensions = inputDims;
          // Legs may run concurrently; serialize writes into the shared passback.
          synchronized (passback) {
            CudaSystem.run(gpu -> {
              // View of the leg's band slice inside the full input-shaped passback.
              @Nonnull final CudaDevice.CudaTensorDescriptor viewDescriptor = gpu.newTensorDescriptor(//
                  precision, //
                  length, //
                  outputDimensions[2], //
                  outputDimensions[1], //
                  outputDimensions[0], //
                  inputDimensions[2] * inputDimensions[1] * inputDimensions[0], //
                  inputDimensions[1] * inputDimensions[0], //
                  inputDimensions[0], 1);
              final int byteOffset = viewDescriptor.cStride * leg.fromBand * precision.size;
              assert delta.length() == inputData.length();
              assert passback.getDeviceId() == gpu.getDeviceId();
              // assert error.stream().flatMapToDouble(x-> Arrays.stream(x.getData())).allMatch(Double::isFinite);
              @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
              @Nonnull final CudaMemory passbackBuffer = passback.getMemory(gpu);
              CudaMemory errorPtrMemory = deltaTensor.getMemory(gpu);
              passbackBuffer.synchronize();
              // FIX: check the cuDNN status, matching the cudnnTransformTensor call in ImgCropLayer.copy.
              CudaSystem.handle(gpu.cudnnTransformTensor(precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), errorPtrMemory.getPtr(), precision.getPointer(0.0), viewDescriptor.getPtr(), passbackBuffer.getPtr().withByteOffset(byteOffset)));
              errorPtrMemory.dirty();
              passbackBuffer.dirty();
              Stream.<ReferenceCounting>of(deltaTensor, viewDescriptor, passbackBuffer, errorPtrMemory).forEach(ReferenceCounting::freeRef);
            }, passback);
          }
          // Once every leg has written its slice, forward the assembled gradient.
          if (counter.incrementAndGet() >= legs.size()) {
            counter.set(0);
            input.accumulate(ctx, CudaTensorList.create(passback, length, inputDims, precision));
          }
        }) {
          @Override
          protected void _free() {
            super._free();
            input.freeRef();
            passback.freeRef();
          }
        });
      }).toArray(Result[]::new);
      return sumInputsLayer.setParallel(parallel).setPrecision(precision).evalAndFree(legResults);
    } finally {
      sumInputsLayer.freeRef();
      input.freeRef();
    }
  } finally {
    passback.freeRef();
  }
}
Usage of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus: class ImgCropLayer, method copy.
/**
 * Copies a cuda tensor into a tensor of the given output dimensions,
 * center-cropping (when the output is smaller) or center-padding (when the
 * output is larger) each spatial axis independently. The band count must match.
 *
 * @param gpu the gpu
 * @param input the input tensor
 * @param length the batch length
 * @param inputDimensions the input dimensions {width, height, bands}
 * @param outputDimensions the output dimensions {width, height, bands}
 * @param dirty whether the output allocation may skip zero-initialization
 * @param precision the precision
 * @return the cuda tensor with the output dimensions
 * @throws IllegalArgumentException if either dimension array is not length 3,
 *         or the band counts differ
 */
public static CudaTensor copy(final CudnnHandle gpu, final CudaTensor input, final int length, final int[] inputDimensions, final int[] outputDimensions, final boolean dirty, Precision precision) {
if (3 != inputDimensions.length)
throw new IllegalArgumentException("inputDimensions.length");
if (3 != outputDimensions.length)
throw new IllegalArgumentException("dimOut.length");
if (inputDimensions[2] != outputDimensions[2]) {
throw new IllegalArgumentException(String.format("%d != %d", inputDimensions[2], outputDimensions[2]));
}
// log.info(String.format("offset=%d,%d", offsetX, offsetY));
// viewDim is the per-axis overlap between input and output (see getViewDimensions).
@Nonnull final int[] viewDim = getViewDimensions(inputDimensions, outputDimensions);
int sourceOffset = 0;
int destinationOffset = 0;
// Per axis: pad (offset into the destination) when growing, crop (offset into
// the source) when shrinking; both offsets center the view.
if (inputDimensions[0] < outputDimensions[0]) {
destinationOffset += (outputDimensions[0] - inputDimensions[0]) / 2;
} else {
sourceOffset += (inputDimensions[0] - outputDimensions[0]) / 2;
}
if (inputDimensions[1] < outputDimensions[1]) {
destinationOffset += outputDimensions[0] * ((outputDimensions[1] - inputDimensions[1]) / 2);
} else {
// NOTE(review): uses the source's hStride here but raw widths elsewhere —
// presumably the destination is always densely packed while the source may
// be strided; confirm against CudaTensorDescriptor usage.
sourceOffset += input.descriptor.hStride * ((inputDimensions[1] - outputDimensions[1]) / 2);
}
assert sourceOffset >= 0;
assert destinationOffset >= 0;
assert sourceOffset + Tensor.length(viewDim) <= Tensor.length(inputDimensions);
assert destinationOffset + Tensor.length(viewDim) <= Tensor.length(outputDimensions);
// View of the overlap region inside the (possibly strided) source tensor.
@Nonnull final CudaDevice.CudaTensorDescriptor sourceViewDescriptor = gpu.newTensorDescriptor(//
precision, //
length, //
viewDim[2], //
viewDim[1], //
viewDim[0], //
input.descriptor.nStride, //
input.descriptor.cStride, //
input.descriptor.hStride, input.descriptor.wStride);
CudaMemory inputTensorMemory = input.getMemory(gpu);
try {
if (Arrays.equals(viewDim, outputDimensions)) {
// Pure crop: no copy needed — return an offset view into the source memory.
// Ownership of the offset view transfers into the wrapped tensor.
assert sourceOffset >= 0;
assert destinationOffset == 0;
return CudaTensor.wrap(inputTensorMemory.withByteOffset(sourceOffset * precision.size), sourceViewDescriptor, precision);
}
// Densely-packed view of the overlap region inside the destination tensor.
@Nonnull final CudaDevice.CudaTensorDescriptor destinationViewDescriptor = gpu.newTensorDescriptor(//
precision, //
length, //
viewDim[2], //
viewDim[1], //
viewDim[0], //
outputDimensions[2] * outputDimensions[1] * outputDimensions[0], //
outputDimensions[1] * outputDimensions[0], //
outputDimensions[0], 1);
@Nonnull final CudaMemory outputBuffer = gpu.allocate((long) length * outputDimensions[2] * outputDimensions[1] * outputDimensions[0] * precision.size, MemoryType.Managed.normalize(), dirty);
CudaSystem.handle(gpu.cudnnTransformTensor(precision.getPointer(1.0), sourceViewDescriptor.getPtr(), inputTensorMemory.getPtr().withByteOffset(sourceOffset * precision.size), precision.getPointer(0.0), destinationViewDescriptor.getPtr(), outputBuffer.getPtr().withByteOffset(destinationOffset * precision.size)));
assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
inputTensorMemory.dirty();
outputBuffer.dirty();
Stream.<ReferenceCounting>of(sourceViewDescriptor, destinationViewDescriptor).forEach(ReferenceCounting::freeRef);
// Descriptor for the full (densely packed) output tensor.
CudaDevice.CudaTensorDescriptor descriptorCudaResource = gpu.newTensorDescriptor(//
precision, //
length, //
outputDimensions[2], //
outputDimensions[1], //
outputDimensions[0], //
outputDimensions[2] * outputDimensions[1] * outputDimensions[0], //
outputDimensions[1] * outputDimensions[0], //
outputDimensions[0], 1);
return CudaTensor.wrap(outputBuffer, descriptorCudaResource, precision);
} finally {
inputTensorMemory.freeRef();
}
}
Aggregations