Search in sources:

Example 1 with CudaTensorList

use of com.simiacryptus.mindseye.lang.cudnn.CudaTensorList in project MindsEye by SimiaCryptus.

In class GramianLayer, the method getFeedback:

/**
 * Computes the backpropagated gradient ("feedback") of the Gramian operation with respect
 * to its input, entirely on the GPU.
 * <p>
 * For each band {@code b}, the incoming delta is viewed twice — once as row {@code b} and
 * once as column {@code b} of the bands-by-bands Gramian gradient — multiplied element-wise
 * against the input via {@code cudnnOpTensor} (the second product accumulating onto the
 * first), and the accumulated buffer is then sum-reduced into band {@code b} of the output,
 * scaled by {@code alpha / pixels}.
 * <p>
 * NOTE(review): every CUDA resource allocated here is manually reference-counted; the
 * freeRef calls at the end are paired with the allocations above and should not be
 * reordered casually.
 *
 * @param gpu         the cuDNN handle to execute on
 * @param inputTensor the forward-pass input tensor
 * @param deltaTensor the gradient of the Gramian output (assumed bands*bands channels with
 *                    1x1 spatial extent — confirm against the forward pass)
 * @return the feedback gradient, with the same dimensions as the input
 */
@Nonnull
public CudaTensorList getFeedback(final CudnnHandle gpu, final CudaTensor inputTensor, final CudaTensor deltaTensor) {
    // Spatial element count per band; used to normalize the reduction below.
    int pixels = inputTensor.descriptor.height * inputTensor.descriptor.width;
    CudaMemory inputMemory = inputTensor.getMemory(gpu);
    CudaMemory deltaMemory = deltaTensor.getMemory(gpu);
    @Nonnull final int[] inputDimensions = { inputTensor.descriptor.width, inputTensor.descriptor.height, inputTensor.descriptor.channels };
    final int length = inputTensor.descriptor.batchCount;
    final int bands = inputDimensions[2];
    // Scratch buffer for the per-band element-wise products (sized like the input batch).
    @Nullable final CudaMemory bufferMemory = gpu.allocate((long) inputTensor.descriptor.nStride * length * precision.size, MemoryType.Device, true);
    // Dense NCHW descriptor matching the input's dimensions, used for the scratch buffer.
    @Nonnull final CudaDevice.CudaTensorDescriptor bufferDescriptor = gpu.newTensorDescriptor(precision, length, bands, inputDimensions[1], inputDimensions[0], // 
    inputDimensions[0] * inputDimensions[1] * bands, // 
    inputDimensions[0] * inputDimensions[1], // 
    inputDimensions[0], 1);
    // The output (feedback) shares the same dense NCHW layout as the input.
    @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, bands, inputDimensions[1], inputDimensions[0], // 
    inputDimensions[0] * inputDimensions[1] * bands, // 
    inputDimensions[0] * inputDimensions[1], // 
    inputDimensions[0], 1);
    @Nullable final CudaMemory outputMemory = gpu.allocate((long) outputDescriptor.nStride * precision.size * length, MemoryType.Managed, true);
    // Workspace plus index buffer for cudnnReduceTensor (indices unused: NO_INDICES below).
    // NOTE(review): the constant 12 looks like a per-example index-buffer byte size —
    // confirm against cudnnReduceTensor's workspace requirements.
    @Nonnull final CudaMemory workspacePtr = gpu.allocate(Math.max(outputMemory.size, inputMemory.size), MemoryType.Device, true);
    @Nonnull final CudaMemory indexPtr = gpu.allocate(12 * length, MemoryType.Device, false);
    @Nonnull final CudaResource<cudnnOpTensorDescriptor> multiplyDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
    CudaResource<cudnnReduceTensorDescriptor> reduceAddDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_ADD, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
    // Single-band (length x 1 x H x W) view into the output, with full-tensor strides so a
    // byte offset selects the band slice.
    @Nonnull final CudaDevice.CudaTensorDescriptor bandDescriptor = gpu.newTensorDescriptor(precision, length, 1, inputDimensions[1], inputDimensions[0], inputDimensions[2] * inputDimensions[1] * inputDimensions[0], inputDimensions[1] * inputDimensions[0], inputDimensions[0], 1);
    // View of the delta as one ROW of the bands x bands gradient matrix (contiguous
    // channel stride, assuming the delta's dense 1x1-spatial layout).
    @Nonnull final CudaDevice.CudaTensorDescriptor viewDescriptor1 = gpu.newTensorDescriptor(// 
    precision, // 
    length, // 
    bands, // 
    1, // 
    1, // 
    deltaTensor.descriptor.nStride, // 
    deltaTensor.descriptor.cStride, // 
    deltaTensor.descriptor.hStride, deltaTensor.descriptor.wStride);
    // View of the delta as one COLUMN of the same matrix (channel stride widened by bands).
    @Nonnull final CudaDevice.CudaTensorDescriptor viewDescriptor2 = gpu.newTensorDescriptor(// 
    precision, // 
    length, // 
    bands, // 
    1, // 
    1, // 
    deltaTensor.descriptor.nStride, // 
    deltaTensor.descriptor.cStride * bands, // 
    deltaTensor.descriptor.hStride, // 
    deltaTensor.descriptor.wStride);
    IntStream.range(0, bands).forEach(band -> {
        // Row `band` of the delta matrix: input * deltaRow -> buffer (beta = 0 overwrites).
        CudaMemory deltaView1 = deltaMemory.withByteOffset(band * precision.size * bands);
        CudaSystem.handle(gpu.cudnnOpTensor(multiplyDescriptor.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(1.0), viewDescriptor1.getPtr(), deltaView1.getPtr(), precision.getPointer(0.0), bufferDescriptor.getPtr(), bufferMemory.getPtr()));
        inputMemory.dirty();
        deltaView1.dirty();
        bufferMemory.dirty();
        deltaView1.freeRef();
        // Column `band`: input * deltaColumn accumulated onto the buffer (beta = 1 adds).
        CudaMemory deltaView2 = deltaMemory.withByteOffset(band * precision.size);
        CudaSystem.handle(gpu.cudnnOpTensor(multiplyDescriptor.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(1.0), viewDescriptor2.getPtr(), deltaView2.getPtr(), precision.getPointer(1.0), bufferDescriptor.getPtr(), bufferMemory.getPtr()));
        inputMemory.dirty();
        deltaView2.dirty();
        bufferMemory.dirty();
        deltaView2.freeRef();
        // Sum-reduce the buffer's channel dimension into output band `band`, scaled by
        // alpha / pixels (band slice selected by byte offset into outputMemory).
        CudaMemory outputViewMem = outputMemory.withByteOffset(bandDescriptor.cStride * band * precision.size);
        gpu.cudnnReduceTensor(reduceAddDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(alpha / pixels), bufferDescriptor.getPtr(), bufferMemory.getPtr(), precision.getPointer(0.0), bandDescriptor.getPtr(), outputViewMem.getPtr());
        outputViewMem.dirty();
        bufferMemory.dirty();
        outputViewMem.freeRef();
    });
    // wrap(...) takes ownership of outputMemory/outputDescriptor; all other resources are
    // released explicitly below, mirroring the allocations above.
    CudaTensorList feedback = CudaTensorList.wrap(CudaTensor.wrap(outputMemory, outputDescriptor, precision), length, inputDimensions, precision);
    bandDescriptor.freeRef();
    viewDescriptor1.freeRef();
    viewDescriptor2.freeRef();
    workspacePtr.freeRef();
    indexPtr.freeRef();
    reduceAddDescriptor.freeRef();
    inputMemory.freeRef();
    multiplyDescriptor.freeRef();
    deltaMemory.freeRef();
    bufferMemory.freeRef();
    return feedback;
}
Also used : CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) Nonnull(javax.annotation.Nonnull) jcuda.jcudnn.cudnnReduceTensorDescriptor(jcuda.jcudnn.cudnnReduceTensorDescriptor) jcuda.jcudnn.cudnnOpTensorDescriptor(jcuda.jcudnn.cudnnOpTensorDescriptor) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) Nullable(javax.annotation.Nullable) Nonnull(javax.annotation.Nonnull)

Example 2 with CudaTensorList

use of com.simiacryptus.mindseye.lang.cudnn.CudaTensorList in project MindsEye by SimiaCryptus.

In class GramianLayer, the method evalAndFree:

/**
 * Evaluates the Gramian layer for a single 3-dimensional input on the GPU.
 * <p>
 * The forward pass delegates to {@code getOutput}; the backward pass validates that the
 * incoming delta has dimensions {1, 1, channels * channels} and delegates to
 * {@code getFeedback}.
 *
 * @param inObj exactly one input Result; ownership is transferred (released in _free)
 * @return the layer result
 */
@Nullable
@Override
public Result evalAndFree(final Result... inObj) {
    assert 1 == inObj.length;
    TensorList inputData = inObj[0].getData();
    int[] inputDimensions = inputData.getDimensions();
    assert 3 == inputDimensions.length;
    return new Result(CudaSystem.run(gpu -> {
        CudaTensor tensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
        CudaTensorList output = getOutput(gpu, tensor);
        tensor.freeRef();
        return output;
    }, inputData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
        // The Gramian output is the flattened channels x channels matrix, 1x1 spatially.
        @Nonnull final int[] outputDimensions = { 1, 1, inputDimensions[2] * inputDimensions[2] };
        if (!Arrays.equals(delta.getDimensions(), outputDimensions)) {
            throw new AssertionError(Arrays.toString(delta.getDimensions()) + " != " + Arrays.toString(outputDimensions));
        }
        if (inObj[0].isAlive()) {
            final TensorList passbackTensorList = CudaSystem.run(gpu -> {
                @Nullable final CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
                CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
                // NOTE(review): delta is released here even though it is also passed to
                // CudaSystem.run below as its tracked argument; sibling layers (e.g.
                // BinarySumLayer) pass delta without this inner freeRef — confirm run()'s
                // ownership contract before relying on this.
                delta.freeRef();
                CudaTensorList feedback = getFeedback(gpu, inputTensor, deltaTensor);
                deltaTensor.freeRef();
                inputTensor.freeRef();
                return feedback;
            }, delta);
            inObj[0].accumulate(buffer, passbackTensorList);
        } else {
            // No live consumer: drop the delta without computing a passback.
            delta.freeRef();
        }
    }) {

        @Override
        public final void accumulate(DeltaSet<Layer> buffer, TensorList delta) {
            getAccumulator().accept(buffer, delta);
        }

        @Override
        protected void _free() {
            // Release the references captured by the closures above.
            inputData.freeRef();
            Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
        }

        @Override
        public boolean isAlive() {
            return Arrays.stream(inObj).anyMatch(x -> x.isAlive());
        }
    };
}
Also used : IntStream(java.util.stream.IntStream) JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) jcuda.jcudnn.cudnnReduceTensorDescriptor(jcuda.jcudnn.cudnnReduceTensorDescriptor) LoggerFactory(org.slf4j.LoggerFactory) jcuda.jcudnn.cudnnReduceTensorOp(jcuda.jcudnn.cudnnReduceTensorOp) Result(com.simiacryptus.mindseye.lang.Result) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) Precision(com.simiacryptus.mindseye.lang.cudnn.Precision) CudnnHandle(com.simiacryptus.mindseye.lang.cudnn.CudnnHandle) Map(java.util.Map) Layer(com.simiacryptus.mindseye.lang.Layer) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) CudaResource(com.simiacryptus.mindseye.lang.cudnn.CudaResource) Logger(org.slf4j.Logger) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) jcuda.jcudnn.cudnnOpTensorOp(jcuda.jcudnn.cudnnOpTensorOp) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) jcuda.jcudnn.cudnnIndicesType(jcuda.jcudnn.cudnnIndicesType) jcuda.jcudnn.cudnnNanPropagation(jcuda.jcudnn.cudnnNanPropagation) jcuda.jcudnn.cudnnReduceTensorIndices(jcuda.jcudnn.cudnnReduceTensorIndices) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem) TensorList(com.simiacryptus.mindseye.lang.TensorList) MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) jcuda.jcudnn.cudnnOpTensorDescriptor(jcuda.jcudnn.cudnnOpTensorDescriptor) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) Nonnull(javax.annotation.Nonnull) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) TensorList(com.simiacryptus.mindseye.lang.TensorList) 
Nullable(javax.annotation.Nullable) Result(com.simiacryptus.mindseye.lang.Result) Nullable(javax.annotation.Nullable)

Example 3 with CudaTensorList

use of com.simiacryptus.mindseye.lang.cudnn.CudaTensorList in project MindsEye by SimiaCryptus.

In class BandAvgReducerLayer, the method evalAndFree:

/**
 * Computes, per example, the spatial average of each band (scaled by {@code alpha}) on the
 * GPU, producing a 1 x 1 x bands output; falls back to the compatibility layer when CUDA is
 * disabled.
 * <p>
 * The backward pass runs on the CPU: each band's scalar delta is spread uniformly over that
 * band's pixels, scaled by {@code alpha / pixels}. (A commented-out GPU implementation of
 * the backward pass is retained below for reference.)
 *
 * @param inObj the inputs; only inObj[0] is used (ownership transferred, released in _free)
 * @return the band-averaged result
 */
@Nullable
@Override
public Result evalAndFree(final Result... inObj) {
    if (!CudaSystem.isEnabled())
        return getCompatibilityLayer().evalAndFree(inObj);
    final Result input = inObj[0];
    TensorList inputData = input.getData();
    @Nonnull final int[] inputSize = inputData.getDimensions();
    int length = inputData.length();
    final int bands = inputSize[2];
    CudaTensorList result = CudaSystem.run(gpu -> {
        CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
        // One scalar per band per example: output descriptor is length x bands x 1 x 1.
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, bands, 1, 1);
        long size = (long) precision.size * outputDescriptor.nStride * length;
        @Nonnull final CudaMemory outputPtr = gpu.allocate(size, MemoryType.Managed, true);
        // AVG reduction collapses the spatial dimensions (indices unused: NO_INDICES).
        CudaResource<cudnnReduceTensorDescriptor> reduceTensorDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_AVG, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
        CudaMemory inputMemory = inputTensor.getMemory(gpu);
        @Nonnull final CudaMemory workspacePtr = gpu.allocate(inputMemory.size, MemoryType.Device, true);
        // NOTE(review): 12 * length bytes for the unused index buffer — confirm this size
        // against cudnnReduceTensor's requirements.
        @Nonnull final CudaMemory indexPtr = gpu.allocate(12 * length, MemoryType.Device, false);
        gpu.cudnnReduceTensor(reduceTensorDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(alpha), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr());
        outputPtr.dirty();
        inputMemory.dirty();
        Stream.of(inputMemory, inputTensor, reduceTensorDescriptor, workspacePtr, indexPtr, inputData).forEach(ReferenceCounting::freeRef);
        return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, new int[] { 1, 1, bands }, precision);
    });
    int pixels = inputSize[0] * inputSize[1];
    return new Result(result, (DeltaSet<Layer> ctx, TensorList delta) -> {
        TensorList passback;
        // CPU backward: broadcast each band's delta uniformly over that band's pixels.
        passback = TensorArray.wrap(delta.stream().map(x -> {
            Tensor tensor = new Tensor(inputSize[0], inputSize[1], inputSize[2]).setByCoord(c -> x.get(c.getCoords()[2]) * alpha / pixels);
            x.freeRef();
            return tensor;
        }).toArray(i -> new Tensor[i]));
        // Retained alternative: GPU implementation of the same backward pass.
        // passback = CudaSystem.run(gpu -> {
        // CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
        // @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision,
        // length, inputSize[2], inputSize[1], inputSize[0]);
        // @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
        // CudaMemory deltaMemory = deltaTensor.getMemory(gpu);
        // @Nonnull final CudaDevice.CudaTensorDescriptor inputDescriptor = gpu.newTensorDescriptor(precision,
        // 1, 1, inputSize[1], inputSize[0]);
        // for(int batch=0;batch<length;batch++){
        // Tensor tensor = delta.get(batch);
        // for(int band=0;band<bands;band++){
        // int i = batch * bands + band;
        // CudaMemory img = outputPtr.withByteOffset(precision.size * i * outputDescriptor.cStride);
        // CudaMemory val = deltaMemory.withByteOffset(precision.size * i);
        // gpu.cudnnSetTensor(inputDescriptor.getPtr(), img.getPtr(), precision.getPointer(tensor.get(band) / outputDescriptor.cStride));
        // img.freeRef();
        // val.freeRef();
        // outputPtr.dirty().synchronize();
        // }
        // }
        // Stream.of(deltaMemory, deltaTensor, inputDescriptor).forEach(ReferenceCounting::freeRef);
        // return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, inputSize, precision);
        // });
        input.accumulate(ctx, passback);
    }) {

        @Override
        protected void _free() {
            super._free();
            input.freeRef();
        }
    };
}
Also used : JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) jcuda.jcudnn.cudnnReduceTensorDescriptor(jcuda.jcudnn.cudnnReduceTensorDescriptor) Tensor(com.simiacryptus.mindseye.lang.Tensor) jcuda.jcudnn.cudnnReduceTensorOp(jcuda.jcudnn.cudnnReduceTensorOp) Result(com.simiacryptus.mindseye.lang.Result) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) Precision(com.simiacryptus.mindseye.lang.cudnn.Precision) Map(java.util.Map) Layer(com.simiacryptus.mindseye.lang.Layer) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) CudaResource(com.simiacryptus.mindseye.lang.cudnn.CudaResource) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) jcuda.jcudnn.cudnnIndicesType(jcuda.jcudnn.cudnnIndicesType) jcuda.jcudnn.cudnnNanPropagation(jcuda.jcudnn.cudnnNanPropagation) jcuda.jcudnn.cudnnReduceTensorIndices(jcuda.jcudnn.cudnnReduceTensorIndices) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) Stream(java.util.stream.Stream) CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem) TensorList(com.simiacryptus.mindseye.lang.TensorList) MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType) TensorArray(com.simiacryptus.mindseye.lang.TensorArray) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) Tensor(com.simiacryptus.mindseye.lang.Tensor) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) Nonnull(javax.annotation.Nonnull) jcuda.jcudnn.cudnnReduceTensorDescriptor(jcuda.jcudnn.cudnnReduceTensorDescriptor) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) 
CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) TensorList(com.simiacryptus.mindseye.lang.TensorList) Result(com.simiacryptus.mindseye.lang.Result) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nullable(javax.annotation.Nullable)

Example 4 with CudaTensorList

use of com.simiacryptus.mindseye.lang.cudnn.CudaTensorList in project MindsEye by SimiaCryptus.

In class BinarySumLayer, the method evalAndFree:

/**
 * Computes the element-wise weighted sum {@code leftFactor * left + rightFactor * right} on
 * the GPU via {@code cudnnOpTensor}.
 * <p>
 * Arity handling: a single input is returned unchanged (legal only when both factors are 1);
 * more than two inputs are reduced pairwise (also requiring unit factors); otherwise exactly
 * two inputs are summed. Falls back to the compatibility layer when CUDA is disabled.
 * <p>
 * The backward pass scales the incoming delta by the respective factor for each live input;
 * the two accumulations run serially or in parallel depending on CoreSettings.
 *
 * @param inObj the inputs (ownership transferred; released in _free)
 * @return the weighted-sum result
 * @throws IllegalStateException    if a degenerate arity is used with non-unit factors
 * @throws IllegalArgumentException if input dimensions or lengths are inconsistent
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
    if (inObj.length == 1) {
        // Identity shortcut: a one-element "sum" is only valid with unit factors.
        if (rightFactor != 1)
            throw new IllegalStateException();
        if (leftFactor != 1)
            throw new IllegalStateException();
        return inObj[0];
    }
    if (inObj.length > 2) {
        // N-ary case: fold the inputs pairwise through this same method.
        if (rightFactor != 1)
            throw new IllegalStateException();
        if (leftFactor != 1)
            throw new IllegalStateException();
        return Arrays.stream(inObj).reduce((a, b) -> evalAndFree(a, b)).get();
    }
    assert (inObj.length == 2);
    final TensorList leftData = inObj[0].getData();
    final TensorList rightData = inObj[1].getData();
    int[] leftDimensions = leftData.getDimensions();
    if (3 < leftDimensions.length) {
        throw new IllegalArgumentException("dimensions=" + Arrays.toString(leftDimensions));
    }
    // Pad to exactly 3 dimensions (missing height/bands default to 1).
    @Nonnull final int[] dimensions = { leftDimensions.length < 1 ? 0 : leftDimensions[0], leftDimensions.length < 2 ? 1 : leftDimensions[1], leftDimensions.length < 3 ? 1 : leftDimensions[2] };
    final int length = leftData.length();
    if (length != rightData.length())
        throw new IllegalArgumentException();
    if (3 != dimensions.length) {
        throw new IllegalArgumentException("dimensions=" + Arrays.toString(dimensions));
    }
    // All inputs must carry the same total element count.
    for (int i = 1; i < inObj.length; i++) {
        if (Tensor.length(dimensions) != Tensor.length(inObj[i].getData().getDimensions())) {
            throw new IllegalArgumentException(Arrays.toString(dimensions) + " != " + Arrays.toString(inObj[i].getData().getDimensions()));
        }
    }
    if (!CudaSystem.isEnabled())
        return getCompatibilityLayer().evalAndFree(inObj);
    return new Result(CudaSystem.run(gpu -> {
        @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_ADD, precision);
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
        // .getDenseAndFree(gpu);//.moveTo(gpu.getDeviceNumber());
        @Nullable final CudaTensor lPtr = gpu.getTensor(leftData, precision, MemoryType.Device, false);
        // .getDenseAndFree(gpu);//.moveTo(gpu.getDeviceNumber());
        @Nullable final CudaTensor rPtr = gpu.getTensor(rightData, precision, MemoryType.Device, false);
        @Nonnull final CudaMemory outputPtr = gpu.allocate(precision.size * Tensor.length(dimensions) * length, MemoryType.Managed, true);
        CudaMemory lPtrMemory = lPtr.getMemory(gpu);
        CudaMemory rPtrMemory = rPtr.getMemory(gpu);
        // output = leftFactor * left + rightFactor * right (beta = 0 overwrites output).
        gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(leftFactor), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(rightFactor), rPtr.descriptor.getPtr(), rPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr());
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
        lPtrMemory.dirty();
        rPtrMemory.dirty();
        outputPtr.dirty();
        rPtrMemory.freeRef();
        lPtrMemory.freeRef();
        CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, outputDescriptor, precision);
        Stream.<ReferenceCounting>of(opDescriptor, lPtr, rPtr).forEach(ReferenceCounting::freeRef);
        return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
    }, leftData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
        // Backward toward the left input: passback = leftFactor * delta.
        Runnable a = () -> {
            if (inObj[0].isAlive()) {
                CudaTensorList tensorList = CudaSystem.run(gpu -> {
                    @Nullable final CudaTensor lPtr = gpu.getTensor(delta, precision, MemoryType.Device, false);
                    @Nonnull final CudaMemory passbackPtr = gpu.allocate(precision.size * Tensor.length(dimensions) * length, MemoryType.Managed.normalize(), true);
                    @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
                    CudaMemory lPtrMemory = lPtr.getMemory(gpu);
                    gpu.cudnnTransformTensor(precision.getPointer(leftFactor), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(0.0), passbackDescriptor.getPtr(), passbackPtr.getPtr());
                    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
                    passbackPtr.dirty();
                    lPtrMemory.dirty();
                    lPtrMemory.freeRef();
                    CudaTensor cudaTensor = CudaTensor.wrap(passbackPtr, passbackDescriptor, precision);
                    lPtr.freeRef();
                    return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
                }, delta);
                inObj[0].accumulate(buffer, tensorList);
            }
        };
        // Backward toward the right input: passback = rightFactor * delta.
        Runnable b = () -> {
            if (inObj[1].isAlive()) {
                CudaTensorList tensorList = CudaSystem.run(gpu -> {
                    @Nullable final CudaTensor lPtr = gpu.getTensor(delta, precision, MemoryType.Device, false);
                    @Nonnull final CudaMemory outputPtr = gpu.allocate(precision.size * Tensor.length(dimensions) * length, MemoryType.Managed.normalize(), true);
                    @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
                    CudaMemory lPtrMemory = lPtr.getMemory(gpu);
                    gpu.cudnnTransformTensor(precision.getPointer(rightFactor), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(0.0), passbackDescriptor.getPtr(), outputPtr.getPtr());
                    outputPtr.dirty();
                    lPtrMemory.dirty();
                    lPtrMemory.freeRef();
                    CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, passbackDescriptor, precision);
                    lPtr.freeRef();
                    return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
                }, delta);
                inObj[1].accumulate(buffer, tensorList);
            }
        };
        // Honor the single-threaded test/debug mode; otherwise accumulate both in parallel.
        if (CoreSettings.INSTANCE.isSingleThreaded())
            TestUtil.runAllSerial(a, b);
        else
            TestUtil.runAllParallel(a, b);
    }) {

        @Override
        protected void _free() {
            Arrays.stream(inObj).forEach(x -> x.freeRef());
            leftData.freeRef();
            rightData.freeRef();
        }

        @Override
        public boolean isAlive() {
            for (@Nonnull final Result element : inObj) if (element.isAlive()) {
                return true;
            }
            return false;
        }
    };
}
Also used : PipelineNetwork(com.simiacryptus.mindseye.network.PipelineNetwork) JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) Tensor(com.simiacryptus.mindseye.lang.Tensor) SumInputsLayer(com.simiacryptus.mindseye.layers.java.SumInputsLayer) Result(com.simiacryptus.mindseye.lang.Result) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) Precision(com.simiacryptus.mindseye.lang.cudnn.Precision) Map(java.util.Map) Layer(com.simiacryptus.mindseye.lang.Layer) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) CudaResource(com.simiacryptus.mindseye.lang.cudnn.CudaResource) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) jcuda.jcudnn.cudnnOpTensorOp(jcuda.jcudnn.cudnnOpTensorOp) TestUtil(com.simiacryptus.mindseye.test.TestUtil) CoreSettings(com.simiacryptus.mindseye.lang.CoreSettings) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) Stream(java.util.stream.Stream) CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem) TensorList(com.simiacryptus.mindseye.lang.TensorList) LinearActivationLayer(com.simiacryptus.mindseye.layers.java.LinearActivationLayer) MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) jcuda.jcudnn.cudnnOpTensorDescriptor(jcuda.jcudnn.cudnnOpTensorDescriptor) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) Nonnull(javax.annotation.Nonnull) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) TensorList(com.simiacryptus.mindseye.lang.TensorList) Result(com.simiacryptus.mindseye.lang.Result) 
CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) CudaResource(com.simiacryptus.mindseye.lang.cudnn.CudaResource) Nullable(javax.annotation.Nullable)

Example 5 with CudaTensorList

use of com.simiacryptus.mindseye.lang.cudnn.CudaTensorList in project MindsEye by SimiaCryptus.

In class AvgReducerLayer, the method evalAndFree:

/**
 * Reduces each input tensor to the average of all its elements (one scalar per example) on
 * the GPU; falls back to the compatibility layer when CUDA is disabled.
 * <p>
 * The backward pass runs on the CPU: each example's scalar delta is spread uniformly across
 * every element of the input shape. (A commented-out cudnnAddTensor-based GPU backward is
 * retained below, flagged as possibly unsupported by cuDNN.)
 *
 * @param inObj the inputs; only inObj[0] is used (ownership transferred, released in _free)
 * @return the per-example scalar averages as a 1 x 1 x 1 tensor list
 */
@Nullable
@Override
public Result evalAndFree(final Result... inObj) {
    if (!CudaSystem.isEnabled())
        return getCompatibilityLayer().evalAndFree(inObj);
    final Result input = inObj[0];
    final TensorList inputData = input.getData();
    @Nonnull final int[] inputSize = inputData.getDimensions();
    int length = inputData.length();
    CudaTensorList result = CudaSystem.run(gpu -> {
        CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
        // inputData is consumed here; inputSize/length were captured above before the free.
        inputData.freeRef();
        CudaMemory inputMemory = inputTensor.getMemory(gpu);
        // One scalar per example: output descriptor is length x 1 x 1 x 1.
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, 1, 1, 1);
        long size = (long) precision.size * outputDescriptor.nStride * length;
        @Nonnull final CudaMemory outputMemory = gpu.allocate(size, MemoryType.Managed, true);
        // AVG reduction over every non-batch dimension (indices unused: NO_INDICES).
        CudaResource<cudnnReduceTensorDescriptor> reduceTensorDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_AVG, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
        @Nonnull final CudaMemory workspacePtr = gpu.allocate(inputMemory.size, MemoryType.Device, true);
        // NOTE(review): 12 * length bytes for the unused index buffer — confirm this size.
        @Nonnull final CudaMemory indexPtr = gpu.allocate(12 * length, MemoryType.Device, false);
        // outputPtr.synchronize();
        gpu.cudnnReduceTensor(reduceTensorDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputMemory.getPtr());
        outputMemory.dirty();
        inputMemory.dirty();
        Stream.of(inputTensor, inputMemory, reduceTensorDescriptor, workspacePtr, indexPtr).forEach(ReferenceCounting::freeRef);
        return CudaTensorList.wrap(CudaTensor.wrap(outputMemory, outputDescriptor, precision), length, new int[] { 1, 1, 1 }, precision);
    });
    return new Result(result, (DeltaSet<Layer> ctx, TensorList delta) -> {
        // Not supported by CuDNN?
        // CudaTensorList passback = CudaSystem.run(gpu -> {
        // CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, false);
        // CudaMemory deltaMemory = deltaTensor.getMemory(gpu);
        // 
        // @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor1 = gpu.newTensorDescriptor(
        // precision, length, inputSize[2], inputSize[1], inputSize[0]
        // );
        // @Nonnull final CudaMemory passbackPtr1 = gpu.allocate((long) precision.size * passbackDescriptor1.nStride * length, MemoryType.Device, false);
        // gpu.cudnnAddTensor(precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), deltaMemory.getPtr(),
        // precision.getPointer(1.0), passbackDescriptor1.getPtr(), passbackPtr1.getPtr());
        // passbackPtr1.dirty();
        // 
        // Stream.of(deltaTensor, deltaMemory, passbackDescriptor1, passbackPtr1).forEach(ReferenceCounting::freeRef);
        // return CudaTensorList.wrap(CudaTensor.wrap(passbackPtr1, passbackDescriptor1, precision), length, inputSize, precision);
        // });
        // CPU backward: spread each scalar delta evenly over all elements of the input shape.
        TensorList passback = TensorArray.wrap(IntStream.range(0, length).mapToObj(i -> {
            Tensor tensor = delta.get(i);
            Tensor tensor1 = new Tensor(inputSize).setAll((double) tensor.get(0) / Tensor.length(inputSize));
            tensor.freeRef();
            return tensor1;
        }).toArray(i -> new Tensor[i]));
        input.accumulate(ctx, passback);
    }) {

        @Override
        protected void _free() {
            super._free();
            input.freeRef();
        }
    };
}
Also used : IntStream(java.util.stream.IntStream) JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) jcuda.jcudnn.cudnnReduceTensorDescriptor(jcuda.jcudnn.cudnnReduceTensorDescriptor) Tensor(com.simiacryptus.mindseye.lang.Tensor) jcuda.jcudnn.cudnnReduceTensorOp(jcuda.jcudnn.cudnnReduceTensorOp) Result(com.simiacryptus.mindseye.lang.Result) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) Precision(com.simiacryptus.mindseye.lang.cudnn.Precision) Map(java.util.Map) Layer(com.simiacryptus.mindseye.lang.Layer) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) CudaResource(com.simiacryptus.mindseye.lang.cudnn.CudaResource) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) jcuda.jcudnn.cudnnIndicesType(jcuda.jcudnn.cudnnIndicesType) jcuda.jcudnn.cudnnNanPropagation(jcuda.jcudnn.cudnnNanPropagation) jcuda.jcudnn.cudnnReduceTensorIndices(jcuda.jcudnn.cudnnReduceTensorIndices) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) Stream(java.util.stream.Stream) CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem) TensorList(com.simiacryptus.mindseye.lang.TensorList) MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType) TensorArray(com.simiacryptus.mindseye.lang.TensorArray) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) Tensor(com.simiacryptus.mindseye.lang.Tensor) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) Nonnull(javax.annotation.Nonnull) jcuda.jcudnn.cudnnReduceTensorDescriptor(jcuda.jcudnn.cudnnReduceTensorDescriptor) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) 
DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) TensorList(com.simiacryptus.mindseye.lang.TensorList) Result(com.simiacryptus.mindseye.lang.Result) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nullable(javax.annotation.Nullable)

Aggregations

CudaDevice (com.simiacryptus.mindseye.lang.cudnn.CudaDevice)9 CudaMemory (com.simiacryptus.mindseye.lang.cudnn.CudaMemory)9 CudaTensorList (com.simiacryptus.mindseye.lang.cudnn.CudaTensorList)9 Nonnull (javax.annotation.Nonnull)9 Nullable (javax.annotation.Nullable)9 Layer (com.simiacryptus.mindseye.lang.Layer)8 TensorList (com.simiacryptus.mindseye.lang.TensorList)8 CudaSystem (com.simiacryptus.mindseye.lang.cudnn.CudaSystem)8 CudaTensor (com.simiacryptus.mindseye.lang.cudnn.CudaTensor)8 MemoryType (com.simiacryptus.mindseye.lang.cudnn.MemoryType)8 Precision (com.simiacryptus.mindseye.lang.cudnn.Precision)8 Arrays (java.util.Arrays)8 ReferenceCounting (com.simiacryptus.mindseye.lang.ReferenceCounting)7 Stream (java.util.stream.Stream)7 JsonObject (com.google.gson.JsonObject)6 DataSerializer (com.simiacryptus.mindseye.lang.DataSerializer)6 DeltaSet (com.simiacryptus.mindseye.lang.DeltaSet)6 LayerBase (com.simiacryptus.mindseye.lang.LayerBase)6 Result (com.simiacryptus.mindseye.lang.Result)6 Tensor (com.simiacryptus.mindseye.lang.Tensor)6