Example 1 with CudaMemory

Use of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus.

Class GramianLayer, method getFeedback:

/**
 * Computes the feedback (input gradient) for the Gramian operation.
 *
 * @param gpu         the cuDNN handle to execute on
 * @param inputTensor the forward-pass input tensor
 * @param deltaTensor the incoming delta (gradient) tensor
 * @return the feedback tensor list
 */
@Nonnull
public CudaTensorList getFeedback(final CudnnHandle gpu, final CudaTensor inputTensor, final CudaTensor deltaTensor) {
    int pixels = inputTensor.descriptor.height * inputTensor.descriptor.width;
    CudaMemory inputMemory = inputTensor.getMemory(gpu);
    CudaMemory deltaMemory = deltaTensor.getMemory(gpu);
    @Nonnull final int[] inputDimensions = { inputTensor.descriptor.width, inputTensor.descriptor.height, inputTensor.descriptor.channels };
    final int length = inputTensor.descriptor.batchCount;
    final int bands = inputDimensions[2];
    @Nullable final CudaMemory bufferMemory = gpu.allocate((long) inputTensor.descriptor.nStride * length * precision.size, MemoryType.Device, true);
    @Nonnull final CudaDevice.CudaTensorDescriptor bufferDescriptor = gpu.newTensorDescriptor(
        precision, length, bands, inputDimensions[1], inputDimensions[0],
        inputDimensions[0] * inputDimensions[1] * bands,
        inputDimensions[0] * inputDimensions[1],
        inputDimensions[0], 1);
    @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(
        precision, length, bands, inputDimensions[1], inputDimensions[0],
        inputDimensions[0] * inputDimensions[1] * bands,
        inputDimensions[0] * inputDimensions[1],
        inputDimensions[0], 1);
    @Nullable final CudaMemory outputMemory = gpu.allocate((long) outputDescriptor.nStride * precision.size * length, MemoryType.Managed, true);
    @Nonnull final CudaMemory workspacePtr = gpu.allocate(Math.max(outputMemory.size, inputMemory.size), MemoryType.Device, true);
    @Nonnull final CudaMemory indexPtr = gpu.allocate(12 * length, MemoryType.Device, false);
    @Nonnull final CudaResource<cudnnOpTensorDescriptor> multiplyDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
    CudaResource<cudnnReduceTensorDescriptor> reduceAddDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_ADD, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
    @Nonnull final CudaDevice.CudaTensorDescriptor bandDescriptor = gpu.newTensorDescriptor(precision, length, 1, inputDimensions[1], inputDimensions[0], inputDimensions[2] * inputDimensions[1] * inputDimensions[0], inputDimensions[1] * inputDimensions[0], inputDimensions[0], 1);
    @Nonnull final CudaDevice.CudaTensorDescriptor viewDescriptor1 = gpu.newTensorDescriptor(
        precision, length, bands, 1, 1,
        deltaTensor.descriptor.nStride,
        deltaTensor.descriptor.cStride,
        deltaTensor.descriptor.hStride,
        deltaTensor.descriptor.wStride);
    @Nonnull final CudaDevice.CudaTensorDescriptor viewDescriptor2 = gpu.newTensorDescriptor(
        precision, length, bands, 1, 1,
        deltaTensor.descriptor.nStride,
        deltaTensor.descriptor.cStride * bands,
        deltaTensor.descriptor.hStride,
        deltaTensor.descriptor.wStride);
    IntStream.range(0, bands).forEach(band -> {
        CudaMemory deltaView1 = deltaMemory.withByteOffset(band * precision.size * bands);
        CudaSystem.handle(gpu.cudnnOpTensor(multiplyDescriptor.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(1.0), viewDescriptor1.getPtr(), deltaView1.getPtr(), precision.getPointer(0.0), bufferDescriptor.getPtr(), bufferMemory.getPtr()));
        inputMemory.dirty();
        deltaView1.dirty();
        bufferMemory.dirty();
        deltaView1.freeRef();
        CudaMemory deltaView2 = deltaMemory.withByteOffset(band * precision.size);
        CudaSystem.handle(gpu.cudnnOpTensor(multiplyDescriptor.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(1.0), viewDescriptor2.getPtr(), deltaView2.getPtr(), precision.getPointer(1.0), bufferDescriptor.getPtr(), bufferMemory.getPtr()));
        inputMemory.dirty();
        deltaView2.dirty();
        bufferMemory.dirty();
        deltaView2.freeRef();
        CudaMemory outputViewMem = outputMemory.withByteOffset(bandDescriptor.cStride * band * precision.size);
        gpu.cudnnReduceTensor(reduceAddDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(alpha / pixels), bufferDescriptor.getPtr(), bufferMemory.getPtr(), precision.getPointer(0.0), bandDescriptor.getPtr(), outputViewMem.getPtr());
        outputViewMem.dirty();
        bufferMemory.dirty();
        outputViewMem.freeRef();
    });
    CudaTensorList feedback = CudaTensorList.wrap(CudaTensor.wrap(outputMemory, outputDescriptor, precision), length, inputDimensions, precision);
    bandDescriptor.freeRef();
    viewDescriptor1.freeRef();
    viewDescriptor2.freeRef();
    workspacePtr.freeRef();
    indexPtr.freeRef();
    reduceAddDescriptor.freeRef();
    inputMemory.freeRef();
    multiplyDescriptor.freeRef();
    deltaMemory.freeRef();
    bufferMemory.freeRef();
    bufferDescriptor.freeRef();
    return feedback;
}
Also used: CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList), CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice), Nonnull(javax.annotation.Nonnull), jcuda.jcudnn.cudnnReduceTensorDescriptor, jcuda.jcudnn.cudnnOpTensorDescriptor, CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory), Nullable(javax.annotation.Nullable)
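
The recurring CudaMemory discipline in getFeedback is: allocate device memory, run cuDNN ops against it, mark every touched buffer dirty(), and freeRef() each handle, including the temporary views created with withByteOffset. Below is a minimal sketch of that lifecycle on its own, using only calls that appear in the examples on this page; the buffer size, byte offset, Precision.Double constant, and class name are illustrative assumptions, not part of the project.

import com.simiacryptus.mindseye.lang.cudnn.CudaMemory;
import com.simiacryptus.mindseye.lang.cudnn.CudaSystem;
import com.simiacryptus.mindseye.lang.cudnn.MemoryType;
import com.simiacryptus.mindseye.lang.cudnn.Precision;

public class CudaMemoryLifecycleSketch {
    public static void main(String[] args) {
        final Precision precision = Precision.Double; // assumption: the double-precision constant
        final int elements = 1024;                    // illustrative buffer size
        long bytes = CudaSystem.run(gpu -> {
            // Allocate device memory; the last flag mirrors the 'dirty' argument used above.
            CudaMemory buffer = gpu.allocate((long) elements * precision.size, MemoryType.Device, true);
            // A view into the same allocation, as done for the per-band delta views above.
            CudaMemory view = buffer.withByteOffset(16 * precision.size);
            // ... cuDNN ops would read and write buffer.getPtr() / view.getPtr() here ...
            buffer.dirty();               // mark the device copy as modified
            view.dirty();
            long allocated = buffer.size; // CudaMemory exposes its byte size, as used for the workspace above
            view.freeRef();               // release the view first,
            buffer.freeRef();             // then the underlying allocation
            return allocated;
        });
        System.out.println("allocated bytes: " + bytes);
    }
}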

Example 2 with CudaMemory

Use of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus.

Class GramianLayer, method getOutput:

/**
 * Computes the Gramian output for the given input tensor.
 *
 * @param gpu         the cuDNN handle to execute on
 * @param inputTensor the input tensor
 * @return the output tensor list
 */
@Nonnull
public CudaTensorList getOutput(final CudnnHandle gpu, final CudaTensor inputTensor) {
    int pixels = inputTensor.descriptor.height * inputTensor.descriptor.width;
    @Nonnull final int[] inputDimensions = { inputTensor.descriptor.width, inputTensor.descriptor.height, inputTensor.descriptor.channels };
    final int length = inputTensor.descriptor.batchCount;
    final int bands = inputDimensions[2];
    @Nonnull final int[] outputDimensions = { 1, 1, bands * bands };
    CudaMemory inputMemory = inputTensor.getMemory(gpu);
    @Nonnull final CudaDevice.CudaTensorDescriptor ouputDescriptor = gpu.newTensorDescriptor(
        precision, length, bands * bands, 1, 1,
        bands * bands, 1, 1, 1);
    @Nullable final CudaMemory outputMemory = gpu.allocate((long) ouputDescriptor.nStride * precision.size * length, MemoryType.Device, true);
    @Nonnull final CudaDevice.CudaTensorDescriptor bufferDescriptor = gpu.newTensorDescriptor(
        precision, length, bands, inputDimensions[1], inputDimensions[0],
        inputDimensions[0] * inputDimensions[1] * bands,
        inputDimensions[0] * inputDimensions[1],
        inputDimensions[0], 1);
    @Nullable final CudaMemory bufferMemory = gpu.allocate((long) bufferDescriptor.nStride * length * precision.size, MemoryType.Device, true);
    @Nonnull final CudaDevice.CudaTensorDescriptor inputViewDescriptor = gpu.newTensorDescriptor(
        precision, length, 1, inputDimensions[1], inputDimensions[0],
        inputTensor.descriptor.nStride,
        inputTensor.descriptor.cStride,
        inputTensor.descriptor.hStride,
        inputTensor.descriptor.wStride);
    CudaResource<cudnnReduceTensorDescriptor> reduceAddDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_ADD, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
    @Nonnull final CudaDevice.CudaTensorDescriptor outputViewDescriptor = gpu.newTensorDescriptor(precision, length, bands, 1, 1, bands * bands, 1, 1, 1);
    @Nonnull final CudaResource<cudnnOpTensorDescriptor> multiplyDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
    @Nonnull final CudaMemory workspacePtr = gpu.allocate(Math.max(outputMemory.size, inputMemory.size), MemoryType.Device, true);
    @Nonnull final CudaMemory indexPtr = gpu.allocate((long) 12 * length, MemoryType.Device, true);
    IntStream.range(0, inputDimensions[2]).forEach(band -> {
        CudaMemory inputView = inputMemory.withByteOffset(band * precision.size * inputTensor.descriptor.cStride);
        CudaSystem.handle(gpu.cudnnOpTensor(multiplyDescriptor.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(1.0), inputViewDescriptor.getPtr(), inputView.getPtr(), precision.getPointer(0.0), bufferDescriptor.getPtr(), bufferMemory.getPtr()));
        bufferMemory.dirty();
        inputView.dirty();
        inputMemory.dirty();
        inputView.freeRef();
        CudaMemory outputView = outputMemory.withByteOffset(band * precision.size * bands);
        CudaSystem.handle(gpu.cudnnReduceTensor(reduceAddDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(alpha / pixels), bufferDescriptor.getPtr(), bufferMemory.getPtr(), precision.getPointer(0.0), outputViewDescriptor.getPtr(), outputView.getPtr()));
        outputView.dirty();
        bufferMemory.dirty();
        outputView.freeRef();
    });
    outputMemory.dirty();
    bufferMemory.dirty();
    inputMemory.dirty();
    bufferMemory.freeRef();
    multiplyDescriptor.freeRef();
    inputMemory.freeRef();
    bufferDescriptor.freeRef();
    inputViewDescriptor.freeRef();
    outputViewDescriptor.freeRef();
    reduceAddDescriptor.freeRef();
    workspacePtr.freeRef();
    indexPtr.freeRef();
    return CudaTensorList.wrap(CudaTensor.wrap(outputMemory, ouputDescriptor, precision), length, outputDimensions, precision);
}
Also used: CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice), Nonnull(javax.annotation.Nonnull), jcuda.jcudnn.cudnnReduceTensorDescriptor, jcuda.jcudnn.cudnnOpTensorDescriptor, CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory), Nullable(javax.annotation.Nullable)
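
Stripped of descriptors and reference counting, the band loop above computes, per image, the bands x bands Gram matrix G[i][j] = (alpha / pixels) * sum over pixels of band_i(p) * band_j(p), flattened into a 1 x 1 x bands*bands output. A plain-CPU reference sketch of that arithmetic follows; the class name, array layout, and sample values are illustrative, not part of the project.

/** CPU reference for the Gramian computed above: 'data' holds one image in [band][pixel] layout. */
public class GramianReferenceSketch {
    static double[] gramian(double[][] data, double alpha) {
        final int bands = data.length;
        final int pixels = data[0].length;
        final double[] out = new double[bands * bands]; // flattened 1 x 1 x bands*bands output
        for (int i = 0; i < bands; i++) {
            for (int j = 0; j < bands; j++) {
                double sum = 0;
                for (int p = 0; p < pixels; p++) {
                    sum += data[i][p] * data[j][p]; // elementwise multiply, as cudnnOpTensor MUL does per pixel
                }
                out[i * bands + j] = alpha * sum / pixels; // reduce-add scaled by alpha / pixels
            }
        }
        return out;
    }

    public static void main(String[] args) {
        double[][] image = { { 1, 2, 3, 4 }, { 0.5, 0.5, 0.5, 0.5 } }; // 2 bands, 4 pixels
        double[] g = gramian(image, 1.0);
        System.out.println(java.util.Arrays.toString(g));
    }
}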

Example 3 with CudaMemory

Use of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus.

Class ImgBandBiasLayer, method evalAndFree:

@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
    if (!CudaSystem.isEnabled())
        return getCompatibilityLayer().evalAndFree(inObj);
    if (inObj.length != 1) {
        throw new IllegalArgumentException("inObj.length=" + inObj.length);
    }
    Result input = inObj[0];
    final TensorList leftData = input.getData();
    @Nonnull final int[] inputDimensions = leftData.getDimensions();
    final int length = leftData.length();
    if (3 != inputDimensions.length) {
        throw new IllegalArgumentException("dimensions=" + Arrays.toString(inputDimensions));
    }
    // assert !right.isAlive();
    return new Result(CudaSystem.run(gpu -> {
        @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_ADD, precision);
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, inputDimensions[2], inputDimensions[1], inputDimensions[0], inputDimensions[2] * inputDimensions[1] * inputDimensions[0], inputDimensions[1] * inputDimensions[0], inputDimensions[0], 1);
        @Nullable final CudaTensor inputTensor = gpu.getTensor(leftData, precision, MemoryType.Device, false);
        CudaMemory biasMem = gpu.allocate(bias.length() * precision.size, MemoryType.Device, true).write(precision, bias.getData());
        int[] biasDim = bias.getDimensions();
        CudaDevice.CudaTensorDescriptor biasDescriptor = gpu.newTensorDescriptor(precision, 1, biasDim[2], biasDim[1], biasDim[0], biasDim[2] * biasDim[1] * biasDim[0], biasDim[1] * biasDim[0], biasDim[0], 1);
        // assert lPtr.size == rPtr.size;
        @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Managed.normalize(), true);
        CudaMemory inputMemory = inputTensor.getMemory(gpu);
        CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(1.0), biasDescriptor.getPtr(), biasMem.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
        inputMemory.dirty();
        biasMem.dirty();
        outputPtr.dirty();
        inputMemory.freeRef();
        biasMem.freeRef();
        biasDescriptor.freeRef();
        inputTensor.freeRef();
        opDescriptor.freeRef();
        CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, outputDescriptor, precision);
        return CudaTensorList.wrap(cudaTensor, length, inputDimensions, precision);
    }, leftData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
        if (!isFrozen()) {
            @Nonnull double[] biasDelta = CudaSystem.run(gpu -> {
                @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, false);
                CudaMemory biasMem = gpu.allocate(bias.length() * precision.size, MemoryType.Device, true).write(precision, bias.getData());
                int[] biasDim = bias.getDimensions();
                CudaDevice.CudaTensorDescriptor biasDescriptor = gpu.newTensorDescriptor(precision, 1, biasDim[2], biasDim[1], biasDim[0], biasDim[2] * biasDim[1] * biasDim[0], biasDim[1] * biasDim[0], biasDim[0], 1);
                CudaMemory deltaTensorMemory = deltaTensor.getMemory(gpu);
                gpu.cudnnConvolutionBackwardBias(precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), deltaTensorMemory.getPtr(), precision.getPointer(0.0), biasDescriptor.getPtr(), biasMem.getPtr());
                assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
                biasMem.dirty();
                double[] biasV = new double[bias.length()];
                biasMem.read(precision, biasV);
                Stream.<ReferenceCounting>of(biasMem, deltaTensorMemory, deltaTensor, biasDescriptor).forEach(ReferenceCounting::freeRef);
                return biasV;
            }, delta);
            buffer.get(ImgBandBiasLayer.this, bias).addInPlace(biasDelta).freeRef();
        }
        if (input.isAlive()) {
            input.accumulate(buffer, delta);
        } else {
            delta.freeRef();
        }
    }) {

        @Override
        public final void accumulate(DeltaSet<Layer> buffer, TensorList delta) {
            getAccumulator().accept(buffer, delta);
        }

        @Override
        protected void _free() {
            leftData.freeRef();
            input.freeRef();
        }

        @Override
        public boolean isAlive() {
            for (@Nonnull final Result element : inObj) if (element.isAlive()) {
                return true;
            }
            return false;
        }
    };
}
Also used: JsonObject(com.google.gson.JsonObject), Arrays(java.util.Arrays), CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory), Tensor(com.simiacryptus.mindseye.lang.Tensor), Result(com.simiacryptus.mindseye.lang.Result), DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer), Precision(com.simiacryptus.mindseye.lang.cudnn.Precision), Map(java.util.Map), Layer(com.simiacryptus.mindseye.lang.Layer), ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting), Nonnull(javax.annotation.Nonnull), Nullable(javax.annotation.Nullable), CudaResource(com.simiacryptus.mindseye.lang.cudnn.CudaResource), Util(com.simiacryptus.util.Util), IntToDoubleFunction(java.util.function.IntToDoubleFunction), CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice), CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor), jcuda.jcudnn.cudnnOpTensorOp, CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList), FastRandom(com.simiacryptus.util.FastRandom), List(java.util.List), LayerBase(com.simiacryptus.mindseye.lang.LayerBase), Stream(java.util.stream.Stream), CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem), TensorList(com.simiacryptus.mindseye.lang.TensorList), DoubleSupplier(java.util.function.DoubleSupplier), MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType), ProductInputsLayer(com.simiacryptus.mindseye.layers.java.ProductInputsLayer), DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet), jcuda.jcudnn.cudnnOpTensorDescriptor
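
Besides the tensor op itself, this example shows CudaMemory's host transfer calls: write(precision, double[]) uploads the bias values before cudnnOpTensor, and read(precision, double[]) pulls the accumulated bias gradient back to the host in the backward pass. Below is a minimal round-trip sketch of just those two calls, using only API visible on this page; the class name, array contents, and Precision.Double constant are illustrative assumptions.

import com.simiacryptus.mindseye.lang.cudnn.CudaMemory;
import com.simiacryptus.mindseye.lang.cudnn.CudaSystem;
import com.simiacryptus.mindseye.lang.cudnn.MemoryType;
import com.simiacryptus.mindseye.lang.cudnn.Precision;

public class CudaMemoryRoundTripSketch {
    public static void main(String[] args) {
        final Precision precision = Precision.Double; // assumption: the double-precision constant
        final double[] hostData = { 0.1, 0.2, 0.3 };  // illustrative bias values
        double[] readBack = CudaSystem.run(gpu -> {
            // Upload: allocate device memory sized for the array and copy the host values in.
            CudaMemory deviceMem = gpu.allocate((long) hostData.length * precision.size, MemoryType.Device, true)
                .write(precision, hostData);
            // ... a kernel such as cudnnConvolutionBackwardBias would update deviceMem here ...
            deviceMem.dirty();
            // Download: copy the (possibly updated) device values back to the host.
            double[] result = new double[hostData.length];
            deviceMem.read(precision, result);
            deviceMem.freeRef();
            return result;
        });
        System.out.println(java.util.Arrays.toString(readBack));
    }
}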

Example 4 with CudaMemory

Use of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus.

Class ImgLinearSubnetLayer, method evalAndFree:

@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
    assert 1 == inObj.length;
    Result input = inObj[0];
    TensorList inputData = input.getData();
    @Nonnull final int[] inputDims = inputData.getDimensions();
    assert 3 == inputDims.length;
    int length = inputData.length();
    int maxBand = legs.stream().mapToInt(x -> x.toBand).max().getAsInt();
    assert maxBand == inputDims[2] : maxBand + " != " + inputDims[2];
    assert IntStream.range(0, maxBand).allMatch(i -> 1 == legs.stream().filter(x -> x.fromBand <= i && x.toBand > i).count());
    CudaTensor passback = CudaSystem.run(gpu -> {
        return CudaTensor.wrap(gpu.allocate(inputData.getElements() * precision.size, MemoryType.Device, true), gpu.newTensorDescriptor(precision, length, inputDims[2], inputDims[1], inputDims[0]), precision);
    });
    try {
        AtomicInteger counter = new AtomicInteger(0);
        SumInputsLayer sumInputsLayer = new SumInputsLayer();
        try {
            Result[] legResults = legs.stream().map(leg -> {
                passback.addRef();
                ImgBandSelectLayer imgBandSelectLayer = new ImgBandSelectLayer(leg.fromBand, leg.toBand);
                input.addRef();
                TensorList legData = imgBandSelectLayer.eval(input).getDataAndFree();
                imgBandSelectLayer.freeRef();
                return leg.inner.evalAndFree(new Result(legData, (DeltaSet<Layer> ctx, TensorList delta) -> {
                    int[] outputDimensions = delta.getDimensions();
                    int[] inputDimensions = inputDims;
                    synchronized (passback) {
                        CudaSystem.run(gpu -> {
                            @Nonnull final CudaDevice.CudaTensorDescriptor viewDescriptor = gpu.newTensorDescriptor(
                                precision, length,
                                outputDimensions[2], outputDimensions[1], outputDimensions[0],
                                inputDimensions[2] * inputDimensions[1] * inputDimensions[0],
                                inputDimensions[1] * inputDimensions[0],
                                inputDimensions[0], 1);
                            final int byteOffset = viewDescriptor.cStride * leg.fromBand * precision.size;
                            assert delta.length() == inputData.length();
                            assert passback.getDeviceId() == gpu.getDeviceId();
                            // assert error.stream().flatMapToDouble(x-> Arrays.stream(x.getData())).allMatch(Double::isFinite);
                            @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
                            @Nonnull final CudaMemory passbackBuffer = passback.getMemory(gpu);
                            CudaMemory errorPtrMemory = deltaTensor.getMemory(gpu);
                            passbackBuffer.synchronize();
                            gpu.cudnnTransformTensor(precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), errorPtrMemory.getPtr(), precision.getPointer(0.0), viewDescriptor.getPtr(), passbackBuffer.getPtr().withByteOffset(byteOffset));
                            errorPtrMemory.dirty();
                            passbackBuffer.dirty();
                            Stream.<ReferenceCounting>of(deltaTensor, viewDescriptor, passbackBuffer, errorPtrMemory).forEach(ReferenceCounting::freeRef);
                        }, passback);
                    }
                    if (counter.incrementAndGet() >= legs.size()) {
                        counter.set(0);
                        input.accumulate(ctx, CudaTensorList.create(passback, length, inputDims, precision));
                    }
                }) {

                    @Override
                    protected void _free() {
                        super._free();
                        input.freeRef();
                        passback.freeRef();
                    }
                });
            }).toArray(i -> new Result[i]);
            return sumInputsLayer.setParallel(parallel).setPrecision(precision).evalAndFree(legResults);
        } finally {
            sumInputsLayer.freeRef();
            input.freeRef();
        }
    } finally {
        passback.freeRef();
    }
}
Also used: IntStream(java.util.stream.IntStream), JsonObject(com.google.gson.JsonObject), CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory), LoggerFactory(org.slf4j.LoggerFactory), ReferenceCountingBase(com.simiacryptus.mindseye.lang.ReferenceCountingBase), Result(com.simiacryptus.mindseye.lang.Result), DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer), ArrayList(java.util.ArrayList), Precision(com.simiacryptus.mindseye.lang.cudnn.Precision), AtomicInteger(java.util.concurrent.atomic.AtomicInteger), Map(java.util.Map), Layer(com.simiacryptus.mindseye.lang.Layer), ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting), Nonnull(javax.annotation.Nonnull), Nullable(javax.annotation.Nullable), Logger(org.slf4j.Logger), CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice), CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor), CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList), JsonArray(com.google.gson.JsonArray), List(java.util.List), LayerBase(com.simiacryptus.mindseye.lang.LayerBase), Stream(java.util.stream.Stream), CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem), TensorList(com.simiacryptus.mindseye.lang.TensorList), MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType), DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet)
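
The subtle part of this accumulator is address arithmetic: each leg writes its delta into the shared passback buffer at byteOffset = viewDescriptor.cStride * leg.fromBand * precision.size, so its bands land in the correct channel slots of the full-width NCHW layout. A small CPU sketch of that offset computation follows; all dimensions and the class name are illustrative assumptions.

/** Illustrates the byte-offset computation used to place a leg's bands inside the full passback tensor. */
public class BandOffsetSketch {
    public static void main(String[] args) {
        final int width = 32, height = 32, bands = 16; // illustrative full input dimensions
        final int fromBand = 4;                        // illustrative leg start band
        final int precisionSize = 8;                   // bytes per element at double precision
        // NCHW strides of a packed tensor, matching the viewDescriptor built above.
        final int wStride = 1;
        final int hStride = width;
        final int cStride = height * width;
        final int nStride = bands * height * width;
        // Offset, in bytes, of the first element of 'fromBand' within one image.
        final long byteOffset = (long) cStride * fromBand * precisionSize;
        System.out.println("strides: w=" + wStride + " h=" + hStride + " c=" + cStride + " n=" + nStride
            + "; byteOffset=" + byteOffset);
        // cudnnTransformTensor then writes the leg's (toBand - fromBand) channels starting at that offset,
        // while nStride keeps successive images of the batch from overlapping.
    }
}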

Example 5 with CudaMemory

Use of com.simiacryptus.mindseye.lang.cudnn.CudaMemory in project MindsEye by SimiaCryptus.

Class ImgCropLayer, method copy:

/**
 * Copies a cuda tensor into a buffer with the given output dimensions, centering the
 * overlapping view between the input and output extents.
 *
 * @param gpu              the cuDNN handle to execute on
 * @param input            the input tensor
 * @param length           the batch length
 * @param inputDimensions  the input dimensions
 * @param outputDimensions the output dimensions
 * @param dirty            the dirty flag passed to the output allocation
 * @param precision        the numeric precision
 * @return the copied cuda tensor
 */
public static CudaTensor copy(final CudnnHandle gpu, final CudaTensor input, final int length, final int[] inputDimensions, final int[] outputDimensions, final boolean dirty, Precision precision) {
    if (3 != inputDimensions.length)
        throw new IllegalArgumentException("inputDimensions.length");
    if (3 != outputDimensions.length)
        throw new IllegalArgumentException("dimOut.length");
    if (inputDimensions[2] != outputDimensions[2]) {
        throw new IllegalArgumentException(String.format("%d != %d", inputDimensions[2], outputDimensions[2]));
    }
    // log.info(String.format("offset=%d,%d", offsetX, offsetY));
    @Nonnull final int[] viewDim = getViewDimensions(inputDimensions, outputDimensions);
    int sourceOffset = 0;
    int destinationOffset = 0;
    if (inputDimensions[0] < outputDimensions[0]) {
        destinationOffset += (outputDimensions[0] - inputDimensions[0]) / 2;
    } else {
        sourceOffset += (inputDimensions[0] - outputDimensions[0]) / 2;
    }
    if (inputDimensions[1] < outputDimensions[1]) {
        destinationOffset += outputDimensions[0] * ((outputDimensions[1] - inputDimensions[1]) / 2);
    } else {
        sourceOffset += input.descriptor.hStride * ((inputDimensions[1] - outputDimensions[1]) / 2);
    }
    assert sourceOffset >= 0;
    assert destinationOffset >= 0;
    assert sourceOffset + Tensor.length(viewDim) <= Tensor.length(inputDimensions);
    assert destinationOffset + Tensor.length(viewDim) <= Tensor.length(outputDimensions);
    @Nonnull final CudaDevice.CudaTensorDescriptor sourceViewDescriptor = gpu.newTensorDescriptor(
        precision, length,
        viewDim[2], viewDim[1], viewDim[0],
        input.descriptor.nStride,
        input.descriptor.cStride,
        input.descriptor.hStride,
        input.descriptor.wStride);
    CudaMemory inputTensorMemory = input.getMemory(gpu);
    try {
        if (Arrays.equals(viewDim, outputDimensions)) {
            assert sourceOffset >= 0;
            assert destinationOffset == 0;
            return CudaTensor.wrap(inputTensorMemory.withByteOffset(sourceOffset * precision.size), sourceViewDescriptor, precision);
        }
        @Nonnull final CudaDevice.CudaTensorDescriptor destinationViewDescriptor = gpu.newTensorDescriptor(
            precision, length,
            viewDim[2], viewDim[1], viewDim[0],
            outputDimensions[2] * outputDimensions[1] * outputDimensions[0],
            outputDimensions[1] * outputDimensions[0],
            outputDimensions[0], 1);
        @Nonnull final CudaMemory outputBuffer = gpu.allocate((long) length * outputDimensions[2] * outputDimensions[1] * outputDimensions[0] * precision.size, MemoryType.Managed.normalize(), dirty);
        CudaSystem.handle(gpu.cudnnTransformTensor(precision.getPointer(1.0), sourceViewDescriptor.getPtr(), inputTensorMemory.getPtr().withByteOffset(sourceOffset * precision.size), precision.getPointer(0.0), destinationViewDescriptor.getPtr(), outputBuffer.getPtr().withByteOffset(destinationOffset * precision.size)));
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
        inputTensorMemory.dirty();
        outputBuffer.dirty();
        Stream.<ReferenceCounting>of(sourceViewDescriptor, destinationViewDescriptor).forEach(ReferenceCounting::freeRef);
        CudaDevice.CudaTensorDescriptor descriptorCudaResource = gpu.newTensorDescriptor(
            precision, length,
            outputDimensions[2], outputDimensions[1], outputDimensions[0],
            outputDimensions[2] * outputDimensions[1] * outputDimensions[0],
            outputDimensions[1] * outputDimensions[0],
            outputDimensions[0], 1);
        return CudaTensor.wrap(outputBuffer, descriptorCudaResource, precision);
    } finally {
        inputTensorMemory.freeRef();
    }
}
Also used: CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice), ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting), Nonnull(javax.annotation.Nonnull), CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory)
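
The copy reduces to centered offset arithmetic plus a single cudnnTransformTensor over the overlapping view. Below is a CPU sketch of just the offsets, assuming getViewDimensions returns the per-dimension overlap (the minimum of input and output extents, as the bounds asserts above imply); the class name and sample dimensions are illustrative.

import java.util.Arrays;

/** Reproduces the centered crop/pad offset arithmetic from ImgCropLayer.copy on the CPU. */
public class CropOffsetSketch {
    public static void main(String[] args) {
        final int[] in = { 8, 8, 3 };  // illustrative input  {width, height, bands}
        final int[] out = { 4, 6, 3 }; // illustrative output {width, height, bands}
        // Assumed behavior of getViewDimensions: the overlap region, i.e. the per-dimension minimum.
        final int[] view = { Math.min(in[0], out[0]), Math.min(in[1], out[1]), in[2] };
        int sourceOffset = 0, destinationOffset = 0;
        // Center horizontally: crop the source or shift into the destination.
        if (in[0] < out[0]) destinationOffset += (out[0] - in[0]) / 2;
        else sourceOffset += (in[0] - out[0]) / 2;
        // Center vertically, stepping in whole rows (for a packed tensor, hStride equals the width).
        if (in[1] < out[1]) destinationOffset += out[0] * ((out[1] - in[1]) / 2);
        else sourceOffset += in[0] * ((in[1] - out[1]) / 2);
        System.out.println("view=" + Arrays.toString(view)
            + " sourceOffset=" + sourceOffset + " destinationOffset=" + destinationOffset);
        // With in={8,8,3} and out={4,6,3}: view={4,6,3}, sourceOffset=2 + 8*1 = 10, destinationOffset=0.
    }
}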

Aggregations

CudaMemory (com.simiacryptus.mindseye.lang.cudnn.CudaMemory): 26 uses
CudaDevice (com.simiacryptus.mindseye.lang.cudnn.CudaDevice): 25 uses
Nonnull (javax.annotation.Nonnull): 25 uses
Nullable (javax.annotation.Nullable): 21 uses
CudaTensor (com.simiacryptus.mindseye.lang.cudnn.CudaTensor): 20 uses
CudaTensorList (com.simiacryptus.mindseye.lang.cudnn.CudaTensorList): 18 uses
DeltaSet (com.simiacryptus.mindseye.lang.DeltaSet): 17 uses
ReferenceCounting (com.simiacryptus.mindseye.lang.ReferenceCounting): 17 uses
Result (com.simiacryptus.mindseye.lang.Result): 17 uses
TensorList (com.simiacryptus.mindseye.lang.TensorList): 17 uses
MemoryType (com.simiacryptus.mindseye.lang.cudnn.MemoryType): 14 uses
JsonObject (com.google.gson.JsonObject): 13 uses
DataSerializer (com.simiacryptus.mindseye.lang.DataSerializer): 13 uses
Layer (com.simiacryptus.mindseye.lang.Layer): 13 uses
LayerBase (com.simiacryptus.mindseye.lang.LayerBase): 13 uses
CudaSystem (com.simiacryptus.mindseye.lang.cudnn.CudaSystem): 13 uses
Precision (com.simiacryptus.mindseye.lang.cudnn.Precision): 13 uses
List (java.util.List): 13 uses
Map (java.util.Map): 13 uses
Arrays (java.util.Arrays): 12 uses