Example 6 with ReferenceCounting

Use of com.simiacryptus.mindseye.lang.ReferenceCounting in project MindsEye by SimiaCryptus.

The class MeanSqLossLayer, method eval:

@Nonnull
@Override
public Result eval(@Nonnull final Result... inObj) {
    if (2 != inObj.length)
        throw new IllegalArgumentException();
    final int leftLength = inObj[0].getData().length();
    final int rightLength = inObj[1].getData().length();
    Arrays.stream(inObj).forEach(ReferenceCounting::addRef);
    if (leftLength != rightLength && leftLength != 1 && rightLength != 1) {
        throw new IllegalArgumentException(leftLength + " != " + rightLength);
    }
    @Nonnull final Tensor[] diffs = new Tensor[leftLength];
    return new Result(TensorArray.wrap(IntStream.range(0, leftLength).mapToObj(dataIndex -> {
        @Nullable final Tensor a = inObj[0].getData().get(1 == leftLength ? 0 : dataIndex);
        @Nullable final Tensor b = inObj[1].getData().get(1 == rightLength ? 0 : dataIndex);
        if (a.length() != b.length()) {
            throw new IllegalArgumentException(String.format("%s != %s", Arrays.toString(a.getDimensions()), Arrays.toString(b.getDimensions())));
        }
        @Nonnull final Tensor r = a.minus(b);
        a.freeRef();
        b.freeRef();
        diffs[dataIndex] = r;
        @Nonnull Tensor statsTensor = new Tensor(new double[] { r.sumSq() / r.length() }, 1);
        return statsTensor;
    }).toArray(i -> new Tensor[i])), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList data) -> {
        if (inObj[0].isAlive()) {
            Stream<Tensor> tensorStream = IntStream.range(0, data.length()).parallel().mapToObj(dataIndex -> {
                @Nullable Tensor tensor = data.get(dataIndex);
                Tensor diff = diffs[dataIndex];
                @Nullable Tensor scale = diff.scale(tensor.get(0) * 2.0 / diff.length());
                tensor.freeRef();
                return scale;
            }).collect(Collectors.toList()).stream();
            if (1 == leftLength) {
                tensorStream = Stream.of(tensorStream.reduce((a, b) -> {
                    @Nullable Tensor c = a.addAndFree(b);
                    b.freeRef();
                    return c;
                }).get());
            }
            @Nonnull final TensorList array = TensorArray.wrap(tensorStream.toArray(i -> new Tensor[i]));
            inObj[0].accumulate(buffer, array);
        }
        if (inObj[1].isAlive()) {
            Stream<Tensor> tensorStream = IntStream.range(0, data.length()).parallel().mapToObj(dataIndex -> {
                @Nullable Tensor tensor = data.get(dataIndex);
                @Nullable Tensor scale = diffs[dataIndex].scale(tensor.get(0) * 2.0 / diffs[dataIndex].length());
                tensor.freeRef();
                return scale;
            }).collect(Collectors.toList()).stream();
            if (1 == rightLength) {
                tensorStream = Stream.of(tensorStream.reduce((a, b) -> {
                    @Nullable Tensor c = a.addAndFree(b);
                    b.freeRef();
                    return c;
                }).get());
            }
            @Nonnull final TensorList array = TensorArray.wrap(tensorStream.map(x -> {
                @Nullable Tensor scale = x.scale(-1);
                x.freeRef();
                return scale;
            }).toArray(i -> new Tensor[i]));
            inObj[1].accumulate(buffer, array);
        }
    }) {

        @Override
        protected void _free() {
            Arrays.stream(inObj).forEach(ReferenceCounting::freeRef);
            Arrays.stream(diffs).forEach(ReferenceCounting::freeRef);
        }

        @Override
        public boolean isAlive() {
            return inObj[0].isAlive() || inObj[1].isAlive();
        }
    };
}
Also used : IntStream(java.util.stream.IntStream) JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) Logger(org.slf4j.Logger) LoggerFactory(org.slf4j.LoggerFactory) Tensor(com.simiacryptus.mindseye.lang.Tensor) Result(com.simiacryptus.mindseye.lang.Result) Collectors(java.util.stream.Collectors) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) Stream(java.util.stream.Stream) TensorList(com.simiacryptus.mindseye.lang.TensorList) Map(java.util.Map) Layer(com.simiacryptus.mindseye.lang.Layer) TensorArray(com.simiacryptus.mindseye.lang.TensorArray) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable)
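For orientation, the per-example arithmetic this layer performs can be written out in plain Java. This is a minimal sketch assuming equally sized inputs; the class and method names below are hypothetical and not part of the MindsEye API.

// Sketch of the MeanSqLossLayer math: forward emits sumSq(a - b) / N per example,
// backward scales the stored difference by 2 * delta / N (negated for the right input).
final class MeanSqLossSketch {

    static double forward(double[] a, double[] b) {
        double sumSq = 0;
        for (int i = 0; i < a.length; i++) {
            double d = a[i] - b[i];
            sumSq += d * d;
        }
        return sumSq / a.length;
    }

    static double[] backwardLeft(double[] a, double[] b, double delta) {
        double[] grad = new double[a.length];
        for (int i = 0; i < a.length; i++) {
            grad[i] = (a[i] - b[i]) * 2.0 * delta / a.length;
        }
        return grad; // the gradient w.r.t. b is this array negated, matching scale(-1) above
    }
}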

Example 7 with ReferenceCounting

Use of com.simiacryptus.mindseye.lang.ReferenceCounting in project MindsEye by SimiaCryptus.

The class ImgTileSelectLayer, method copy:

/**
 * Copy cuda tensor.
 *
 * @param gpu              the gpu
 * @param input            the input
 * @param inputDimensions  the input dimensions
 * @param outputDimensions the output dimensions
 * @param positionX        the position x
 * @param positionY        the position y
 * @param precision        the precision
 * @param outputPtr        the output ptr
 * @return the cuda tensor
 */
public static CudaTensor copy(final CudnnHandle gpu, @Nonnull final TensorList input, final int[] inputDimensions, final int[] outputDimensions, final int positionX, final int positionY, final Precision precision, final CudaMemory outputPtr) {
    final int length = input.length();
    if (3 != inputDimensions.length)
        throw new IllegalArgumentException("inputDimensions.length");
    if (3 != outputDimensions.length)
        throw new IllegalArgumentException("dimOut.length");
    int bands = inputDimensions[2];
    if (bands != outputDimensions[2])
        throw new IllegalArgumentException(String.format("%d != %d", bands, outputDimensions[2]));
    // log.info(String.format("offset=%d,%d", offsetX, offsetY));
    @Nonnull final int[] viewDim = getViewDimensions(inputDimensions, outputDimensions, new int[] { positionX, positionY, 0 });
    @Nullable final CudaTensor inputTensor = gpu.getTensor(input, precision, MemoryType.Device, false);
    int sourceOffset = 0;
    int destinationOffset = 0;
    if (positionX < 0) {
        destinationOffset += Math.abs(positionX);
    } else {
        sourceOffset += Math.abs(positionX);
    }
    if (positionY < 0) {
        destinationOffset += outputDimensions[0] * Math.abs((positionY));
    } else {
        sourceOffset += inputTensor.descriptor.hStride * (Math.abs(positionY));
    }
    assert sourceOffset >= 0;
    assert destinationOffset >= 0;
    assert sourceOffset + Tensor.length(viewDim) <= Tensor.length(inputDimensions);
    assert destinationOffset + Tensor.length(viewDim) <= Tensor.length(outputDimensions);
    @Nonnull final CudaDevice.CudaTensorDescriptor sourceViewDescriptor = gpu.newTensorDescriptor(
        precision,
        length,
        viewDim[2],
        viewDim[1],
        viewDim[0],
        inputTensor.descriptor.nStride,
        inputTensor.descriptor.cStride,
        inputTensor.descriptor.hStride,
        inputTensor.descriptor.wStride);
    CudaMemory inputTensorMemory = inputTensor.getMemory(gpu);
    try {
        if (Arrays.equals(viewDim, outputDimensions)) {
            assert sourceOffset >= 0;
            assert destinationOffset == 0;
            return CudaTensor.wrap(inputTensorMemory.withByteOffset(sourceOffset * precision.size), sourceViewDescriptor, precision);
        }
        @Nonnull final CudaDevice.CudaTensorDescriptor destinationViewDescriptor = gpu.newTensorDescriptor(
            precision,
            length,
            viewDim[2],
            viewDim[1],
            viewDim[0],
            outputDimensions[2] * outputDimensions[1] * outputDimensions[0],
            outputDimensions[1] * outputDimensions[0],
            outputDimensions[0],
            1);
        CudaSystem.handle(gpu.cudnnTransformTensor(precision.getPointer(1.0), sourceViewDescriptor.getPtr(), inputTensorMemory.getPtr().withByteOffset(sourceOffset * precision.size), precision.getPointer(1.0), destinationViewDescriptor.getPtr(), outputPtr.getPtr().withByteOffset(destinationOffset * precision.size)));
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
        outputPtr.dirty();
        inputTensorMemory.dirty();
        Stream.<ReferenceCounting>of(sourceViewDescriptor, destinationViewDescriptor).forEach(ReferenceCounting::freeRef);
        @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor = gpu.newTensorDescriptor(
            precision,
            length,
            outputDimensions[2],
            outputDimensions[1],
            outputDimensions[0],
            outputDimensions[2] * outputDimensions[1] * outputDimensions[0],
            outputDimensions[1] * outputDimensions[0],
            outputDimensions[0],
            1);
        Stream.<ReferenceCounting>of(inputTensor).forEach(ReferenceCounting::freeRef);
        return CudaTensor.wrap(outputPtr, passbackDescriptor, precision);
    } finally {
        inputTensorMemory.freeRef();
    }
}
Also used : CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) Nullable(javax.annotation.Nullable)
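The offset bookkeeping above follows one rule: a negative tile position shifts where the copy lands in the destination, a positive position shifts where reading starts in the source. A minimal sketch of that rule, with hypothetical names and a packed row-major layout assumed:

// Hypothetical helper illustrating the source/destination offset rule in ImgTileSelectLayer.copy.
final class TileOffsets {
    final int sourceOffset;
    final int destinationOffset;

    TileOffsets(int positionX, int positionY, int sourceRowStride, int destinationRowStride) {
        int src = 0;
        int dst = 0;
        // A tile that starts "before" the source image writes further into the destination;
        // otherwise reading simply starts further into the source.
        if (positionX < 0) dst += -positionX; else src += positionX;
        if (positionY < 0) dst += destinationRowStride * -positionY; else src += sourceRowStride * positionY;
        this.sourceOffset = src;
        this.destinationOffset = dst;
    }
}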

Example 8 with ReferenceCounting

Use of com.simiacryptus.mindseye.lang.ReferenceCounting in project MindsEye by SimiaCryptus.

The class PoolingLayer, method evalAndFree:

@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
    if (!CudaSystem.isEnabled())
        return getCompatibilityLayer().evalAndFree(inObj);
    final int poolDims = 2;
    @Nonnull final int[] windowSize = { windowX, windowY };
    @Nonnull final int[] padding = { paddingX, paddingY };
    @Nonnull final int[] stride = { strideX, strideY };
    final Result input = inObj[0];
    final TensorList inputData = input.getData();
    @Nonnull final int[] inputSize = inputData.getDimensions();
    final int length = inputData.length();
    final int inputDims = Tensor.length(inputSize);
    @Nonnull final int[] outputSize = new int[4];
    final CudaTensor outputData = CudaSystem.run(gpu -> {
        try {
            gpu.initThread();
            @Nonnull final CudaResource<cudnnPoolingDescriptor> poolingDesc = gpu.createPoolingDescriptor(mode.id, poolDims, windowSize, padding, stride);
            @Nullable final CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
            CudaSystem.handle(CudaSystem.cudnnGetPoolingNdForwardOutputDim(poolingDesc.getPtr(), inputTensor.descriptor.getPtr(), 4, outputSize));
            assert inputSize[2] == outputSize[1];
            @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, outputSize[0], outputSize[1], outputSize[2], outputSize[3], outputSize[1] * outputSize[2] * outputSize[3], outputSize[2] * outputSize[3], outputSize[3], 1);
            @Nonnull final CudaMemory outputTensor = gpu.allocate((long) precision.size * Tensor.length(outputSize), MemoryType.Managed.normalize(), true);
            CudaMemory inputDataMemory = inputTensor.getMemory(gpu);
            CudaSystem.handle(gpu.cudnnPoolingForward(poolingDesc.getPtr(), precision.getPointer(alpha), inputTensor.descriptor.getPtr(), inputDataMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputTensor.getPtr()));
            assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
            inputDataMemory.dirty();
            outputTensor.dirty();
            Stream.<ReferenceCounting>of(inputTensor, poolingDesc, inputDataMemory).forEach(ReferenceCounting::freeRef);
            return CudaTensor.wrap(outputTensor, outputDescriptor, precision);
        } catch (@Nonnull final Throwable e) {
            throw new ComponentException("Error", e);
        }
    }, inputData);
    return new Result(CudaTensorList.create(outputData, length, new int[] { outputSize[3], outputSize[2], outputSize[1] }, precision), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList error) -> {
        assert error.length() == inputData.length();
        if (input.isAlive()) {
            TensorList data = CudaSystem.run(gpu -> {
                @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor = gpu.newTensorDescriptor(precision, length, inputSize[2], inputSize[1], inputSize[0], inputSize[2] * inputSize[1] * inputSize[0], inputSize[1] * inputSize[0], inputSize[0], 1);
                @Nonnull final CudaResource<cudnnPoolingDescriptor> poolingDesc = gpu.createPoolingDescriptor(mode.id, poolDims, windowSize, padding, stride);
                @Nullable final CudaTensor inputTensor;
                synchronized (gpu) {
                    inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, true);
                }
                @Nullable final CudaTensor errorPtr;
                synchronized (gpu) {
                    errorPtr = gpu.getTensor(error, precision, MemoryType.Device, true);
                }
                @Nonnull final CudaMemory passbackBuffer = gpu.allocate((long) inputDims * precision.size * length, MemoryType.Managed.normalize(), true);
                CudaMemory outputDataMemory = outputData.getMemory(gpu);
                CudaMemory errorPtrMemory = errorPtr.getMemory(gpu);
                CudaMemory inputDataMemory = inputTensor.getMemory(gpu);
                CudaSystem.handle(gpu.cudnnPoolingBackward(poolingDesc.getPtr(), precision.getPointer(this.alpha), outputData.descriptor.getPtr(), outputDataMemory.getPtr(), errorPtr.descriptor.getPtr(), errorPtrMemory.getPtr(), inputTensor.descriptor.getPtr(), inputDataMemory.getPtr(), precision.getPointer(0.0), passbackDescriptor.getPtr(), passbackBuffer.getPtr()));
                outputDataMemory.dirty();
                errorPtrMemory.dirty();
                inputDataMemory.dirty();
                passbackBuffer.dirty();
                Stream.<ReferenceCounting>of(errorPtr, inputTensor, poolingDesc, outputDataMemory, errorPtrMemory, inputDataMemory).forEach(ReferenceCounting::freeRef);
                return CudaTensorList.wrap(CudaTensor.wrap(passbackBuffer, passbackDescriptor, precision), length, inputSize, precision);
            }, error);
            input.accumulate(buffer, data);
        }
    }) {

        @Override
        protected void _free() {
            Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
            inputData.freeRef();
            outputData.freeRef();
        }

        @Override
        public boolean isAlive() {
            return input.isAlive() || !isFrozen();
        }
    };
}
Also used : CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) Nonnull(javax.annotation.Nonnull) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) TensorList(com.simiacryptus.mindseye.lang.TensorList) Result(com.simiacryptus.mindseye.lang.Result) jcuda.jcudnn.cudnnPoolingDescriptor(jcuda.jcudnn.cudnnPoolingDescriptor) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) ComponentException(com.simiacryptus.mindseye.lang.ComponentException) Nullable(javax.annotation.Nullable)
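The tensor descriptors in this example (and in the preceding ones) all pass packed NCHW strides: each stride is the number of elements to skip to advance one step along that axis. A small sketch of that computation with a hypothetical helper name (the real call is gpu.newTensorDescriptor):

// Hypothetical helper producing the packed NCHW strides passed to newTensorDescriptor above.
final class NchwStrides {
    // Returns { nStride, cStride, hStride, wStride } for a batch of channels x height x width images.
    static int[] of(int channels, int height, int width) {
        int wStride = 1;                            // neighbouring pixels in a row
        int hStride = width;                        // one row
        int cStride = height * width;               // one channel plane
        int nStride = channels * height * width;    // one image in the batch
        return new int[] { nStride, cStride, hStride, wStride };
    }
}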

Example 9 with ReferenceCounting

Use of com.simiacryptus.mindseye.lang.ReferenceCounting in project MindsEye by SimiaCryptus.

The class SquareActivationLayer, method evalAndFree:

@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
    if (!CudaSystem.isEnabled())
        return getCompatibilityLayer().evalAndFree(inObj);
    if (inObj.length != 1) {
        throw new IllegalArgumentException("inObj.length=" + inObj.length);
    }
    Result input = inObj[0];
    final TensorList inputData = input.getData();
    @Nonnull final int[] dimensions = inputData.getDimensions();
    final int length = inputData.length();
    if (3 != dimensions.length) {
        throw new IllegalArgumentException("dimensions=" + Arrays.toString(dimensions));
    }
    return new Result(CudaSystem.run(gpu -> {
        @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
        @Nullable final CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
        // assert inputTensor.size == rPtr.size;
        @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
        CudaMemory lPtrMemory = inputTensor.getMemory(gpu);
        CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(alpha), inputTensor.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
        outputPtr.dirty();
        lPtrMemory.dirty();
        outputPtr.dirty();
        lPtrMemory.freeRef();
        inputTensor.freeRef();
        opDescriptor.freeRef();
        CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, outputDescriptor, precision);
        return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
    }, inputData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
        if (input.isAlive()) {
            @Nonnull TensorList data = CudaSystem.run(gpu -> {
                @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
                @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
                @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
                delta.freeRef();
                @Nullable final CudaTensor inputTensor = gpu.getTensor(input.getData(), precision, MemoryType.Device, false);
                // assert deltaTensor.size == inputTensor.size;
                @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
                CudaMemory deltaTensorMemory = deltaTensor.getMemory(gpu);
                CudaMemory rightTensorMemory = inputTensor.getMemory(gpu);
                CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(2), deltaTensor.descriptor.getPtr(), deltaTensorMemory.getPtr(), precision.getPointer(alpha), inputTensor.descriptor.getPtr(), rightTensorMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
                deltaTensorMemory.dirty();
                rightTensorMemory.dirty();
                outputPtr.dirty();
                deltaTensorMemory.freeRef();
                rightTensorMemory.freeRef();
                CudaTensor cudaTensor = new CudaTensor(outputPtr, outputDescriptor, precision);
                Arrays.stream(new ReferenceCounting[] { deltaTensor, inputTensor, opDescriptor, outputDescriptor }).forEach(ReferenceCounting::freeRef);
                outputPtr.freeRef();
                return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
            }, delta);
            input.accumulate(buffer, data);
        } else {
            delta.freeRef();
        }
    }) {

        @Override
        public void accumulate(final DeltaSet<Layer> buffer, final TensorList delta) {
            getAccumulator().accept(buffer, delta);
        }

        @Override
        protected void _free() {
            inputData.freeRef();
            input.freeRef();
        }

        @Override
        public boolean isAlive() {
            for (@Nonnull final Result element : inObj) {
                if (element.isAlive()) {
                    return true;
                }
            }
            return false;
        }
    };
}
Also used : CudaResource(com.simiacryptus.mindseye.lang.cudnn.CudaResource) JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) jcuda.jcudnn.cudnnOpTensorOp(jcuda.jcudnn.cudnnOpTensorOp) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) Result(com.simiacryptus.mindseye.lang.Result) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) Precision(com.simiacryptus.mindseye.lang.cudnn.Precision) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem) TensorList(com.simiacryptus.mindseye.lang.TensorList) Map(java.util.Map) MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType) ProductInputsLayer(com.simiacryptus.mindseye.layers.java.ProductInputsLayer) Layer(com.simiacryptus.mindseye.lang.Layer) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) jcuda.jcudnn.cudnnOpTensorDescriptor(jcuda.jcudnn.cudnnOpTensorDescriptor)
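The two cudnnOpTensor calls amount to y = alpha * x * x on the forward pass and dL/dx = 2 * alpha * x * delta on the backward pass. A plain-Java sketch of the same elementwise math, using hypothetical names rather than the CUDA path:

// Hypothetical sketch of the elementwise math behind SquareActivationLayer.
final class SquareActivationSketch {

    static double[] forward(double[] x, double alpha) {
        double[] y = new double[x.length];
        for (int i = 0; i < x.length; i++) {
            y[i] = alpha * x[i] * x[i];
        }
        return y;
    }

    // d(alpha * x^2)/dx = 2 * alpha * x, multiplied elementwise by the incoming delta.
    static double[] backward(double[] x, double[] delta, double alpha) {
        double[] grad = new double[x.length];
        for (int i = 0; i < x.length; i++) {
            grad[i] = 2.0 * alpha * x[i] * delta[i];
        }
        return grad;
    }
}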

Example 10 with ReferenceCounting

Use of com.simiacryptus.mindseye.lang.ReferenceCounting in project MindsEye by SimiaCryptus.

The class DropoutNoiseLayer, method eval:

@Nonnull
@Override
public Result eval(final Result... inObj) {
    final Result inputResult = inObj[0];
    inputResult.addRef();
    final TensorList inputData = inputResult.getData();
    final int itemCnt = inputData.length();
    final Tensor[] mask = IntStream.range(0, itemCnt).mapToObj(dataIndex -> {
        @Nonnull final Random random = new Random(seed);
        @Nullable final Tensor input = inputData.get(dataIndex);
        @Nullable final Tensor output = input.map(x -> {
            if (seed == -1)
                return 1;
            return random.nextDouble() < getValue() ? 0 : (1.0 / getValue());
        });
        input.freeRef();
        return output;
    }).toArray(i -> new Tensor[i]);
    return new Result(TensorArray.wrap(IntStream.range(0, itemCnt).mapToObj(dataIndex -> {
        Tensor inputTensor = inputData.get(dataIndex);
        @Nullable final double[] input = inputTensor.getData();
        @Nullable final double[] maskT = mask[dataIndex].getData();
        @Nonnull final Tensor output = new Tensor(inputTensor.getDimensions());
        @Nullable final double[] outputData = output.getData();
        for (int i = 0; i < outputData.length; i++) {
            outputData[i] = input[i] * maskT[i];
        }
        inputTensor.freeRef();
        return output;
    }).toArray(i -> new Tensor[i])), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
        if (inputResult.isAlive()) {
            @Nonnull TensorArray tensorArray = TensorArray.wrap(IntStream.range(0, delta.length()).mapToObj(dataIndex -> {
                Tensor deltaTensor = delta.get(dataIndex);
                @Nullable final double[] deltaData = deltaTensor.getData();
                @Nullable final double[] maskData = mask[dataIndex].getData();
                @Nonnull final Tensor passback = new Tensor(deltaTensor.getDimensions());
                for (int i = 0; i < passback.length(); i++) {
                    passback.set(i, maskData[i] * deltaData[i]);
                }
                deltaTensor.freeRef();
                return passback;
            }).toArray(i -> new Tensor[i]));
            inputResult.accumulate(buffer, tensorArray);
        }
    }) {

        @Override
        protected void _free() {
            inputResult.freeRef();
            Arrays.stream(mask).forEach(ReferenceCounting::freeRef);
        }

        @Override
        public boolean isAlive() {
            return inputResult.isAlive() || !isFrozen();
        }
    };
}
Also used : IntStream(java.util.stream.IntStream) JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) Logger(org.slf4j.Logger) LoggerFactory(org.slf4j.LoggerFactory) Tensor(com.simiacryptus.mindseye.lang.Tensor) Random(java.util.Random) Result(com.simiacryptus.mindseye.lang.Result) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) TensorList(com.simiacryptus.mindseye.lang.TensorList) Map(java.util.Map) Layer(com.simiacryptus.mindseye.lang.Layer) TensorArray(com.simiacryptus.mindseye.lang.TensorArray) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable)
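Every example in this section follows the same discipline: a Result addRefs whatever it needs while building its output and accumulator, and releases those references exactly once in _free. A minimal sketch of that contract as a simplified counter; this is a hypothetical class, not MindsEye's ReferenceCountingBase, which layers lifecycle assertions and finalization on top of the same idea:

import java.util.concurrent.atomic.AtomicInteger;

// Simplified reference counter illustrating the addRef/freeRef discipline used above.
abstract class SimpleRefCounted {

    private final AtomicInteger refs = new AtomicInteger(1); // the creator holds the first reference

    public void addRef() {
        if (refs.getAndIncrement() <= 0) {
            throw new IllegalStateException("object already freed");
        }
    }

    public void freeRef() {
        int remaining = refs.decrementAndGet();
        if (remaining < 0) {
            throw new IllegalStateException("double free");
        }
        if (remaining == 0) {
            _free(); // release held resources exactly once, mirroring Result._free() above
        }
    }

    // Subclasses release the references they hold, as the anonymous Result subclasses do in _free().
    protected abstract void _free();
}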

Aggregations

ReferenceCounting (com.simiacryptus.mindseye.lang.ReferenceCounting) 32
Nonnull (javax.annotation.Nonnull) 32
Nullable (javax.annotation.Nullable) 30
TensorList (com.simiacryptus.mindseye.lang.TensorList) 28
Layer (com.simiacryptus.mindseye.lang.Layer) 27
Result (com.simiacryptus.mindseye.lang.Result) 26
Arrays (java.util.Arrays) 26
Map (java.util.Map) 24
DeltaSet (com.simiacryptus.mindseye.lang.DeltaSet) 23
List (java.util.List) 23
JsonObject (com.google.gson.JsonObject) 22
DataSerializer (com.simiacryptus.mindseye.lang.DataSerializer) 21
LayerBase (com.simiacryptus.mindseye.lang.LayerBase) 21
Tensor (com.simiacryptus.mindseye.lang.Tensor) 20
CudaDevice (com.simiacryptus.mindseye.lang.cudnn.CudaDevice) 20
CudaMemory (com.simiacryptus.mindseye.lang.cudnn.CudaMemory) 20
IntStream (java.util.stream.IntStream) 20
CudaTensor (com.simiacryptus.mindseye.lang.cudnn.CudaTensor) 19
CudaTensorList (com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) 18
Stream (java.util.stream.Stream) 18