
Example 6 with CudaTensorList

use of com.simiacryptus.mindseye.lang.cudnn.CudaTensorList in project MindsEye by SimiaCryptus.

the class CudaLayerTester method testNonstandardBoundsBackprop.

/**
 * Tests backpropagation with nonstandard (non-dense) tensor bounds and reports tolerance statistics.
 *
 * @param log            the notebook output log
 * @param layer          the layer under test
 * @param inputPrototype the input prototype tensors
 * @return the tolerance statistics
 */
@Nonnull
public ToleranceStatistics testNonstandardBoundsBackprop(final NotebookOutput log, @Nullable final Layer layer, @Nonnull final Tensor[] inputPrototype) {
    log.h2("Irregular Backprop");
    log.p("This layer should accept non-dense tensors as delta input.");
    return log.code(() -> {
        Tensor[] randomized = Arrays.stream(inputPrototype).map(x -> x.map(v -> getRandom())).toArray(i -> new Tensor[i]);
        logger.info("Input: " + Arrays.stream(randomized).map(Tensor::prettyPrint).collect(Collectors.toList()));
        Precision precision = Precision.Double;
        TensorList[] controlInput = Arrays.stream(randomized).map(original -> {
            return TensorArray.wrap(original);
        }).toArray(i -> new TensorList[i]);
        @Nonnull final SimpleResult testResult = CudaSystem.run(gpu -> {
            TensorList[] copy = copy(controlInput);
            SimpleResult result = new SimpleGpuEval(layer, gpu, copy) {

                @Nonnull
                @Override
                public TensorList getFeedback(@Nonnull final TensorList original) {
                    Tensor originalTensor = original.get(0).mapAndFree(x -> 1);
                    CudaTensorList cudaTensorList = buildIrregularCudaTensor(gpu, precision, originalTensor);
                    originalTensor.freeRef();
                    return cudaTensorList;
                }
            }.call();
            Arrays.stream(copy).forEach(ReferenceCounting::freeRef);
            return result;
        });
        @Nonnull final SimpleResult controlResult = CudaSystem.run(gpu -> {
            TensorList[] copy = copy(controlInput);
            SimpleResult result = SimpleGpuEval.run(layer, gpu, copy);
            Arrays.stream(copy).forEach(ReferenceCounting::freeRef);
            return result;
        }, 1);
        try {
            ToleranceStatistics compareOutput = compareOutput(controlResult, testResult);
            ToleranceStatistics compareDerivatives = compareDerivatives(controlResult, testResult);
            return compareDerivatives.combine(compareOutput);
        } finally {
            Arrays.stream(controlInput).forEach(ReferenceCounting::freeRef);
            controlResult.freeRef();
            testResult.freeRef();
        }
    });
}
Also used : IntStream(java.util.stream.IntStream) SimpleResult(com.simiacryptus.mindseye.test.SimpleResult) SimpleGpuEval(com.simiacryptus.mindseye.test.SimpleGpuEval) Arrays(java.util.Arrays) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) LoggerFactory(org.slf4j.LoggerFactory) Tensor(com.simiacryptus.mindseye.lang.Tensor) ReferenceCountingBase(com.simiacryptus.mindseye.lang.ReferenceCountingBase) Random(java.util.Random) Function(java.util.function.Function) Precision(com.simiacryptus.mindseye.lang.cudnn.Precision) CudnnHandle(com.simiacryptus.mindseye.lang.cudnn.CudnnHandle) Layer(com.simiacryptus.mindseye.lang.Layer) NotebookOutput(com.simiacryptus.util.io.NotebookOutput) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) IntFunction(java.util.function.IntFunction) Logger(org.slf4j.Logger) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) Collectors(java.util.stream.Collectors) Stream(java.util.stream.Stream) CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem) ToleranceStatistics(com.simiacryptus.mindseye.test.ToleranceStatistics) TensorList(com.simiacryptus.mindseye.lang.TensorList) MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType) TensorArray(com.simiacryptus.mindseye.lang.TensorArray)
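
The getFeedback override above replaces the usual backprop feedback with an all-ones delta packed into an irregular CudaTensorList built by buildIrregularCudaTensor (not shown here). The descriptors used throughout these examples encode a layout as (n, c, h, w) extents plus explicit strides; a layout is non-dense when its strides exceed the packed values c*h*w, h*w, w, 1, leaving gaps between elements. Below is a minimal, library-free sketch of that stride arithmetic; the class name NchwLayout is hypothetical, and only the extent/stride convention is taken from the newTensorDescriptor calls in these examples.

// Hypothetical illustration of NCHW stride arithmetic; not part of MindsEye.
public final class NchwLayout {
    final int n, c, h, w;
    final int nStride, cStride, hStride, wStride;

    NchwLayout(int n, int c, int h, int w, int nStride, int cStride, int hStride, int wStride) {
        this.n = n; this.c = c; this.h = h; this.w = w;
        this.nStride = nStride; this.cStride = cStride; this.hStride = hStride; this.wStride = wStride;
    }

    /** Packed (dense) layout, matching the strides passed to newTensorDescriptor above. */
    static NchwLayout dense(int n, int c, int h, int w) {
        return new NchwLayout(n, c, h, w, c * h * w, h * w, w, 1);
    }

    /** Flat buffer offset of element (i, j, y, x). */
    int offset(int i, int j, int y, int x) {
        return i * nStride + j * cStride + y * hStride + x * wStride;
    }

    /** A layout is dense exactly when its strides equal the packed strides. */
    boolean isDense() {
        return wStride == 1 && hStride == w && cStride == h * w && nStride == c * h * w;
    }

    public static void main(String[] args) {
        NchwLayout dense = NchwLayout.dense(2, 3, 4, 5);
        // An "irregular" layout: same logical extents, but each row padded to 8 elements.
        NchwLayout padded = new NchwLayout(2, 3, 4, 5, 3 * 4 * 8, 4 * 8, 8, 1);
        System.out.println(dense.isDense() + " vs " + padded.isDense());   // true vs false
        System.out.println(dense.offset(1, 2, 3, 4) + " vs " + padded.offset(1, 2, 3, 4));
    }
}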

Example 7 with CudaTensorList

use of com.simiacryptus.mindseye.lang.cudnn.CudaTensorList in project MindsEye by SimiaCryptus.

the class ProductLayer method evalAndFree.

@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
    if (!CudaSystem.isEnabled())
        return getCompatibilityLayer().evalAndFree(inObj);
    if (inObj.length != 2) {
        throw new IllegalArgumentException("inObj.length=" + inObj.length);
    }
    Result left = inObj[0];
    Result right = inObj[1];
    final TensorList leftData = left.getData();
    final TensorList rightData = right.getData();
    @Nonnull final int[] leftDimensions = leftData.getDimensions();
    @Nonnull final int[] rightDimensions = rightData.getDimensions();
    final int length = leftData.length();
    if (3 != leftDimensions.length) {
        throw new IllegalArgumentException("dimensions=" + Arrays.toString(leftDimensions));
    }
    return new Result(CudaSystem.run(gpu -> {
        @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, leftDimensions[2], leftDimensions[1], leftDimensions[0], leftDimensions[2] * leftDimensions[1] * leftDimensions[0], leftDimensions[1] * leftDimensions[0], leftDimensions[0], 1);
        @Nullable final CudaTensor lPtr = gpu.getTensor(leftData, precision, MemoryType.Device, false);
        @Nullable final CudaTensor rPtr = gpu.getTensor(rightData, precision, MemoryType.Device, false);
        // assert lPtr.size == rPtr.size;
        @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
        CudaMemory lPtrMemory = lPtr.getMemory(gpu);
        CudaMemory rPtrMemory = rPtr.getMemory(gpu);
        CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(1.0), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(1.0), rPtr.descriptor.getPtr(), rPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
        lPtrMemory.dirty();
        rPtrMemory.dirty();
        outputPtr.dirty();
        lPtrMemory.freeRef();
        rPtrMemory.freeRef();
        rPtr.freeRef();
        lPtr.freeRef();
        opDescriptor.freeRef();
        CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, outputDescriptor, precision);
        return CudaTensorList.wrap(cudaTensor, length, leftDimensions, precision);
    }, leftData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
        if (left.isAlive()) {
            @Nonnull TensorList data = CudaSystem.run(gpu -> {
                @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
                @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, leftDimensions[2], leftDimensions[1], leftDimensions[0], leftDimensions[2] * leftDimensions[1] * leftDimensions[0], leftDimensions[1] * leftDimensions[0], leftDimensions[0], 1);
                @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, false);
                @Nullable final CudaTensor rightTensor = gpu.getTensor(right.getData(), precision, MemoryType.Device, false);
                // assert deltaTensor.size == rightTensor.size;
                @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
                CudaMemory deltaTensorMemory = deltaTensor.getMemory(gpu);
                CudaMemory rightTensorMemory = rightTensor.getMemory(gpu);
                CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), deltaTensorMemory.getPtr(), precision.getPointer(1.0), rightTensor.descriptor.getPtr(), rightTensorMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
                deltaTensorMemory.dirty();
                rightTensorMemory.dirty();
                outputPtr.dirty();
                deltaTensorMemory.freeRef();
                rightTensorMemory.freeRef();
                CudaTensor cudaTensor = new CudaTensor(outputPtr, outputDescriptor, precision);
                Arrays.stream(new ReferenceCounting[] { deltaTensor, rightTensor, opDescriptor, outputDescriptor }).forEach(ReferenceCounting::freeRef);
                outputPtr.freeRef();
                return CudaTensorList.wrap(cudaTensor, length, leftDimensions, precision);
            }, delta);
            left.accumulate(buffer, data);
        }
        if (right.isAlive()) {
            @Nonnull TensorList data = CudaSystem.run(gpu -> {
                @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
                @Nonnull final CudaDevice.CudaTensorDescriptor expandedDescriptor = gpu.newTensorDescriptor(precision, length, leftDimensions[2], leftDimensions[1], leftDimensions[0], leftDimensions[2] * leftDimensions[1] * leftDimensions[0], leftDimensions[1] * leftDimensions[0], leftDimensions[0], 1);
                @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, false);
                delta.freeRef();
                @Nullable final CudaTensor leftTensor = gpu.getTensor(left.getData(), precision, MemoryType.Device, false);
// assert deltaTensor.size == leftTensor.size;
                @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * expandedDescriptor.nStride * length, MemoryType.Device, true);
                CudaMemory deltaTensorMemory = deltaTensor.getMemory(gpu);
                CudaMemory leftTensorMemory = leftTensor.getMemory(gpu);
                CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), deltaTensorMemory.getPtr(), precision.getPointer(1.0), leftTensor.descriptor.getPtr(), leftTensorMemory.getPtr(), precision.getPointer(0.0), expandedDescriptor.getPtr(), outputPtr.getPtr()));
                deltaTensorMemory.dirty();
                leftTensorMemory.dirty();
                outputPtr.dirty();
                if (Arrays.equals(rightDimensions, leftDimensions) && length == rightData.length()) {
                    deltaTensorMemory.freeRef();
                    leftTensorMemory.freeRef();
                    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
                    outputPtr.dirty();
                    CudaTensor cudaTensor = new CudaTensor(outputPtr, expandedDescriptor, precision);
                    Stream.of(deltaTensor, leftTensor, opDescriptor, expandedDescriptor, outputPtr).forEach(ReferenceCounting::freeRef);
                    CudaTensorList tensorList = CudaTensorList.wrap(cudaTensor, length, rightDimensions, precision);
                    return tensorList;
                } else {
                    @Nonnull final CudaDevice.CudaTensorDescriptor reducedOutputDescriptor = gpu.newTensorDescriptor(precision, rightData.length(), rightDimensions[2], rightDimensions[1], rightDimensions[0], rightDimensions[2] * rightDimensions[1] * rightDimensions[0], rightDimensions[1] * rightDimensions[0], rightDimensions[0], 1);
                    long size = (long) precision.size * reducedOutputDescriptor.nStride * rightData.length();
                    @Nonnull final CudaMemory reducedOutputPtr = gpu.allocate(size, MemoryType.Managed, true);
                    CudaResource<cudnnReduceTensorDescriptor> reduceTensorDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_ADD, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
                    @Nonnull final CudaMemory workspacePtr = gpu.allocate(outputPtr.size, MemoryType.Device, true);
                    @Nonnull final CudaMemory indexPtr = gpu.allocate(3, MemoryType.Device, false);
                    // outputPtr.synchronize();
                    gpu.cudnnReduceTensor(reduceTensorDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(1.0), expandedDescriptor.getPtr(), outputPtr.getPtr(), precision.getPointer(0.0), reducedOutputDescriptor.getPtr(), reducedOutputPtr.getPtr());
                    reducedOutputPtr.dirty();
                    workspacePtr.dirty();
                    outputPtr.dirty();
                    deltaTensorMemory.freeRef();
                    leftTensorMemory.freeRef();
                    CudaTensor cudaTensor = new CudaTensor(reducedOutputPtr, reducedOutputDescriptor, precision);
                    Stream.of(deltaTensor, leftTensor, opDescriptor, expandedDescriptor, outputPtr, reducedOutputPtr, reducedOutputDescriptor, reduceTensorDescriptor, workspacePtr, indexPtr).forEach(ReferenceCounting::freeRef);
                    CudaTensorList tensorList = CudaTensorList.wrap(cudaTensor, rightData.length(), rightDimensions, precision);
                    return tensorList;
                }
            }, delta);
            right.accumulate(buffer, data);
        } else {
            delta.freeRef();
        }
    }) {

        @Override
        public void accumulate(final DeltaSet<Layer> buffer, final TensorList delta) {
            getAccumulator().accept(buffer, delta);
        }

        @Override
        protected void _free() {
            leftData.freeRef();
            rightData.freeRef();
            left.freeRef();
            right.freeRef();
        }

        @Override
        public boolean isAlive() {
            for (@Nonnull final Result element : inObj) if (element.isAlive()) {
                return true;
            }
            return false;
        }
    };
}
Also used : JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) jcuda.jcudnn.cudnnReduceTensorDescriptor(jcuda.jcudnn.cudnnReduceTensorDescriptor) jcuda.jcudnn.cudnnReduceTensorOp(jcuda.jcudnn.cudnnReduceTensorOp) Result(com.simiacryptus.mindseye.lang.Result) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) Precision(com.simiacryptus.mindseye.lang.cudnn.Precision) Map(java.util.Map) Layer(com.simiacryptus.mindseye.lang.Layer) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) CudaResource(com.simiacryptus.mindseye.lang.cudnn.CudaResource) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) jcuda.jcudnn.cudnnOpTensorOp(jcuda.jcudnn.cudnnOpTensorOp) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) jcuda.jcudnn.cudnnIndicesType(jcuda.jcudnn.cudnnIndicesType) jcuda.jcudnn.cudnnNanPropagation(jcuda.jcudnn.cudnnNanPropagation) jcuda.jcudnn.cudnnReduceTensorIndices(jcuda.jcudnn.cudnnReduceTensorIndices) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) Stream(java.util.stream.Stream) CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem) TensorList(com.simiacryptus.mindseye.lang.TensorList) MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType) ProductInputsLayer(com.simiacryptus.mindseye.layers.java.ProductInputsLayer) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) jcuda.jcudnn.cudnnOpTensorDescriptor(jcuda.jcudnn.cudnnOpTensorDescriptor)
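
The forward pass above asks cuDNN for C = 1.0*A mul 1.0*B via CUDNN_OP_TENSOR_MUL, and the backward pass for the right input either reuses the element-wise product directly (when both operands have the same dimensions and batch length) or sums the expanded gradient back down with CUDNN_REDUCE_TENSOR_ADD. A minimal CPU reference of that math, assuming a single broadcast right operand shared across the batch, might look like the sketch below; the helper names are hypothetical and this is not the MindsEye implementation.

import java.util.Arrays;

// Hypothetical CPU reference for an element-wise product with a batch-broadcast right operand.
public final class ProductReference {

    /** Forward: out[i][k] = left[i][k] * right[k], with right shared by every batch element. */
    static double[][] forward(double[][] left, double[] right) {
        double[][] out = new double[left.length][];
        for (int i = 0; i < left.length; i++) {
            out[i] = new double[left[i].length];
            for (int k = 0; k < left[i].length; k++) out[i][k] = left[i][k] * right[k];
        }
        return out;
    }

    /** Backward for the broadcast operand: sum the expanded gradient delta*left over the batch,
     *  mirroring the CUDNN_REDUCE_TENSOR_ADD step in the irregular branch above. */
    static double[] backwardRight(double[][] delta, double[][] left) {
        double[] grad = new double[left[0].length];
        for (int i = 0; i < delta.length; i++) {
            for (int k = 0; k < grad.length; k++) grad[k] += delta[i][k] * left[i][k];
        }
        return grad;
    }

    public static void main(String[] args) {
        double[][] left = { { 1, 2, 3 }, { 4, 5, 6 } };
        double[] right = { 10, 20, 30 };
        double[][] delta = { { 1, 1, 1 }, { 1, 1, 1 } };
        System.out.println(Arrays.deepToString(forward(left, right)));
        System.out.println(Arrays.toString(backwardRight(delta, left)));  // [5.0, 7.0, 9.0]
    }
}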

Example 8 with CudaTensorList

use of com.simiacryptus.mindseye.lang.cudnn.CudaTensorList in project MindsEye by SimiaCryptus.

the class SumReducerLayer method evalAndFree.

@Nullable
@Override
public Result evalAndFree(final Result... inObj) {
    if (!CudaSystem.isEnabled())
        return getCompatibilityLayer().evalAndFree(inObj);
    final Result input = inObj[0];
    final TensorList inputData = input.getData();
    @Nonnull final int[] inputSize = inputData.getDimensions();
    int length = inputData.length();
    CudaTensorList result = CudaSystem.run(gpu -> {
        CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
        inputData.freeRef();
        CudaMemory inputMemory = inputTensor.getMemory(gpu);
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, 1, 1, 1);
        long size = (long) precision.size * outputDescriptor.nStride * length;
        @Nonnull final CudaMemory outputMemory = gpu.allocate(size, MemoryType.Managed, true);
        CudaResource<cudnnReduceTensorDescriptor> reduceTensorDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_ADD, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
        @Nonnull final CudaMemory workspacePtr = gpu.allocate(inputMemory.size, MemoryType.Device, true);
        @Nonnull final CudaMemory indexPtr = gpu.allocate(12 * length, MemoryType.Device, false);
        // outputPtr.synchronize();
        gpu.cudnnReduceTensor(reduceTensorDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputMemory.getPtr());
        inputMemory.dirty();
        outputMemory.dirty();
        workspacePtr.dirty();
        Stream.of(inputTensor, inputMemory, reduceTensorDescriptor, workspacePtr, indexPtr).forEach(ReferenceCounting::freeRef);
        return CudaTensorList.wrap(CudaTensor.wrap(outputMemory, outputDescriptor, precision), length, new int[] { 1, 1, 1 }, precision);
    });
    return new Result(result, (DeltaSet<Layer> ctx, TensorList delta) -> {
        // Not supported by CuDNN?
        // CudaTensorList passback = CudaSystem.run(gpu -> {
        // CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, false);
        // CudaMemory deltaMemory = deltaTensor.getMemory(gpu);
        // 
        // @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor1 = gpu.newTensorDescriptor(
        // precision, length, inputSize[2], inputSize[1], inputSize[0]
        // );
        // @Nonnull final CudaMemory passbackPtr1 = gpu.allocate((long) precision.size * passbackDescriptor1.nStride * length, MemoryType.Device, false);
        // gpu.cudnnAddTensor(precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), deltaMemory.getPtr(),
        // precision.getPointer(1.0), passbackDescriptor1.getPtr(), passbackPtr1.getPtr());
        // passbackPtr1.dirty();
        // 
        // Stream.of(deltaTensor, deltaMemory, passbackDescriptor1, passbackPtr1).forEach(ReferenceCounting::freeRef);
        // return CudaTensorList.wrap(CudaTensor.wrap(passbackPtr1, passbackDescriptor1, precision), length, inputSize, precision);
        // });
        TensorList passback = TensorArray.wrap(IntStream.range(0, length).mapToObj(i -> {
            Tensor tensor = delta.get(i);
            Tensor tensor1 = new Tensor(inputSize).setAll(tensor.get(0));
            tensor.freeRef();
            return tensor1;
        }).toArray(i -> new Tensor[i]));
        input.accumulate(ctx, passback);
    }) {

        @Override
        protected void _free() {
            super._free();
            input.freeRef();
        }
    };
}
Also used : IntStream(java.util.stream.IntStream) JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) jcuda.jcudnn.cudnnReduceTensorDescriptor(jcuda.jcudnn.cudnnReduceTensorDescriptor) Tensor(com.simiacryptus.mindseye.lang.Tensor) jcuda.jcudnn.cudnnReduceTensorOp(jcuda.jcudnn.cudnnReduceTensorOp) Result(com.simiacryptus.mindseye.lang.Result) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) Precision(com.simiacryptus.mindseye.lang.cudnn.Precision) Map(java.util.Map) Layer(com.simiacryptus.mindseye.lang.Layer) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) CudaResource(com.simiacryptus.mindseye.lang.cudnn.CudaResource) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) jcuda.jcudnn.cudnnIndicesType(jcuda.jcudnn.cudnnIndicesType) jcuda.jcudnn.cudnnNanPropagation(jcuda.jcudnn.cudnnNanPropagation) jcuda.jcudnn.cudnnReduceTensorIndices(jcuda.jcudnn.cudnnReduceTensorIndices) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) Stream(java.util.stream.Stream) CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem) TensorList(com.simiacryptus.mindseye.lang.TensorList) MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType) TensorArray(com.simiacryptus.mindseye.lang.TensorArray) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet)
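
The forward pass reduces each example to a single scalar with CUDNN_REDUCE_TENSOR_ADD into an n x 1 x 1 x 1 output, and the backward pass (both the commented-out cuDNN path and the TensorArray fallback above) broadcasts each example's scalar delta to every input element. A library-free sketch of that pair of operations, under the assumption of one flat double[] per example, is shown below; the class name SumReducerReference is hypothetical.

import java.util.Arrays;

// Hypothetical CPU reference for a per-example sum reduction and its gradient broadcast.
public final class SumReducerReference {

    /** Forward: one scalar per example, the sum of all of its elements. */
    static double[] forward(double[][] batch) {
        return Arrays.stream(batch).mapToDouble(x -> Arrays.stream(x).sum()).toArray();
    }

    /** Backward: each example's scalar delta is copied to every element of that example,
     *  mirroring the setAll(tensor.get(0)) fallback in the code above. */
    static double[][] backward(double[] delta, int elementsPerExample) {
        double[][] passback = new double[delta.length][elementsPerExample];
        for (int i = 0; i < delta.length; i++) Arrays.fill(passback[i], delta[i]);
        return passback;
    }

    public static void main(String[] args) {
        double[][] batch = { { 1, 2, 3 }, { 4, 5, 6 } };
        System.out.println(Arrays.toString(forward(batch)));          // [6.0, 15.0]
        System.out.println(Arrays.deepToString(backward(new double[] { 0.5, 2.0 }, 3)));
    }
}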

Example 9 with CudaTensorList

use of com.simiacryptus.mindseye.lang.cudnn.CudaTensorList in project MindsEye by SimiaCryptus.

the class CudaLayerTester method testNonstandardBounds.

/**
 * Tests evaluation with nonstandard (non-dense) input bounds and reports tolerance statistics.
 *
 * @param log            the notebook output log
 * @param reference      the reference layer under test
 * @param inputPrototype the input prototype tensors
 * @return the tolerance statistics
 */
@Nonnull
public ToleranceStatistics testNonstandardBounds(final NotebookOutput log, @Nullable final Layer reference, @Nonnull final Tensor[] inputPrototype) {
    log.h2("Irregular Input");
    log.p("This layer should be able to accept non-dense inputs.");
    return log.code(() -> {
        Tensor[] randomized = Arrays.stream(inputPrototype).map(x -> x.map(v -> getRandom())).toArray(i -> new Tensor[i]);
        logger.info("Input: " + Arrays.stream(randomized).map(Tensor::prettyPrint).collect(Collectors.toList()));
        Precision precision = Precision.Double;
        TensorList[] controlInput = CudaSystem.run(gpu -> {
            return Arrays.stream(randomized).map(original -> {
                TensorArray data = TensorArray.create(original);
                CudaTensorList wrap = CudaTensorList.wrap(gpu.getTensor(data, precision, MemoryType.Managed, false), 1, original.getDimensions(), precision);
                data.freeRef();
                return wrap;
            }).toArray(i -> new TensorList[i]);
        }, 0);
        @Nonnull final SimpleResult controlResult = CudaSystem.run(gpu -> {
            return SimpleGpuEval.run(reference, gpu, controlInput);
        }, 1);
        final TensorList[] irregularInput = CudaSystem.run(gpu -> {
            return Arrays.stream(randomized).map(original -> {
                return buildIrregularCudaTensor(gpu, precision, original);
            }).toArray(i -> new TensorList[i]);
        }, 0);
        @Nonnull final SimpleResult testResult = CudaSystem.run(gpu -> {
            return SimpleGpuEval.run(reference, gpu, irregularInput);
        }, 1);
        try {
            ToleranceStatistics compareOutput = compareOutput(controlResult, testResult);
            ToleranceStatistics compareDerivatives = compareDerivatives(controlResult, testResult);
            return compareDerivatives.combine(compareOutput);
        } finally {
            Arrays.stream(randomized).forEach(ReferenceCountingBase::freeRef);
            Arrays.stream(controlInput).forEach(ReferenceCounting::freeRef);
            Arrays.stream(irregularInput).forEach(x -> x.freeRef());
            controlResult.freeRef();
            testResult.freeRef();
        }
    });
}
Also used : IntStream(java.util.stream.IntStream) SimpleResult(com.simiacryptus.mindseye.test.SimpleResult) SimpleGpuEval(com.simiacryptus.mindseye.test.SimpleGpuEval) Arrays(java.util.Arrays) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) LoggerFactory(org.slf4j.LoggerFactory) Tensor(com.simiacryptus.mindseye.lang.Tensor) ReferenceCountingBase(com.simiacryptus.mindseye.lang.ReferenceCountingBase) Random(java.util.Random) Function(java.util.function.Function) Precision(com.simiacryptus.mindseye.lang.cudnn.Precision) CudnnHandle(com.simiacryptus.mindseye.lang.cudnn.CudnnHandle) Layer(com.simiacryptus.mindseye.lang.Layer) NotebookOutput(com.simiacryptus.util.io.NotebookOutput) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) IntFunction(java.util.function.IntFunction) Logger(org.slf4j.Logger) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) Collectors(java.util.stream.Collectors) Stream(java.util.stream.Stream) CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem) ToleranceStatistics(com.simiacryptus.mindseye.test.ToleranceStatistics) TensorList(com.simiacryptus.mindseye.lang.TensorList) MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType) TensorArray(com.simiacryptus.mindseye.lang.TensorArray)
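
Both CudaLayerTester methods above finish by comparing the control and irregular results with compareOutput and compareDerivatives and combining the two ToleranceStatistics. A minimal, library-free sketch of that kind of comparison, tracking the worst absolute and relative error over two arrays, is given below; the class ToleranceSketch and its fields are hypothetical stand-ins, not the MindsEye ToleranceStatistics API.

// Hypothetical stand-in for an absolute/relative error comparison of two result arrays.
public final class ToleranceSketch {
    final double maxAbsoluteError;
    final double maxRelativeError;

    ToleranceSketch(double maxAbsoluteError, double maxRelativeError) {
        this.maxAbsoluteError = maxAbsoluteError;
        this.maxRelativeError = maxRelativeError;
    }

    /** Compare two equally sized arrays element by element. */
    static ToleranceSketch compare(double[] expected, double[] actual) {
        double maxAbs = 0, maxRel = 0;
        for (int i = 0; i < expected.length; i++) {
            double abs = Math.abs(expected[i] - actual[i]);
            double denom = Math.max(Math.abs(expected[i]), Math.abs(actual[i]));
            maxAbs = Math.max(maxAbs, abs);
            if (denom > 0) maxRel = Math.max(maxRel, abs / denom);
        }
        return new ToleranceSketch(maxAbs, maxRel);
    }

    /** Keep the worse of two statistics, analogous to combine() in the tests above. */
    ToleranceSketch combine(ToleranceSketch other) {
        return new ToleranceSketch(Math.max(maxAbsoluteError, other.maxAbsoluteError),
                Math.max(maxRelativeError, other.maxRelativeError));
    }

    public static void main(String[] args) {
        ToleranceSketch output = compare(new double[] { 1.0, 2.0 }, new double[] { 1.0, 2.0001 });
        ToleranceSketch derivatives = compare(new double[] { 0.5 }, new double[] { 0.5 });
        ToleranceSketch combined = output.combine(derivatives);
        System.out.println(combined.maxAbsoluteError + " / " + combined.maxRelativeError);
    }
}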

Aggregations

CudaDevice (com.simiacryptus.mindseye.lang.cudnn.CudaDevice): 9
CudaMemory (com.simiacryptus.mindseye.lang.cudnn.CudaMemory): 9
CudaTensorList (com.simiacryptus.mindseye.lang.cudnn.CudaTensorList): 9
Nonnull (javax.annotation.Nonnull): 9
Nullable (javax.annotation.Nullable): 9
Layer (com.simiacryptus.mindseye.lang.Layer): 8
TensorList (com.simiacryptus.mindseye.lang.TensorList): 8
CudaSystem (com.simiacryptus.mindseye.lang.cudnn.CudaSystem): 8
CudaTensor (com.simiacryptus.mindseye.lang.cudnn.CudaTensor): 8
MemoryType (com.simiacryptus.mindseye.lang.cudnn.MemoryType): 8
Precision (com.simiacryptus.mindseye.lang.cudnn.Precision): 8
Arrays (java.util.Arrays): 8
ReferenceCounting (com.simiacryptus.mindseye.lang.ReferenceCounting): 7
Stream (java.util.stream.Stream): 7
JsonObject (com.google.gson.JsonObject): 6
DataSerializer (com.simiacryptus.mindseye.lang.DataSerializer): 6
DeltaSet (com.simiacryptus.mindseye.lang.DeltaSet): 6
LayerBase (com.simiacryptus.mindseye.lang.LayerBase): 6
Result (com.simiacryptus.mindseye.lang.Result): 6
Tensor (com.simiacryptus.mindseye.lang.Tensor): 6