
Example 11 with Layer

use of com.simiacryptus.mindseye.lang.Layer in project MindsEye by SimiaCryptus.

the class MonitoringWrapperLayer method evalAndFree.

@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
    @Nonnull final AtomicLong passbackNanos = new AtomicLong(0);
    final Result[] wrappedInput = Arrays.stream(inObj).map(result -> {
        return new Result(result.getData(), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList data) -> {
            data.addRef();
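            // Time the upstream accumulate call and record it, so it can later be subtracted from this layer's own backward measurement.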
            passbackNanos.addAndGet(TimedResult.time(() -> result.accumulate(buffer, data)).timeNanos);
        }) {

            @Override
            protected void _free() {
                result.freeRef();
            }

            @Override
            public boolean isAlive() {
                return result.isAlive();
            }
        };
    }).toArray(i -> new Result[i]);
    @Nonnull TimedResult<Result> timedResult = TimedResult.time(() -> getInner().evalAndFree(wrappedInput));
    final Result output = timedResult.result;
    forwardPerformance.add((timedResult.timeNanos) / 1000000000.0);
    totalBatches++;
    final int items = Arrays.stream(inObj).mapToInt(x -> x.getData().length()).max().orElse(1);
    totalItems += items;
    if (recordSignalMetrics) {
        forwardSignal.clear();
        output.getData().stream().parallel().forEach(t -> {
            forwardSignal.add(t.getData());
            t.freeRef();
        });
    }
    return new Result(output.getData(), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList data) -> {
        if (recordSignalMetrics) {
            backwardSignal.clear();
            data.stream().parallel().forEach(t -> {
                backwardSignal.add(t.getData());
                t.freeRef();
            });
        }
        data.addRef();
        backwardPerformance.add((TimedResult.time(() -> output.accumulate(buffer, data)).timeNanos - passbackNanos.getAndSet(0)) / (items * 1e9));
    }) {

        @Override
        protected void _free() {
            output.freeRef();
        }

        @Override
        public boolean isAlive() {
            return output.isAlive();
        }
    };
}
Also used : JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) HashMap(java.util.HashMap) Result(com.simiacryptus.mindseye.lang.Result) MonitoredItem(com.simiacryptus.util.MonitoredItem) PercentileStatistics(com.simiacryptus.util.data.PercentileStatistics) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) MonitoredObject(com.simiacryptus.util.MonitoredObject) AtomicLong(java.util.concurrent.atomic.AtomicLong) List(java.util.List) ScalarStatistics(com.simiacryptus.util.data.ScalarStatistics) TensorList(com.simiacryptus.mindseye.lang.TensorList) Map(java.util.Map) TimedResult(com.simiacryptus.util.lang.TimedResult) Layer(com.simiacryptus.mindseye.lang.Layer) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable)
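The wrapper above measures the inner layer's forward time and charges the upstream accumulate to passbackNanos so it can be subtracted from the backward measurement. Below is a minimal standalone sketch of that timing pattern, assuming no MindsEye types; the empty runnables are hypothetical stand-ins for the wrapped layer's work.

import java.util.concurrent.atomic.AtomicLong;

public class LayerTimingSketch {

    /** Runs the given work and returns the elapsed wall-clock time in nanoseconds. */
    static long timeNanos(Runnable work) {
        final long start = System.nanoTime();
        work.run();
        return System.nanoTime() - start;
    }

    public static void main(String[] args) {
        final AtomicLong passbackNanos = new AtomicLong(0);

        // Stand-in for the wrapped inner layer's forward pass.
        long forwardNanos = timeNanos(() -> { /* inner.evalAndFree(wrappedInput) */ });

        // Stand-in for the backward pass: the upstream accumulate is timed separately
        // so that only this layer's own cost is reported.
        long backwardNanos = timeNanos(() -> {
            passbackNanos.addAndGet(timeNanos(() -> { /* upstream result.accumulate(buffer, data) */ }));
            /* this layer's own backward work */
        });
        long ownBackwardNanos = backwardNanos - passbackNanos.getAndSet(0);

        System.out.printf("forward=%.6fs backward(own)=%.6fs%n", forwardNanos / 1e9, ownBackwardNanos / 1e9);
    }
}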

Example 12 with Layer

use of com.simiacryptus.mindseye.lang.Layer in project MindsEye by SimiaCryptus.

the class FullyConnectedLayer method eval.

@Nonnull
@Override
public Result eval(@Nonnull final Result... inObj) {
    final TensorList indata = inObj[0].getData();
    indata.addRef();
    for (@Nonnull Result result : inObj) {
        result.addRef();
    }
    FullyConnectedLayer.this.addRef();
    assert Tensor.length(indata.getDimensions()) == Tensor.length(this.inputDims) : Arrays.toString(indata.getDimensions()) + " == " + Arrays.toString(this.inputDims);
    @Nonnull DoubleMatrix doubleMatrix = new DoubleMatrix(Tensor.length(indata.getDimensions()), Tensor.length(outputDims), this.weights.getData());
    @Nonnull final DoubleMatrix matrixObj = FullyConnectedLayer.transpose(doubleMatrix);
    @Nonnull TensorArray tensorArray = TensorArray.wrap(IntStream.range(0, indata.length()).parallel().mapToObj(dataIndex -> {
        @Nullable final Tensor input = indata.get(dataIndex);
        @Nullable final Tensor output = new Tensor(outputDims);
        matrixObj.mmuli(new DoubleMatrix(input.length(), 1, input.getData()), new DoubleMatrix(output.length(), 1, output.getData()));
        input.freeRef();
        return output;
    }).toArray(i -> new Tensor[i]));
    RecycleBin.DOUBLES.recycle(matrixObj.data, matrixObj.data.length);
    this.weights.addRef();
    return new Result(tensorArray, (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
        if (!isFrozen()) {
            final Delta<Layer> deltaBuffer = buffer.get(FullyConnectedLayer.this, this.weights.getData());
            final int threads = 4;
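            // Partition batch items across 4 parallel streams; each item contributes an outer product of delta and input, reduced into one weight-gradient tensor.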
            IntStream.range(0, threads).parallel().mapToObj(x -> x).flatMap(thread -> {
                @Nullable Stream<Tensor> stream = IntStream.range(0, indata.length()).filter(i -> thread == i % threads).mapToObj(dataIndex -> {
                    @Nonnull final Tensor weightDelta = new Tensor(Tensor.length(inputDims), Tensor.length(outputDims));
                    Tensor deltaTensor = delta.get(dataIndex);
                    Tensor inputTensor = indata.get(dataIndex);
                    FullyConnectedLayer.crossMultiplyT(deltaTensor.getData(), inputTensor.getData(), weightDelta.getData());
                    inputTensor.freeRef();
                    deltaTensor.freeRef();
                    return weightDelta;
                });
                return stream;
            }).reduce((a, b) -> {
                @Nullable Tensor c = a.addAndFree(b);
                b.freeRef();
                return c;
            }).map(data -> {
                @Nonnull Delta<Layer> layerDelta = deltaBuffer.addInPlace(data.getData());
                data.freeRef();
                return layerDelta;
            });
            deltaBuffer.freeRef();
        }
        if (inObj[0].isAlive()) {
            @Nonnull final TensorList tensorList = TensorArray.wrap(IntStream.range(0, indata.length()).parallel().mapToObj(dataIndex -> {
                Tensor deltaTensor = delta.get(dataIndex);
                @Nonnull final Tensor passback = new Tensor(indata.getDimensions());
                FullyConnectedLayer.multiply(this.weights.getData(), deltaTensor.getData(), passback.getData());
                deltaTensor.freeRef();
                return passback;
            }).toArray(i -> new Tensor[i]));
            inObj[0].accumulate(buffer, tensorList);
        }
    }) {

        @Override
        protected void _free() {
            indata.freeRef();
            FullyConnectedLayer.this.freeRef();
            for (@Nonnull Result result : inObj) {
                result.freeRef();
            }
            FullyConnectedLayer.this.weights.freeRef();
        }

        @Override
        public boolean isAlive() {
            return !isFrozen() || Arrays.stream(inObj).anyMatch(x -> x.isAlive());
        }
    };
}
Also used : IntStream(java.util.stream.IntStream) JsonObject(com.google.gson.JsonObject) Coordinate(com.simiacryptus.mindseye.lang.Coordinate) Arrays(java.util.Arrays) LoggerFactory(org.slf4j.LoggerFactory) Tensor(com.simiacryptus.mindseye.lang.Tensor) Result(com.simiacryptus.mindseye.lang.Result) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) JsonUtil(com.simiacryptus.util.io.JsonUtil) Delta(com.simiacryptus.mindseye.lang.Delta) Map(java.util.Map) Layer(com.simiacryptus.mindseye.lang.Layer) DoubleMatrix(org.jblas.DoubleMatrix) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) Util(com.simiacryptus.util.Util) Logger(org.slf4j.Logger) IntToDoubleFunction(java.util.function.IntToDoubleFunction) FastRandom(com.simiacryptus.util.FastRandom) ToDoubleBiFunction(java.util.function.ToDoubleBiFunction) RecycleBin(com.simiacryptus.mindseye.lang.RecycleBin) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) Stream(java.util.stream.Stream) ToDoubleFunction(java.util.function.ToDoubleFunction) TensorList(com.simiacryptus.mindseye.lang.TensorList) DoubleSupplier(java.util.function.DoubleSupplier) TensorArray(com.simiacryptus.mindseye.lang.TensorArray) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet)
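The eval above loads the flat weights into a jblas DoubleMatrix, multiplies its transpose with each input, and accumulates per-item outer products as the weight gradient. The following self-contained plain-array sketch shows the same arithmetic; the shapes and helper names are illustrative assumptions, with no MindsEye or jblas dependency.

/** Plain-array reference for a fully connected layer with weights laid out as W[input][output]. */
public class FullyConnectedSketch {

    /** Forward: y[o] = sum_i W[i][o] * x[i], i.e. the transpose-multiply done by mmuli above. */
    static double[] forward(double[][] w, double[] x) {
        double[] y = new double[w[0].length];
        for (int i = 0; i < w.length; i++)
            for (int o = 0; o < y.length; o++)
                y[o] += w[i][o] * x[i];
        return y;
    }

    /** Weight gradient for one item: dW[i][o] = delta[o] * x[i], mirroring the per-item outer product accumulated above (index layout assumed). */
    static double[][] weightGradient(double[] x, double[] delta) {
        double[][] dw = new double[x.length][delta.length];
        for (int i = 0; i < x.length; i++)
            for (int o = 0; o < delta.length; o++)
                dw[i][o] = delta[o] * x[i];
        return dw;
    }

    /** Input passback: dx[i] = sum_o W[i][o] * delta[o]. */
    static double[] passback(double[][] w, double[] delta) {
        double[] dx = new double[w.length];
        for (int i = 0; i < w.length; i++)
            for (int o = 0; o < delta.length; o++)
                dx[i] += w[i][o] * delta[o];
        return dx;
    }

    public static void main(String[] args) {
        double[][] w = { { 1, 0 }, { 0, 2 }, { 3, 1 } }; // 3 inputs, 2 outputs
        double[] x = { 1, 2, 3 };
        System.out.println(java.util.Arrays.toString(forward(w, x)));                      // [10.0, 7.0]
        System.out.println(java.util.Arrays.toString(passback(w, new double[] { 1, 1 }))); // [1.0, 2.0, 4.0]
    }
}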

Example 13 with Layer

use of com.simiacryptus.mindseye.lang.Layer in project MindsEye by SimiaCryptus.

the class HyperbolicActivationLayer method eval.

@Nonnull
@Override
public Result eval(final Result... inObj) {
    final TensorList indata = inObj[0].getData();
    indata.addRef();
    inObj[0].addRef();
    weights.addRef();
    HyperbolicActivationLayer.this.addRef();
    final int itemCnt = indata.length();
    return new Result(TensorArray.wrap(IntStream.range(0, itemCnt).mapToObj(dataIndex -> {
        @Nullable final Tensor input = indata.get(dataIndex);
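        // Forward: f(v) = sign * (sqrt((a*v)^2 + 1) - a) / a, where a is the clamped weight for the positive (index 0) or negative (index 1) half.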
        @Nullable Tensor map = input.map(v -> {
            final int sign = v < 0 ? negativeMode : 1;
            final double a = Math.max(0, weights.get(v < 0 ? 1 : 0));
            return sign * (Math.sqrt(Math.pow(a * v, 2) + 1) - a) / a;
        });
        input.freeRef();
        return map;
    }).toArray(i -> new Tensor[i])), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
        if (!isFrozen()) {
            IntStream.range(0, delta.length()).forEach(dataIndex -> {
                @Nullable Tensor deltaI = delta.get(dataIndex);
                @Nullable Tensor inputI = indata.get(dataIndex);
                @Nullable final double[] deltaData = deltaI.getData();
                @Nullable final double[] inputData = inputI.getData();
                @Nonnull final Tensor weightDelta = new Tensor(weights.getDimensions());
                for (int i = 0; i < deltaData.length; i++) {
                    final double d = deltaData[i];
                    final double x = inputData[i];
                    final int sign = x < 0 ? negativeMode : 1;
                    final double a = Math.max(0, weights.getData()[x < 0 ? 1 : 0]);
                    weightDelta.add(x < 0 ? 1 : 0, -sign * d / (a * a * Math.sqrt(1 + Math.pow(a * x, 2))));
                }
                deltaI.freeRef();
                inputI.freeRef();
                buffer.get(HyperbolicActivationLayer.this, weights.getData()).addInPlace(weightDelta.getData()).freeRef();
                weightDelta.freeRef();
            });
        }
        if (inObj[0].isAlive()) {
            @Nonnull TensorArray tensorArray = TensorArray.wrap(IntStream.range(0, delta.length()).mapToObj(dataIndex -> {
                @Nullable Tensor inputTensor = indata.get(dataIndex);
                Tensor deltaTensor = delta.get(dataIndex);
                @Nullable final double[] deltaData = deltaTensor.getData();
                @Nonnull final int[] dims = indata.getDimensions();
                @Nonnull final Tensor passback = new Tensor(dims);
                for (int i = 0; i < passback.length(); i++) {
                    final double x = inputTensor.getData()[i];
                    final double d = deltaData[i];
                    final int sign = x < 0 ? negativeMode : 1;
                    final double a = Math.max(0, weights.getData()[x < 0 ? 1 : 0]);
                    passback.set(i, sign * d * a * x / Math.sqrt(1 + a * x * a * x));
                }
                deltaTensor.freeRef();
                inputTensor.freeRef();
                return passback;
            }).toArray(i -> new Tensor[i]));
            inObj[0].accumulate(buffer, tensorArray);
        }
    }) {

        @Override
        protected void _free() {
            indata.freeRef();
            inObj[0].freeRef();
            weights.freeRef();
            HyperbolicActivationLayer.this.freeRef();
        }

        @Override
        public boolean isAlive() {
            return inObj[0].isAlive() || !isFrozen();
        }
    };
}
Also used : IntStream(java.util.stream.IntStream) JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) Logger(org.slf4j.Logger) LoggerFactory(org.slf4j.LoggerFactory) Tensor(com.simiacryptus.mindseye.lang.Tensor) Result(com.simiacryptus.mindseye.lang.Result) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) TensorList(com.simiacryptus.mindseye.lang.TensorList) Map(java.util.Map) Layer(com.simiacryptus.mindseye.lang.Layer) TensorArray(com.simiacryptus.mindseye.lang.TensorArray) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable)
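Per element, the eval above reduces to one forward formula and two analytic derivatives. A CPU reference sketch follows (plain Java, illustrative only; a0, a1 and negativeMode stand in for the layer's weights and its mode field).

public class HyperbolicActivationSketch {

    /** Forward value for one element: a0 scales v >= 0, a1 scales v < 0, and the sign comes from negativeMode. */
    static double forward(double v, double a0, double a1, int negativeMode) {
        final int sign = v < 0 ? negativeMode : 1;
        final double a = Math.max(0, v < 0 ? a1 : a0);
        return sign * (Math.sqrt(Math.pow(a * v, 2) + 1) - a) / a;
    }

    /** d(forward)/dv, the factor applied to the incoming delta in the passback loop above. */
    static double inputGradient(double v, double a0, double a1, int negativeMode) {
        final int sign = v < 0 ? negativeMode : 1;
        final double a = Math.max(0, v < 0 ? a1 : a0);
        return sign * a * v / Math.sqrt(1 + a * v * a * v);
    }

    /** d(forward)/da; in the loop above it is multiplied by the incoming delta before being added to the weight delta. */
    static double weightGradient(double v, double a0, double a1, int negativeMode) {
        final int sign = v < 0 ? negativeMode : 1;
        final double a = Math.max(0, v < 0 ? a1 : a0);
        return -sign / (a * a * Math.sqrt(1 + Math.pow(a * v, 2)));
    }

    public static void main(String[] args) {
        System.out.println(forward(2.0, 1.0, 1.0, 1));       // sqrt(5) - 1, about 1.236
        System.out.println(inputGradient(2.0, 1.0, 1.0, 1)); // 2 / sqrt(5), about 0.894
    }
}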

Example 14 with Layer

use of com.simiacryptus.mindseye.lang.Layer in project MindsEye by SimiaCryptus.

the class ImgBandScaleLayer method eval.

/**
 * Evaluates this layer against the given input, scaling each image band by its weight.
 *
 * @param input the input
 * @return the result, whose accumulator also updates the per-band weights when the layer is not frozen
 */
@Nonnull
public Result eval(@Nonnull final Result input) {
    @Nullable final double[] weights = getWeights();
    final TensorList inData = input.getData();
    inData.addRef();
    input.addRef();
    @Nullable Function<Tensor, Tensor> tensorTensorFunction = tensor -> {
        if (tensor.getDimensions().length != 3) {
            throw new IllegalArgumentException(Arrays.toString(tensor.getDimensions()));
        }
        if (tensor.getDimensions()[2] != weights.length) {
            throw new IllegalArgumentException(String.format("%s: %s does not have %s bands", getName(), Arrays.toString(tensor.getDimensions()), weights.length));
        }
        @Nullable Tensor tensor1 = tensor.mapCoords(c -> tensor.get(c) * weights[c.getCoords()[2]]);
        tensor.freeRef();
        return tensor1;
    };
    Tensor[] data = inData.stream().parallel().map(tensorTensorFunction).toArray(i -> new Tensor[i]);
    return new Result(TensorArray.wrap(data), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
        if (!isFrozen()) {
            final Delta<Layer> deltaBuffer = buffer.get(ImgBandScaleLayer.this, weights);
            IntStream.range(0, delta.length()).forEach(index -> {
                @Nonnull int[] dimensions = delta.getDimensions();
                int z = dimensions[2];
                int y = dimensions[1];
                int x = dimensions[0];
                final double[] array = RecycleBin.DOUBLES.obtain(z);
                Tensor deltaTensor = delta.get(index);
                @Nullable final double[] deltaArray = deltaTensor.getData();
                Tensor inputTensor = inData.get(index);
                @Nullable final double[] inputData = inputTensor.getData();
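                // Weight gradient for band i: accumulate delta * input over all x*y pixels of that band.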
                for (int i = 0; i < z; i++) {
                    for (int j = 0; j < y * x; j++) {
                        // array[i] += deltaArray[i + z * j];
                        array[i] += deltaArray[i * x * y + j] * inputData[i * x * y + j];
                    }
                }
                inputTensor.freeRef();
                deltaTensor.freeRef();
                assert Arrays.stream(array).allMatch(v -> Double.isFinite(v));
                deltaBuffer.addInPlace(array);
                RecycleBin.DOUBLES.recycle(array, array.length);
            });
            deltaBuffer.freeRef();
        }
        if (input.isAlive()) {
            Tensor[] tensors = delta.stream().map(t -> {
                @Nullable Tensor tensor = t.mapCoords((c) -> t.get(c) * weights[c.getCoords()[2]]);
                t.freeRef();
                return tensor;
            }).toArray(i -> new Tensor[i]);
            @Nonnull TensorArray tensorArray = TensorArray.wrap(tensors);
            input.accumulate(buffer, tensorArray);
        }
    }) {

        @Override
        protected void _free() {
            inData.freeRef();
            input.freeRef();
        }

        @Override
        public boolean isAlive() {
            return input.isAlive() || !isFrozen();
        }
    };
}
Also used : IntStream(java.util.stream.IntStream) JsonObject(com.google.gson.JsonObject) Util(com.simiacryptus.util.Util) Arrays(java.util.Arrays) Logger(org.slf4j.Logger) IntToDoubleFunction(java.util.function.IntToDoubleFunction) LoggerFactory(org.slf4j.LoggerFactory) Tensor(com.simiacryptus.mindseye.lang.Tensor) Result(com.simiacryptus.mindseye.lang.Result) Function(java.util.function.Function) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) JsonUtil(com.simiacryptus.util.io.JsonUtil) Delta(com.simiacryptus.mindseye.lang.Delta) RecycleBin(com.simiacryptus.mindseye.lang.RecycleBin) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) TensorList(com.simiacryptus.mindseye.lang.TensorList) Map(java.util.Map) DoubleSupplier(java.util.function.DoubleSupplier) Layer(com.simiacryptus.mindseye.lang.Layer) TensorArray(com.simiacryptus.mindseye.lang.TensorArray) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable)
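The layer above multiplies every pixel of band b by weights[b]; the backward pass sums delta * input per band for the weight gradient and scales the incoming delta by the same weights for the passback. A plain-array sketch of those three operations, assuming a band-major layout that mirrors the inner loop above:

/** Plain-array reference for per-band scaling; data is flattened band-major as data[b * pixels + p]. */
public class BandScaleSketch {

    /** Forward: every pixel of band b is multiplied by weights[b]. */
    static double[] forward(double[] data, double[] weights, int pixels) {
        double[] out = new double[data.length];
        for (int b = 0; b < weights.length; b++)
            for (int p = 0; p < pixels; p++)
                out[b * pixels + p] = data[b * pixels + p] * weights[b];
        return out;
    }

    /** Weight gradient: dW[b] = sum over the band's pixels of delta * input, as in the nested loop above. */
    static double[] weightGradient(double[] input, double[] delta, int bands, int pixels) {
        double[] dw = new double[bands];
        for (int b = 0; b < bands; b++)
            for (int p = 0; p < pixels; p++)
                dw[b] += delta[b * pixels + p] * input[b * pixels + p];
        return dw;
    }

    /** Input passback: the incoming delta is scaled by the same per-band weights. */
    static double[] passback(double[] delta, double[] weights, int pixels) {
        return forward(delta, weights, pixels);
    }

    public static void main(String[] args) {
        double[] img = { 1, 2, 3, 4 }; // 2 bands x 2 pixels
        double[] w = { 10, 100 };
        System.out.println(java.util.Arrays.toString(forward(img, w, 2)));                                     // [10.0, 20.0, 300.0, 400.0]
        System.out.println(java.util.Arrays.toString(weightGradient(img, new double[] { 1, 1, 1, 1 }, 2, 2))); // [3.0, 7.0]
    }
}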

Example 15 with Layer

use of com.simiacryptus.mindseye.lang.Layer in project MindsEye by SimiaCryptus.

the class SquareActivationLayer method evalAndFree.

@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
    if (!CudaSystem.isEnabled())
        return getCompatibilityLayer().evalAndFree(inObj);
    if (inObj.length != 1) {
        throw new IllegalArgumentException("inObj.length=" + inObj.length);
    }
    Result input = inObj[0];
    final TensorList inputData = input.getData();
    @Nonnull final int[] dimensions = inputData.getDimensions();
    final int length = inputData.length();
    if (3 != dimensions.length) {
        throw new IllegalArgumentException("dimensions=" + Arrays.toString(dimensions));
    }
    return new Result(CudaSystem.run(gpu -> {
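        // Forward pass: elementwise square computed as input * input with CUDNN_OP_TENSOR_MUL, scaled by alpha.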
        @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
        @Nullable final CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
        // assert inputTensor.size == rPtr.size;
        @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
        CudaMemory lPtrMemory = inputTensor.getMemory(gpu);
        CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(alpha), inputTensor.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
        outputPtr.dirty();
        lPtrMemory.dirty();
        outputPtr.dirty();
        lPtrMemory.freeRef();
        inputTensor.freeRef();
        opDescriptor.freeRef();
        CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, outputDescriptor, precision);
        return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
    }, inputData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
        if (input.isAlive()) {
            @Nonnull TensorList data = CudaSystem.run(gpu -> {
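                // Backward pass: d(alpha * x^2)/dx = 2 * alpha * x, realized as (2 * delta) * (alpha * input) with CUDNN_OP_TENSOR_MUL.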
                @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
                @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
                @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
                delta.freeRef();
                @Nullable final CudaTensor inputTensor = gpu.getTensor(input.getData(), precision, MemoryType.Device, false);
                // assert deltaTensor.size == inputTensor.size;
                @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
                CudaMemory deltaTensorMemory = deltaTensor.getMemory(gpu);
                CudaMemory rightTensorMemory = inputTensor.getMemory(gpu);
                CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(2), deltaTensor.descriptor.getPtr(), deltaTensorMemory.getPtr(), precision.getPointer(alpha), inputTensor.descriptor.getPtr(), rightTensorMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
                deltaTensorMemory.dirty();
                rightTensorMemory.dirty();
                outputPtr.dirty();
                deltaTensorMemory.freeRef();
                rightTensorMemory.freeRef();
                CudaTensor cudaTensor = new CudaTensor(outputPtr, outputDescriptor, precision);
                Arrays.stream(new ReferenceCounting[] { deltaTensor, inputTensor, opDescriptor, outputDescriptor }).forEach(ReferenceCounting::freeRef);
                outputPtr.freeRef();
                return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
            }, delta);
            input.accumulate(buffer, data);
        } else {
            delta.freeRef();
        }
    }) {

        @Override
        public void accumulate(final DeltaSet<Layer> buffer, final TensorList delta) {
            getAccumulator().accept(buffer, delta);
        }

        @Override
        protected void _free() {
            inputData.freeRef();
            input.freeRef();
        }

        @Override
        public boolean isAlive() {
            for (@Nonnull final Result element : inObj) if (element.isAlive()) {
                return true;
            }
            return false;
        }
    };
}
Also used : CudaResource(com.simiacryptus.mindseye.lang.cudnn.CudaResource) JsonObject(com.google.gson.JsonObject) Arrays(java.util.Arrays) CudaDevice(com.simiacryptus.mindseye.lang.cudnn.CudaDevice) CudaTensor(com.simiacryptus.mindseye.lang.cudnn.CudaTensor) CudaMemory(com.simiacryptus.mindseye.lang.cudnn.CudaMemory) jcuda.jcudnn.cudnnOpTensorOp(jcuda.jcudnn.cudnnOpTensorOp) CudaTensorList(com.simiacryptus.mindseye.lang.cudnn.CudaTensorList) Result(com.simiacryptus.mindseye.lang.Result) DataSerializer(com.simiacryptus.mindseye.lang.DataSerializer) Precision(com.simiacryptus.mindseye.lang.cudnn.Precision) List(java.util.List) LayerBase(com.simiacryptus.mindseye.lang.LayerBase) CudaSystem(com.simiacryptus.mindseye.lang.cudnn.CudaSystem) TensorList(com.simiacryptus.mindseye.lang.TensorList) Map(java.util.Map) MemoryType(com.simiacryptus.mindseye.lang.cudnn.MemoryType) ProductInputsLayer(com.simiacryptus.mindseye.layers.java.ProductInputsLayer) Layer(com.simiacryptus.mindseye.lang.Layer) DeltaSet(com.simiacryptus.mindseye.lang.DeltaSet) ReferenceCounting(com.simiacryptus.mindseye.lang.ReferenceCounting) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) jcuda.jcudnn.cudnnOpTensorDescriptor(jcuda.jcudnn.cudnnOpTensorDescriptor)
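Per element, the two cudnnOpTensor calls above compute alpha * x * x on the forward pass and (2 * delta) * (alpha * x) on the backward pass. A CPU reference for that arithmetic, as a plain-Java illustration with no CUDA dependency:

public class SquareActivationSketch {

    /** Forward: the CUDNN_OP_TENSOR_MUL of the input with itself, i.e. alpha * x * x per element. */
    static double[] forward(double[] x, double alpha) {
        double[] y = new double[x.length];
        for (int i = 0; i < x.length; i++)
            y[i] = alpha * x[i] * x[i];
        return y;
    }

    /** Backward: d(alpha * x^2)/dx = 2 * alpha * x, applied to the incoming delta. */
    static double[] backward(double[] x, double[] delta, double alpha) {
        double[] dx = new double[x.length];
        for (int i = 0; i < x.length; i++)
            dx[i] = 2 * alpha * x[i] * delta[i];
        return dx;
    }

    public static void main(String[] args) {
        double[] x = { 1, -2, 3 };
        System.out.println(java.util.Arrays.toString(forward(x, 1.0)));                            // [1.0, 4.0, 9.0]
        System.out.println(java.util.Arrays.toString(backward(x, new double[] { 1, 1, 1 }, 1.0))); // [2.0, -4.0, 6.0]
    }
}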

Aggregations

Layer (com.simiacryptus.mindseye.lang.Layer) 167
Nonnull (javax.annotation.Nonnull) 159
Nullable (javax.annotation.Nullable) 128
Arrays (java.util.Arrays) 117
Tensor (com.simiacryptus.mindseye.lang.Tensor) 116
List (java.util.List) 108
Result (com.simiacryptus.mindseye.lang.Result) 103
IntStream (java.util.stream.IntStream) 98
DeltaSet (com.simiacryptus.mindseye.lang.DeltaSet) 95
TensorList (com.simiacryptus.mindseye.lang.TensorList) 93
Map (java.util.Map) 83
TensorArray (com.simiacryptus.mindseye.lang.TensorArray) 76
Logger (org.slf4j.Logger) 76
LoggerFactory (org.slf4j.LoggerFactory) 76
JsonObject (com.google.gson.JsonObject) 70
DataSerializer (com.simiacryptus.mindseye.lang.DataSerializer) 66
LayerBase (com.simiacryptus.mindseye.lang.LayerBase) 64
NotebookOutput (com.simiacryptus.util.io.NotebookOutput) 51
Collectors (java.util.stream.Collectors) 42
Stream (java.util.stream.Stream) 37