Usage example of com.simiacryptus.mindseye.lang.TensorList in the MindsEye project by SimiaCryptus: the SumInputsLayer class, method evalAndFree.
// Element-wise sum of an arbitrary number of input tensors, computed on the GPU.
// Contract: the first input must be rank-3 (width x height x bands) and every other
// input must contain the same total number of elements. Falls back to the CPU
// compatibility layer when CUDA is disabled. "AndFree" convention: this method
// consumes one reference to each element of inObj; the returned Result releases
// them in _free().
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
// The first input's dimensions define the expected shape.
@Nonnull final int[] dimensions = inObj[0].getData().getDimensions();
if (3 != dimensions.length) {
throw new IllegalArgumentException("dimensions=" + Arrays.toString(dimensions));
}
// Remaining inputs only need to match in total element count, not exact shape.
for (int i = 1; i < inObj.length; i++) {
if (Tensor.length(dimensions) != Tensor.length(inObj[i].getData().getDimensions())) {
throw new IllegalArgumentException(Arrays.toString(dimensions) + " != " + Arrays.toString(inObj[i].getData().getDimensions()));
}
}
if (!CudaSystem.isEnabled())
return getCompatibilityLayer().evalAndFree(inObj);
Stream<TensorList> tensorListStream = Arrays.stream(inObj).map(x -> x.getData());
// Pairwise GPU reduction; may run in parallel when both the global settings
// and this layer's 'parallel' flag allow it.
if (!CoreSettings.INSTANCE.isSingleThreaded() && parallel)
tensorListStream = tensorListStream.parallel();
// addAndFree presumably consumes both operands (per the *AndFree naming), so the
// reduce leaves exactly one live TensorList — verify against CudnnHandle.addAndFree.
// NOTE(review): .get() assumes inObj is non-empty — TODO confirm callers guarantee this.
return new Result(tensorListStream.reduce((leftData, rightData) -> CudaSystem.run(gpu -> {
return gpu.addAndFree(precision, leftData, rightData);
}, leftData, rightData)).get(), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
// Backward pass: the derivative of a sum w.r.t. each addend is the identity,
// so the same delta flows unchanged to every live input.
@Nonnull Stream<Result> deltaStream = Arrays.stream(inObj);
if (!CoreSettings.INSTANCE.isSingleThreaded() && parallel)
deltaStream = deltaStream.parallel();
deltaStream.filter(Result::isAlive).forEach(obj -> {
// Each accumulate() call consumes one reference to delta.
delta.addRef();
obj.accumulate(buffer, delta);
});
}) {
@Override
protected void _free() {
// Release the input references taken by evalAndFree.
Arrays.stream(inObj).forEach(x -> x.freeRef());
}
@Override
public boolean isAlive() {
// Gradient flows if any input still requires it.
for (@Nonnull final Result element : inObj) if (element.isAlive()) {
return true;
}
return false;
}
};
}
Usage example of com.simiacryptus.mindseye.lang.TensorList in the MindsEye project by SimiaCryptus: the SumReducerLayer class, method evalAndFree.
// Reduces each batch item to a single scalar holding the sum of all of its
// elements, computed on the GPU via cuDNN's ADD tensor reduction. Falls back
// to the CPU compatibility layer when CUDA is disabled. "AndFree" convention:
// consumes the input data's reference inside the GPU closure; inObj[0] itself
// is released in the returned Result's _free().
@Nullable
@Override
public Result evalAndFree(final Result... inObj) {
if (!CudaSystem.isEnabled())
return getCompatibilityLayer().evalAndFree(inObj);
final Result input = inObj[0];
final TensorList inputData = input.getData();
@Nonnull final int[] inputSize = inputData.getDimensions();
int length = inputData.length();
CudaTensorList result = CudaSystem.run(gpu -> {
CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
// The GPU tensor now holds the data; release the incoming reference.
inputData.freeRef();
CudaMemory inputMemory = inputTensor.getMemory(gpu);
// Output layout: one scalar per batch item (length x 1 x 1 x 1).
@Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, 1, 1, 1);
long size = (long) precision.size * outputDescriptor.nStride * length;
@Nonnull final CudaMemory outputMemory = gpu.allocate(size, MemoryType.Managed, true);
CudaResource<cudnnReduceTensorDescriptor> reduceTensorDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_ADD, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
// Workspace sized to the input buffer as an upper bound for the reduction.
@Nonnull final CudaMemory workspacePtr = gpu.allocate(inputMemory.size, MemoryType.Device, true);
// NOTE(review): indices are unused (NO_INDICES) but a buffer is still passed;
// 12 bytes per item looks like a conservative sizing — confirm against the
// cuDNN workspace-size query APIs.
@Nonnull final CudaMemory indexPtr = gpu.allocate(12 * length, MemoryType.Device, false);
// outputPtr.synchronize();
gpu.cudnnReduceTensor(reduceTensorDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputMemory.getPtr());
// dirty() presumably marks the device buffers as modified so stale host
// copies are not reused — verify against CudaMemory.
inputMemory.dirty();
outputMemory.dirty();
workspacePtr.dirty();
Stream.of(inputTensor, inputMemory, reduceTensorDescriptor, workspacePtr, indexPtr).forEach(ReferenceCounting::freeRef);
return CudaTensorList.wrap(CudaTensor.wrap(outputMemory, outputDescriptor, precision), length, new int[] { 1, 1, 1 }, precision);
});
return new Result(result, (DeltaSet<Layer> ctx, TensorList delta) -> {
// Backward: d(sum)/dx = 1 for every element, so each item's scalar delta is
// broadcast across the full input shape. Done on the CPU because the cuDNN
// equivalent (below) was reported unsupported.
// Not supported by CuDNN?
// CudaTensorList passback = CudaSystem.run(gpu -> {
// CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, false);
// CudaMemory deltaMemory = deltaTensor.getMemory(gpu);
//
// @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor1 = gpu.newTensorDescriptor(
// precision, length, inputSize[2], inputSize[1], inputSize[0]
// );
// @Nonnull final CudaMemory passbackPtr1 = gpu.allocate((long) precision.size * passbackDescriptor1.nStride * length, MemoryType.Device, false);
// gpu.cudnnAddTensor(precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), deltaMemory.getPtr(),
// precision.getPointer(1.0), passbackDescriptor1.getPtr(), passbackPtr1.getPtr());
// passbackPtr1.dirty();
//
// Stream.of(deltaTensor, deltaMemory, passbackDescriptor1, passbackPtr1).forEach(ReferenceCounting::freeRef);
// return CudaTensorList.wrap(CudaTensor.wrap(passbackPtr1, passbackDescriptor1, precision), length, inputSize, precision);
// });
TensorList passback = TensorArray.wrap(IntStream.range(0, length).mapToObj(i -> {
Tensor tensor = delta.get(i);
// Fill a full-size tensor with the item's single scalar delta value.
Tensor tensor1 = new Tensor(inputSize).setAll(tensor.get(0));
tensor.freeRef();
return tensor1;
}).toArray(i -> new Tensor[i]));
input.accumulate(ctx, passback);
}) {
@Override
protected void _free() {
super._free();
// Release the input Result; its data was already freed in the GPU closure.
input.freeRef();
}
};
}
Usage example of com.simiacryptus.mindseye.lang.TensorList in the MindsEye project by SimiaCryptus: the AvgPoolingLayer class, method eval.
// Average pooling: every output cell holds the mean of one kernel-sized,
// non-overlapping window of the input. Input dimensions must be exact
// multiples of the kernel dimensions. The returned Result holds a reference
// to each input until _free() runs.
@Nonnull
@SuppressWarnings("unchecked")
@Override
public Result eval(@Nonnull final Result... inObj) {
  final int windowSize = Tensor.length(kernelDims);
  final TensorList batch = inObj[0].getData();
  @Nonnull final int[] inputDims = batch.getDimensions();
  // Output shape: input shape divided (exactly) by the kernel shape.
  final int[] outputDims = new int[inputDims.length];
  for (int d = 0; d < inputDims.length; d++) {
    assert 0 == inputDims[d] % kernelDims[d] : inputDims[d] + ":" + kernelDims[d];
    outputDims[d] = inputDims[d] / kernelDims[d];
  }
  // Maps each output coordinate to the list of input coordinates it averages.
  final Map<Coordinate, List<int[]>> coordMap = AvgPoolingLayer.getCoordMap(kernelDims, outputDims);
  final Tensor[] pooled = new Tensor[batch.length()];
  for (int itemIndex = 0; itemIndex < pooled.length; itemIndex++) {
    @Nullable final Tensor source = batch.get(itemIndex);
    @Nonnull final Tensor target = new Tensor(outputDims);
    for (@Nonnull final Entry<Coordinate, List<int[]>> mapping : coordMap.entrySet()) {
      final double total = mapping.getValue().stream().mapToDouble(source::get).sum();
      // Non-finite window sums are skipped, leaving the default cell value.
      if (Double.isFinite(total)) {
        target.add(mapping.getKey(), total / windowSize);
      }
    }
    source.freeRef();
    pooled[itemIndex] = target;
  }
  for (final Result each : inObj) {
    each.addRef();
  }
  return new Result(TensorArray.wrap(pooled), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    if (inObj[0].isAlive()) {
      // Backward: each output delta is spread evenly across its input window.
      final Tensor[] gradients = new Tensor[delta.length()];
      for (int itemIndex = 0; itemIndex < gradients.length; itemIndex++) {
        @Nullable final Tensor deltaTensor = delta.get(itemIndex);
        @Nonnull final Tensor gradient = new Tensor(inputDims);
        for (@Nonnull final Entry<Coordinate, List<int[]>> mapping : coordMap.entrySet()) {
          final double share = deltaTensor.get(mapping.getKey()) / windowSize;
          for (@Nonnull final int[] inputCoord : mapping.getValue()) {
            gradient.add(inputCoord, share);
          }
        }
        deltaTensor.freeRef();
        gradients[itemIndex] = gradient;
      }
      @Nonnull final TensorArray tensorArray = TensorArray.wrap(gradients);
      inObj[0].accumulate(buffer, tensorArray);
    }
  }) {
    @Override
    protected void _free() {
      // Release the input references taken at eval() time.
      for (final Result each : inObj) {
        each.freeRef();
      }
    }
    @Override
    public boolean isAlive() {
      return inObj[0].isAlive();
    }
  };
}
Usage example of com.simiacryptus.mindseye.lang.TensorList in the MindsEye project by SimiaCryptus: the FullyConnectedReferenceLayer class, method eval.
// Reference (non-optimized) fully-connected layer: output[j] = sum_i input[i] * W[i][j].
// Each weight coordinate c encodes a pair in c.getCoords(): coords[0] indexes the
// input, coords[1] indexes the output (established by the get/set calls below).
@Nonnull
@Override
public Result eval(final Result... inObj) {
final Result inputResult = inObj[0];
final TensorList indata = inputResult.getData();
// Keep the input, its data, and the weights alive until _free() runs.
inputResult.addRef();
indata.addRef();
@Nonnull int[] inputDimensions = indata.getDimensions();
assert Tensor.length(inputDimensions) == Tensor.length(this.inputDims) : Arrays.toString(inputDimensions) + " == " + Arrays.toString(this.inputDims);
weights.addRef();
// Forward pass: dense product, evaluated one batch item at a time.
return new Result(TensorArray.wrap(IntStream.range(0, indata.length()).mapToObj(index -> {
@Nullable final Tensor input = indata.get(index);
@Nullable final Tensor output = new Tensor(outputDims);
weights.coordStream(false).forEach(c -> {
int[] coords = c.getCoords();
// Accumulate input[coords[0]] * W[c] into output[coords[1]].
double prev = output.get(coords[1]);
double w = weights.get(c);
double i = input.get(coords[0]);
double value = prev + w * i;
output.set(coords[1], value);
});
input.freeRef();
return output;
}).toArray(i -> new Tensor[i])), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
if (!isFrozen()) {
// Weight gradient: dL/dW[i][j] = input[i] * delta[j], summed over the batch.
final Delta<Layer> deltaBuffer = buffer.get(FullyConnectedReferenceLayer.this, getWeights().getData());
Tensor[] array = IntStream.range(0, indata.length()).mapToObj(i -> {
@Nullable final Tensor inputTensor = indata.get(i);
@Nullable final Tensor deltaTensor = delta.get(i);
// Per-item gradient tensor; intentionally shadows the 'weights' field.
@Nonnull Tensor weights = new Tensor(FullyConnectedReferenceLayer.this.weights.getDimensions());
weights.coordStream(false).forEach(c -> {
int[] coords = c.getCoords();
weights.set(c, inputTensor.get(coords[0]) * deltaTensor.get(coords[1]));
});
inputTensor.freeRef();
deltaTensor.freeRef();
return weights;
}).toArray(i -> new Tensor[i]);
// Sum the per-item gradients. addAndFree presumably releases its receiver
// (per the *AndFree naming) and the right operand is freed explicitly.
// NOTE(review): reduce().get() assumes a non-empty batch — confirm.
Tensor tensor = Arrays.stream(array).reduce((a, b) -> {
Tensor c = a.addAndFree(b);
b.freeRef();
return c;
}).get();
deltaBuffer.addInPlace(tensor.getData()).freeRef();
tensor.freeRef();
}
if (inputResult.isAlive()) {
// Input gradient: dL/dinput[i] = sum_j W[i][j] * delta[j].
@Nonnull final TensorList tensorList = TensorArray.wrap(IntStream.range(0, indata.length()).mapToObj(i -> {
@Nullable final Tensor inputTensor = new Tensor(inputDims);
@Nullable final Tensor deltaTensor = delta.get(i);
weights.coordStream(false).forEach(c -> {
int[] coords = c.getCoords();
inputTensor.set(coords[0], inputTensor.get(coords[0]) + weights.get(c) * deltaTensor.get(coords[1]));
});
deltaTensor.freeRef();
return inputTensor;
}).toArray(i -> new Tensor[i]));
inputResult.accumulate(buffer, tensorList);
}
}) {
@Override
protected void _free() {
// Release the references taken at eval() time.
indata.freeRef();
inputResult.freeRef();
weights.freeRef();
}
@Override
public boolean isAlive() {
// Alive if the input needs gradients or this layer's weights are trainable.
return inputResult.isAlive() || !isFrozen();
}
};
}
Usage example of com.simiacryptus.mindseye.lang.TensorList in the MindsEye project by SimiaCryptus: the GaussianNoiseLayer class, method eval.
// Adds Gaussian noise, scaled by getValue(), to every element of the input.
// The additive noise makes the backward pass an identity: deltas are copied
// through unchanged. The returned Result keeps the input Result and its data
// alive until _free() runs.
@Nonnull
@Override
public Result eval(final Result... inObj) {
  final Result source = inObj[0];
  final TensorList sourceData = source.getData();
  final int batchSize = sourceData.length();
  // Hold references for the lifetime of the returned Result (released in _free).
  source.addRef();
  sourceData.addRef();
  final Tensor[] noisy = new Tensor[batchSize];
  for (int itemIndex = 0; itemIndex < batchSize; itemIndex++) {
    // A fresh Random seeded identically per item keeps output reproducible;
    // note every batch item therefore receives the same noise sequence.
    @Nonnull final Random random = new Random(seed);
    @Nullable final Tensor item = sourceData.get(itemIndex);
    @Nullable final Tensor perturbed = item.map(x -> {
      return x + random.nextGaussian() * getValue();
    });
    item.freeRef();
    noisy[itemIndex] = perturbed;
  }
  return new Result(TensorArray.wrap(noisy), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    if (source.isAlive()) {
      // Identity gradient: copy each delta tensor element-for-element.
      final Tensor[] passbacks = new Tensor[delta.length()];
      for (int itemIndex = 0; itemIndex < passbacks.length; itemIndex++) {
        Tensor deltaTensor = delta.get(itemIndex);
        @Nullable final double[] deltaValues = deltaTensor.getData();
        @Nonnull final Tensor passback = new Tensor(sourceData.getDimensions());
        for (int i = 0; i < passback.length(); i++) {
          passback.set(i, deltaValues[i]);
        }
        deltaTensor.freeRef();
        passbacks[itemIndex] = passback;
      }
      @Nonnull final TensorArray tensorArray = TensorArray.wrap(passbacks);
      source.accumulate(buffer, tensorArray);
    }
  }) {
    @Override
    protected void _free() {
      // Release the references taken at eval() time.
      sourceData.freeRef();
      source.freeRef();
    }
    @Override
    public boolean isAlive() {
      // Alive if the input needs gradients or this layer is trainable.
      return source.isAlive() || !isFrozen();
    }
  };
}
Aggregations