Use of com.simiacryptus.mindseye.lang.DeltaSet in project MindsEye by SimiaCryptus.
The BandAvgReducerLayer class, method evalAndFree:
  @Nullable
  @Override
  public Result evalAndFree(final Result... inObj) {
    if (!CudaSystem.isEnabled())
      return getCompatibilityLayer().evalAndFree(inObj);
    final Result input = inObj[0];
    TensorList inputData = input.getData();
    @Nonnull final int[] inputSize = inputData.getDimensions();
    int length = inputData.length();
    final int bands = inputSize[2];
    CudaTensorList result = CudaSystem.run(gpu -> {
      CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
      @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, bands, 1, 1);
      long size = (long) precision.size * outputDescriptor.nStride * length;
      @Nonnull final CudaMemory outputPtr = gpu.allocate(size, MemoryType.Managed, true);
      CudaResource<cudnnReduceTensorDescriptor> reduceTensorDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_AVG, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
      CudaMemory inputMemory = inputTensor.getMemory(gpu);
      @Nonnull final CudaMemory workspacePtr = gpu.allocate(inputMemory.size, MemoryType.Device, true);
      @Nonnull final CudaMemory indexPtr = gpu.allocate(12 * length, MemoryType.Device, false);
      gpu.cudnnReduceTensor(reduceTensorDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(alpha), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr());
      outputPtr.dirty();
      inputMemory.dirty();
      Stream.of(inputMemory, inputTensor, reduceTensorDescriptor, workspacePtr, indexPtr, inputData).forEach(ReferenceCounting::freeRef);
      return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, new int[] { 1, 1, bands }, precision);
    });
    int pixels = inputSize[0] * inputSize[1];
    return new Result(result, (DeltaSet<Layer> ctx, TensorList delta) -> {
      TensorList passback;
      passback = TensorArray.wrap(delta.stream().map(x -> {
        Tensor tensor = new Tensor(inputSize[0], inputSize[1], inputSize[2]).setByCoord(c -> x.get(c.getCoords()[2]) * alpha / pixels);
        x.freeRef();
        return tensor;
      }).toArray(i -> new Tensor[i]));
      // passback = CudaSystem.run(gpu -> {
      // CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
      // @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision,
      // length, inputSize[2], inputSize[1], inputSize[0]);
      // @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
      // CudaMemory deltaMemory = deltaTensor.getMemory(gpu);
      // @Nonnull final CudaDevice.CudaTensorDescriptor inputDescriptor = gpu.newTensorDescriptor(precision,
      // 1, 1, inputSize[1], inputSize[0]);
      // for(int batch=0;batch<length;batch++){
      // Tensor tensor = delta.get(batch);
      // for(int band=0;band<bands;band++){
      // int i = batch * bands + band;
      // CudaMemory img = outputPtr.withByteOffset(precision.size * i * outputDescriptor.cStride);
      // CudaMemory val = deltaMemory.withByteOffset(precision.size * i);
      // gpu.cudnnSetTensor(inputDescriptor.getPtr(), img.getPtr(), precision.getPointer(tensor.get(band) / outputDescriptor.cStride));
      // img.freeRef();
      // val.freeRef();
      // outputPtr.dirty().synchronize();
      // }
      // }
      // Stream.of(deltaMemory, deltaTensor, inputDescriptor).forEach(ReferenceCounting::freeRef);
      // return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, inputSize, precision);
      // });
      input.accumulate(ctx, passback);
    }) {
      @Override
      protected void _free() {
        super._free();
        input.freeRef();
      }
    };
  }
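In the CPU-side backward pass above, each band's incoming gradient is spread uniformly over every pixel of that band and scaled by alpha. A minimal plain-Java sketch of that arithmetic, using ordinary arrays in place of the MindsEye Tensor API (the [height][width][bands] layout is an assumption made for illustration only):

  // Sketch: gradient of an alpha-scaled per-band average.
  // deltaPerBand has one entry per band of the reduced output.
  static double[][][] bandAvgBackward(double[] deltaPerBand, int height, int width, double alpha) {
    int bands = deltaPerBand.length;
    int pixels = height * width;
    double[][][] passback = new double[height][width][bands];
    for (int y = 0; y < height; y++) {
      for (int x = 0; x < width; x++) {
        for (int b = 0; b < bands; b++) {
          // Each pixel contributed 1/pixels to the band average, so it receives
          // delta * alpha / pixels of the incoming gradient.
          passback[y][x][b] = deltaPerBand[b] * alpha / pixels;
        }
      }
    }
    return passback;
  }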
Use of com.simiacryptus.mindseye.lang.DeltaSet in project MindsEye by SimiaCryptus.
The BinarySumLayer class, method evalAndFree:
  @Nullable
  @Override
  public Result evalAndFree(@Nonnull final Result... inObj) {
    if (inObj.length == 1) {
      if (rightFactor != 1)
        throw new IllegalStateException();
      if (leftFactor != 1)
        throw new IllegalStateException();
      return inObj[0];
    }
    if (inObj.length > 2) {
      if (rightFactor != 1)
        throw new IllegalStateException();
      if (leftFactor != 1)
        throw new IllegalStateException();
      return Arrays.stream(inObj).reduce((a, b) -> evalAndFree(a, b)).get();
    }
    assert (inObj.length == 2);
    final TensorList leftData = inObj[0].getData();
    final TensorList rightData = inObj[1].getData();
    int[] leftDimensions = leftData.getDimensions();
    if (3 < leftDimensions.length) {
      throw new IllegalArgumentException("dimensions=" + Arrays.toString(leftDimensions));
    }
    @Nonnull final int[] dimensions = { leftDimensions.length < 1 ? 0 : leftDimensions[0], leftDimensions.length < 2 ? 1 : leftDimensions[1], leftDimensions.length < 3 ? 1 : leftDimensions[2] };
    final int length = leftData.length();
    if (length != rightData.length())
      throw new IllegalArgumentException();
    if (3 != dimensions.length) {
      throw new IllegalArgumentException("dimensions=" + Arrays.toString(dimensions));
    }
    for (int i = 1; i < inObj.length; i++) {
      if (Tensor.length(dimensions) != Tensor.length(inObj[i].getData().getDimensions())) {
        throw new IllegalArgumentException(Arrays.toString(dimensions) + " != " + Arrays.toString(inObj[i].getData().getDimensions()));
      }
    }
    if (!CudaSystem.isEnabled())
      return getCompatibilityLayer().evalAndFree(inObj);
    return new Result(CudaSystem.run(gpu -> {
      @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_ADD, precision);
      @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
      // .getDenseAndFree(gpu);//.moveTo(gpu.getDeviceNumber());
      @Nullable final CudaTensor lPtr = gpu.getTensor(leftData, precision, MemoryType.Device, false);
      // .getDenseAndFree(gpu);//.moveTo(gpu.getDeviceNumber());
      @Nullable final CudaTensor rPtr = gpu.getTensor(rightData, precision, MemoryType.Device, false);
      @Nonnull final CudaMemory outputPtr = gpu.allocate(precision.size * Tensor.length(dimensions) * length, MemoryType.Managed, true);
      CudaMemory lPtrMemory = lPtr.getMemory(gpu);
      CudaMemory rPtrMemory = rPtr.getMemory(gpu);
      gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(leftFactor), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(rightFactor), rPtr.descriptor.getPtr(), rPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr());
      assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
      lPtrMemory.dirty();
      rPtrMemory.dirty();
      outputPtr.dirty();
      rPtrMemory.freeRef();
      lPtrMemory.freeRef();
      CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, outputDescriptor, precision);
      Stream.<ReferenceCounting>of(opDescriptor, lPtr, rPtr).forEach(ReferenceCounting::freeRef);
      return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
    }, leftData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
      Runnable a = () -> {
        if (inObj[0].isAlive()) {
          CudaTensorList tensorList = CudaSystem.run(gpu -> {
            @Nullable final CudaTensor lPtr = gpu.getTensor(delta, precision, MemoryType.Device, false);
            @Nonnull final CudaMemory passbackPtr = gpu.allocate(precision.size * Tensor.length(dimensions) * length, MemoryType.Managed.normalize(), true);
            @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
            CudaMemory lPtrMemory = lPtr.getMemory(gpu);
            gpu.cudnnTransformTensor(precision.getPointer(leftFactor), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(0.0), passbackDescriptor.getPtr(), passbackPtr.getPtr());
            assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
            passbackPtr.dirty();
            lPtrMemory.dirty();
            lPtrMemory.freeRef();
            CudaTensor cudaTensor = CudaTensor.wrap(passbackPtr, passbackDescriptor, precision);
            lPtr.freeRef();
            return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
          }, delta);
          inObj[0].accumulate(buffer, tensorList);
        }
      };
      Runnable b = () -> {
        if (inObj[1].isAlive()) {
          CudaTensorList tensorList = CudaSystem.run(gpu -> {
            @Nullable final CudaTensor lPtr = gpu.getTensor(delta, precision, MemoryType.Device, false);
            @Nonnull final CudaMemory outputPtr = gpu.allocate(precision.size * Tensor.length(dimensions) * length, MemoryType.Managed.normalize(), true);
            @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
            CudaMemory lPtrMemory = lPtr.getMemory(gpu);
            gpu.cudnnTransformTensor(precision.getPointer(rightFactor), lPtr.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(0.0), passbackDescriptor.getPtr(), outputPtr.getPtr());
            outputPtr.dirty();
            lPtrMemory.dirty();
            lPtrMemory.freeRef();
            CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, passbackDescriptor, precision);
            lPtr.freeRef();
            return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
          }, delta);
          inObj[1].accumulate(buffer, tensorList);
        }
      };
      if (CoreSettings.INSTANCE.isSingleThreaded())
        TestUtil.runAllSerial(a, b);
      else
        TestUtil.runAllParallel(a, b);
    }) {
      @Override
      protected void _free() {
        Arrays.stream(inObj).forEach(x -> x.freeRef());
        leftData.freeRef();
        rightData.freeRef();
      }
      @Override
      public boolean isAlive() {
        for (@Nonnull final Result element : inObj)
          if (element.isAlive()) {
            return true;
          }
        return false;
      }
    };
  }
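The backward lambda above only rescales the incoming gradient by the matching factor before accumulating it into each input via the DeltaSet. A minimal array-based sketch of the forward and backward arithmetic, written for illustration and not tied to the cuDNN calls used above:

  // Sketch: y = leftFactor * a + rightFactor * b, with the matching gradients.
  static double[] binarySumForward(double[] a, double[] b, double leftFactor, double rightFactor) {
    double[] out = new double[a.length];
    for (int i = 0; i < a.length; i++)
      out[i] = leftFactor * a[i] + rightFactor * b[i];
    return out;
  }

  // Gradient flowing back to the left input; the right input receives
  // rightFactor * delta in the same way.
  static double[] binarySumBackwardLeft(double[] delta, double leftFactor) {
    double[] passback = new double[delta.length];
    for (int i = 0; i < delta.length; i++)
      passback[i] = leftFactor * delta[i];
    return passback;
  }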
Use of com.simiacryptus.mindseye.lang.DeltaSet in project MindsEye by SimiaCryptus.
The MaxImageBandLayer class, method eval:
  @Nonnull
  @Override
  public Result eval(@Nonnull final Result... inObj) {
    assert 1 == inObj.length;
    final TensorList inputData = inObj[0].getData();
    inputData.addRef();
    inputData.length();
    @Nonnull final int[] inputDims = inputData.getDimensions();
    assert 3 == inputDims.length;
    Arrays.stream(inObj).forEach(nnResult -> nnResult.addRef());
    final Coordinate[][] maxCoords = inputData.stream().map(data -> {
      Coordinate[] coordinates = IntStream.range(0, inputDims[2]).mapToObj(band -> {
        return data.coordStream(true).filter(e -> e.getCoords()[2] == band).max(Comparator.comparing(c -> data.get(c))).get();
      }).toArray(i -> new Coordinate[i]);
      data.freeRef();
      return coordinates;
    }).toArray(i -> new Coordinate[i][]);
    return new Result(TensorArray.wrap(IntStream.range(0, inputData.length()).mapToObj(dataIndex -> {
      Tensor tensor = inputData.get(dataIndex);
      final DoubleStream doubleStream = IntStream.range(0, inputDims[2]).mapToDouble(band -> {
        final int[] maxCoord = maxCoords[dataIndex][band].getCoords();
        double v = tensor.get(maxCoord[0], maxCoord[1], band);
        return v;
      });
      Tensor tensor1 = new Tensor(1, 1, inputDims[2]).set(Tensor.getDoubles(doubleStream, inputDims[2]));
      tensor.freeRef();
      return tensor1;
    }).toArray(i -> new Tensor[i])), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
      if (inObj[0].isAlive()) {
        @Nonnull TensorArray tensorArray = TensorArray.wrap(IntStream.range(0, delta.length()).parallel().mapToObj(dataIndex -> {
          Tensor deltaTensor = delta.get(dataIndex);
          @Nonnull final Tensor passback = new Tensor(inputData.getDimensions());
          IntStream.range(0, inputDims[2]).forEach(b -> {
            final int[] maxCoord = maxCoords[dataIndex][b].getCoords();
            passback.set(new int[] { maxCoord[0], maxCoord[1], b }, deltaTensor.get(0, 0, b));
          });
          deltaTensor.freeRef();
          return passback;
        }).toArray(i -> new Tensor[i]));
        inObj[0].accumulate(buffer, tensorArray);
      }
    }) {
      @Override
      protected void _free() {
        Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
        inputData.freeRef();
      }
      @Override
      public boolean isAlive() {
        return inObj[0].isAlive();
      }
    };
  }
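MaxImageBandLayer reduces each band to its spatial maximum, so the backward pass routes each band's gradient only to the pixel that produced that maximum. A small array-based sketch of that routing (the [height][width][bands] layout is again an assumption for illustration):

  // Sketch: per-band spatial max (forward argmax) and gradient routing (backward).
  // Each band's delta goes only to the pixel that won the max; all other pixels get zero.
  static double[][][] maxBandBackward(double[][][] input, double[] deltaPerBand) {
    int height = input.length, width = input[0].length, bands = deltaPerBand.length;
    double[][][] passback = new double[height][width][bands];
    for (int b = 0; b < bands; b++) {
      int maxY = 0, maxX = 0;
      for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++)
          if (input[y][x][b] > input[maxY][maxX][b]) {
            maxY = y;
            maxX = x;
          }
      passback[maxY][maxX][b] = deltaPerBand[b];
    }
    return passback;
  }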
Use of com.simiacryptus.mindseye.lang.DeltaSet in project MindsEye by SimiaCryptus.
The MaxPoolingLayer class, method eval:
  @Nonnull
  @Override
  public Result eval(@Nonnull final Result... inObj) {
    Arrays.stream(inObj).forEach(nnResult -> nnResult.addRef());
    final Result in = inObj[0];
    in.getData().length();
    @Nonnull final int[] inputDims = in.getData().getDimensions();
    final List<Tuple2<Integer, int[]>> regions = MaxPoolingLayer.calcRegionsCache.apply(new MaxPoolingLayer.CalcRegionsParameter(inputDims, kernelDims));
    final Tensor[] outputA = IntStream.range(0, in.getData().length()).mapToObj(dataIndex -> {
      final int[] newDims = IntStream.range(0, inputDims.length).map(i -> {
        return (int) Math.ceil(inputDims[i] * 1.0 / kernelDims[i]);
      }).toArray();
      @Nonnull final Tensor output = new Tensor(newDims);
      return output;
    }).toArray(i -> new Tensor[i]);
    Arrays.stream(outputA).mapToInt(x -> x.length()).sum();
    @Nonnull final int[][] gradientMapA = new int[in.getData().length()][];
    IntStream.range(0, in.getData().length()).forEach(dataIndex -> {
      @Nullable final Tensor input = in.getData().get(dataIndex);
      final Tensor output = outputA[dataIndex];
      @Nonnull final IntToDoubleFunction keyExtractor = inputCoords -> input.get(inputCoords);
      @Nonnull final int[] gradientMap = new int[input.length()];
      regions.parallelStream().forEach(tuple -> {
        final Integer from = tuple.getFirst();
        final int[] toList = tuple.getSecond();
        int toMax = -1;
        double bestValue = Double.NEGATIVE_INFINITY;
        for (final int c : toList) {
          final double value = keyExtractor.applyAsDouble(c);
          if (-1 == toMax || bestValue < value) {
            bestValue = value;
            toMax = c;
          }
        }
        gradientMap[from] = toMax;
        output.set(from, input.get(toMax));
      });
      input.freeRef();
      gradientMapA[dataIndex] = gradientMap;
    });
    return new Result(TensorArray.wrap(outputA), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList data) -> {
      if (in.isAlive()) {
        @Nonnull TensorArray tensorArray = TensorArray.wrap(IntStream.range(0, in.getData().length()).parallel().mapToObj(dataIndex -> {
          @Nonnull final Tensor backSignal = new Tensor(inputDims);
          final int[] ints = gradientMapA[dataIndex];
          @Nullable final Tensor datum = data.get(dataIndex);
          for (int i = 0; i < datum.length(); i++) {
            backSignal.add(ints[i], datum.get(i));
          }
          datum.freeRef();
          return backSignal;
        }).toArray(i -> new Tensor[i]));
        in.accumulate(buffer, tensorArray);
      }
    }) {
      @Override
      protected void _free() {
        Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
      }
      @Override
      public boolean isAlive() {
        return in.isAlive();
      }
    };
  }
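The gradientMapA structure above records, for every pooled output element, the flat index of the input element that won the max; the backward pass then adds each output delta onto exactly that index. A one-dimensional sketch of the same bookkeeping, written for illustration and assuming the output array is sized to ceil(input.length / kernel):

  // Sketch: 1-D max pooling with an explicit gradient map, mirroring gradientMapA above.
  static int[] maxPoolForward(double[] input, int kernel, double[] output) {
    int[] gradientMap = new int[output.length];
    for (int o = 0; o < output.length; o++) {
      int best = o * kernel;
      for (int k = 1; k < kernel && o * kernel + k < input.length; k++)
        if (input[o * kernel + k] > input[best])
          best = o * kernel + k;
      gradientMap[o] = best; // remember which input element won this cell
      output[o] = input[best];
    }
    return gradientMap;
  }

  static double[] maxPoolBackward(int[] gradientMap, double[] delta, int inputLength) {
    double[] backSignal = new double[inputLength];
    for (int o = 0; o < delta.length; o++)
      backSignal[gradientMap[o]] += delta[o]; // route the delta to the winning index
    return backSignal;
  }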
Use of com.simiacryptus.mindseye.lang.DeltaSet in project MindsEye by SimiaCryptus.
The MonitoringSynapse class, method eval:
  @Override
  public Result eval(@Nonnull final Result... inObj) {
    assert 1 == inObj.length;
    final Result input = inObj[0];
    final TensorList inputdata = input.getData();
    input.addRef();
    inputdata.addRef();
    System.nanoTime();
    System.nanoTime();
    totalBatches++;
    totalItems += inputdata.length();
    forwardStatistics.clear();
    inputdata.stream().parallel().forEach(t -> {
      forwardStatistics.add(t.getData());
      t.freeRef();
    });
    return new Result(inputdata, (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList data) -> {
      backpropStatistics.clear();
      data.addRef();
      input.accumulate(buffer, data);
      data.stream().parallel().forEach(t -> {
        backpropStatistics.add(t.getData());
        t.freeRef();
      });
    }) {
      @Override
      public boolean isAlive() {
        return input.isAlive();
      }
      @Override
      protected void _free() {
        input.freeRef();
      }
    };
  }
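MonitoringSynapse is a pass-through: the forward data and the backward gradient are forwarded unchanged while running statistics are recorded for each. A compact sketch of the same idea, with the ScalarStatistics-style accumulators replaced by plain sum/count fields for illustration:

  // Sketch: a pass-through monitor in the spirit of MonitoringSynapse.
  // Activations and gradients flow through unchanged; simple statistics are recorded.
  final class PassThroughMonitor {
    double forwardSum; long forwardCount;
    double backwardSum; long backwardCount;

    double[] forward(double[] activations) {
      for (double v : activations) { forwardSum += v; forwardCount++; }
      return activations; // identity on the forward pass
    }

    double[] backward(double[] delta) {
      for (double v : delta) { backwardSum += v; backwardCount++; }
      return delta; // identity on the backward pass
    }
  }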