Use of com.simiacryptus.mindseye.lang.Tensor in project MindsEye by SimiaCryptus:
the class ConvolutionLayer, method eval.
/**
 * Forward pass: CPU convolution of the first input against this layer's kernel.
 * Returns a Result whose accumulator computes both the kernel gradient (when the
 * layer is trainable) and the input gradient (when the input is alive).
 * Reference counting follows the project's ReferenceCounting protocol: every
 * reference taken here is released either after use or in _free().
 */
@Nonnull
@Override
public Result eval(@Nonnull final Result... inObj) {
    Arrays.stream(inObj).forEach(nnResult -> nnResult.addRef());
    final Result input = inObj[0];
    final TensorList batch = input.getData();
    batch.addRef(); // held until _free(); the backward closure re-reads batch
    @Nonnull final int[] inputDims = batch.get(0).getDimensions();
    @Nonnull final int[] kernelDims = kernel.getDimensions();
    @Nullable final double[] kernelData = ConvolutionLayer.this.kernel.getData();
    @Nonnull final ConvolutionController convolutionController = new ConvolutionController(inputDims, kernelDims, paddingX, paddingY);
    // One output tensor per batch element, sized by the controller's output geometry.
    final Tensor[] output = IntStream.range(0, batch.length()).mapToObj(dataIndex -> new Tensor(convolutionController.getOutputDims())).toArray(i -> new Tensor[i]);
    try {
        // Expose raw double[] buffers to the convolve routine.
        // NOTE(review): the forward pass detach()es each input tensor while the
        // backward closure freeRef()s them — confirm this asymmetry is intended.
        final double[][] inputBuffers = batch.stream().map(x -> {
            @Nullable double[] data = x.getData();
            x.detach();
            return data;
        }).toArray(i -> new double[i][]);
        final double[][] outputBuffers = Arrays.stream(output).map(x -> x.getData()).toArray(i -> new double[i][]);
        convolutionController.convolve(inputBuffers, kernelData, outputBuffers);
    } catch (@Nonnull final Throwable e) {
        throw new RuntimeException("Error mapCoords image res " + Arrays.toString(inputDims), e);
    }
    int outputLength = output.length;
    return new Result(TensorArray.wrap(output), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList error) -> {
        // Backward, part 1: accumulate the kernel (weight) gradient.
        if (!isFrozen()) {
            final double[][] inputBuffers = batch.stream().map(x -> {
                @Nullable double[] data = x.getData();
                x.freeRef();
                return data;
            }).toArray(i -> new double[i][]);
            final double[][] outputBuffers = error.stream().map(x -> {
                @Nullable double[] data = x.getData();
                x.freeRef();
                return data;
            }).toArray(i -> new double[i][]);
            @Nonnull final Tensor weightGradient = new Tensor(kernelDims);
            convolutionController.gradient(inputBuffers, weightGradient.getData(), outputBuffers);
            // Delta buffer is keyed by this layer and its kernel data array.
            buffer.get(ConvolutionLayer.this, kernelData).addInPlace(weightGradient.getData()).freeRef();
            weightGradient.freeRef();
        }
        // Backward, part 2: gradient w.r.t. the input, propagated upstream.
        if (input.isAlive()) {
            // outputLength equals the batch length captured at eval time.
            final Tensor[] inputBufferTensors = IntStream.range(0, outputLength).mapToObj(dataIndex -> new Tensor(inputDims)).toArray(i -> new Tensor[i]);
            final double[][] inputBuffers = Arrays.stream(inputBufferTensors).map(x -> {
                @Nullable double[] data = x.getData();
                return data;
            }).toArray(i -> new double[i][]);
            final double[][] outputBuffers = error.stream().map(x -> {
                @Nullable double[] data = x.getData();
                x.freeRef();
                return data;
            }).toArray(i -> new double[i][]);
            convolutionController.backprop(inputBuffers, kernelData, outputBuffers);
            @Nonnull TensorArray tensorArray = TensorArray.wrap(inputBufferTensors);
            input.accumulate(buffer, tensorArray);
        }
    }) {
        @Override
        protected void _free() {
            // Release the references taken at the top of eval().
            Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
            batch.freeRef();
        }

        @Override
        public boolean isAlive() {
            // Gradients flow if the input wants them or this layer's kernel is trainable.
            return input.isAlive() || !isFrozen();
        }
    };
}
Use of com.simiacryptus.mindseye.lang.Tensor in project MindsEye by SimiaCryptus:
the class BandAvgReducerLayer, method evalAndFree.
/**
 * GPU forward pass: averages each band of each input image down to a single
 * value via cuDNN's AVG tensor reduction, producing a [1, 1, bands] tensor per
 * batch element. Falls back to the compatibility layer when CUDA is disabled.
 * The backward pass broadcasts each band delta uniformly over the band's pixels.
 */
@Nullable
@Override
public Result evalAndFree(final Result... inObj) {
    if (!CudaSystem.isEnabled())
        return getCompatibilityLayer().evalAndFree(inObj);
    final Result input = inObj[0];
    TensorList inputData = input.getData();
    @Nonnull final int[] inputSize = inputData.getDimensions();
    int length = inputData.length();
    final int bands = inputSize[2];
    CudaTensorList result = CudaSystem.run(gpu -> {
        CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
        // Output geometry: one scalar per (batch, band).
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, bands, 1, 1);
        long size = (long) precision.size * outputDescriptor.nStride * length;
        @Nonnull final CudaMemory outputPtr = gpu.allocate(size, MemoryType.Managed, true);
        CudaResource<cudnnReduceTensorDescriptor> reduceTensorDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_AVG, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
        CudaMemory inputMemory = inputTensor.getMemory(gpu);
        // Scratch space required by cudnnReduceTensor; indices unused (NO_INDICES)
        // but a buffer is still supplied.
        @Nonnull final CudaMemory workspacePtr = gpu.allocate(inputMemory.size, MemoryType.Device, true);
        @Nonnull final CudaMemory indexPtr = gpu.allocate(12 * length, MemoryType.Device, false);
        gpu.cudnnReduceTensor(reduceTensorDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(alpha), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr());
        outputPtr.dirty();
        inputMemory.dirty();
        // Release every GPU-side reference taken in this lambda, plus inputData
        // (this is evalAndFree: the layer owns the input data reference).
        Stream.of(inputMemory, inputTensor, reduceTensorDescriptor, workspacePtr, indexPtr, inputData).forEach(ReferenceCounting::freeRef);
        return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, new int[] { 1, 1, bands }, precision);
    });
    int pixels = inputSize[0] * inputSize[1];
    return new Result(result, (DeltaSet<Layer> ctx, TensorList delta) -> {
        TensorList passback;
        // CPU backward: each band's delta is spread evenly over that band's
        // pixels (scaled by alpha / pixelCount), inverting the forward average.
        passback = TensorArray.wrap(delta.stream().map(x -> {
            Tensor tensor = new Tensor(inputSize[0], inputSize[1], inputSize[2]).setByCoord(c -> x.get(c.getCoords()[2]) * alpha / pixels);
            x.freeRef();
            return tensor;
        }).toArray(i -> new Tensor[i]));
        // NOTE(review): disabled GPU implementation of the same broadcast,
        // kept for reference — consider deleting if the CPU path is final.
        // passback = CudaSystem.run(gpu -> {
        // CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
        // @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision,
        // length, inputSize[2], inputSize[1], inputSize[0]);
        // @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
        // CudaMemory deltaMemory = deltaTensor.getMemory(gpu);
        // @Nonnull final CudaDevice.CudaTensorDescriptor inputDescriptor = gpu.newTensorDescriptor(precision,
        // 1, 1, inputSize[1], inputSize[0]);
        // for(int batch=0;batch<length;batch++){
        // Tensor tensor = delta.get(batch);
        // for(int band=0;band<bands;band++){
        // int i = batch * bands + band;
        // CudaMemory img = outputPtr.withByteOffset(precision.size * i * outputDescriptor.cStride);
        // CudaMemory val = deltaMemory.withByteOffset(precision.size * i);
        // gpu.cudnnSetTensor(inputDescriptor.getPtr(), img.getPtr(), precision.getPointer(tensor.get(band) / outputDescriptor.cStride));
        // img.freeRef();
        // val.freeRef();
        // outputPtr.dirty().synchronize();
        // }
        // }
        // Stream.of(deltaMemory, deltaTensor, inputDescriptor).forEach(ReferenceCounting::freeRef);
        // return CudaTensorList.wrap(CudaTensor.wrap(outputPtr, outputDescriptor, precision), length, inputSize, precision);
        // });
        input.accumulate(ctx, passback);
    }) {
        @Override
        protected void _free() {
            super._free();
            // Release the upstream Result held by this layer's output.
            input.freeRef();
        }
    };
}
Use of com.simiacryptus.mindseye.lang.Tensor in project MindsEye by SimiaCryptus:
the class ExplodedConvolutionGrid, method read.
/**
 * Assembles a single master filter tensor from the per-leg tensors produced by
 * the given extractor. With exactly one leg, the leg's tensor is returned
 * directly (ownership passes to the caller); otherwise each leg's values are
 * copied into a freshly allocated master tensor at the band offset reported by
 * {@link #getFilterBand}, and the leg tensors are released.
 *
 * @param extractor maps each exploded leg to its contribution tensor
 * @return the combined filter tensor
 */
public Tensor read(@Nonnull Function<ExplodedConvolutionLeg, Tensor> extractor) {
    if (subLayers.size() == 1) {
        return extractor.apply(subLayers.get(0));
    }
    @Nonnull final Tensor combined = new Tensor(convolutionParams.masterFilterDimensions);
    for (@Nonnull ExplodedConvolutionLeg leg : subLayers) {
        final Tensor legTensor = extractor.apply(leg);
        // Copy each value to the same (x, y) position, remapping the band index
        // into the master filter's band space for this leg.
        legTensor.forEach((value, coord) -> {
            final int[] xy = coord.getCoords();
            combined.set(xy[0], xy[1], getFilterBand(leg, xy[2]), value);
        }, false);
        legTensor.freeRef();
    }
    return combined;
}
Use of com.simiacryptus.mindseye.lang.Tensor in project MindsEye by SimiaCryptus:
the class MaxDropoutNoiseLayer, method getCellMap.
/**
 * Partitions every coordinate of a tensor with the given dimensions into
 * rectangular cells of size {@code kernelSize}, returning the coordinates
 * grouped per cell (one inner list per cell).
 *
 * @param dims the tensor dimensions to partition
 * @return the coordinates of each cell, grouped
 */
private List<List<Coordinate>> getCellMap(@Nonnull final IntArray dims) {
    Tensor tensor = new Tensor(dims.data);
    ArrayList<List<Coordinate>> lists = new ArrayList<>(tensor.coordStream(true).collect(Collectors.groupingBy((@Nonnull final Coordinate c) -> {
        // Mixed-radix cell id: the radix for each dimension is that dimension's
        // cell count. BUG FIX: the original multiplied by the PREVIOUS
        // dimension's cell count (the 'max' variable was updated after use and
        // started at 0), which could map distinct cells to the same id — e.g.
        // dims {4,8} with kernel {2,2} collided cells (0,2) and (1,0).
        int cellId = 0;
        for (int dim = 0; dim < dims.size(); dim++) {
            final int pos = c.getCoords()[dim] / kernelSize[dim];
            final int cells = dims.get(dim) / kernelSize[dim];
            cellId = cellId * cells + pos;
        }
        return cellId;
    })).values());
    tensor.freeRef();
    return lists;
}
Use of com.simiacryptus.mindseye.lang.Tensor in project MindsEye by SimiaCryptus:
the class MaxImageBandLayer, method eval.
/**
 * Forward pass: for each batch element and each band, selects the spatial
 * maximum of that band, producing a [1, 1, bands] tensor per element. The
 * backward pass routes each band's delta back to the coordinate where the
 * maximum was found (all other positions receive zero).
 */
@Nonnull
@Override
public Result eval(@Nonnull final Result... inObj) {
    assert 1 == inObj.length;
    final TensorList inputData = inObj[0].getData();
    inputData.addRef(); // held until _free(); re-read by the backward closure
    inputData.length();
    @Nonnull final int[] inputDims = inputData.getDimensions();
    assert 3 == inputDims.length;
    Arrays.stream(inObj).forEach(nnResult -> nnResult.addRef());
    // Record, per batch element and per band, the coordinate of the maximum
    // value; the backward pass uses these to place the deltas.
    final Coordinate[][] maxCoords = inputData.stream().map(data -> {
        Coordinate[] coordinates = IntStream.range(0, inputDims[2]).mapToObj(band -> {
            return data.coordStream(true).filter(e -> e.getCoords()[2] == band).max(Comparator.comparing(c -> data.get(c))).get();
        }).toArray(i -> new Coordinate[i]);
        data.freeRef();
        return coordinates;
    }).toArray(i -> new Coordinate[i][]);
    return new Result(TensorArray.wrap(IntStream.range(0, inputData.length()).mapToObj(dataIndex -> {
        // Gather the max value of each band into a 1x1xbands tensor.
        Tensor tensor = inputData.get(dataIndex);
        final DoubleStream doubleStream = IntStream.range(0, inputDims[2]).mapToDouble(band -> {
            final int[] maxCoord = maxCoords[dataIndex][band].getCoords();
            double v = tensor.get(maxCoord[0], maxCoord[1], band);
            return v;
        });
        Tensor tensor1 = new Tensor(1, 1, inputDims[2]).set(Tensor.getDoubles(doubleStream, inputDims[2]));
        tensor.freeRef();
        return tensor1;
    }).toArray(i -> new Tensor[i])), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
        if (inObj[0].isAlive()) {
            // Scatter each band delta back to the recorded argmax coordinate.
            @Nonnull TensorArray tensorArray = TensorArray.wrap(IntStream.range(0, delta.length()).parallel().mapToObj(dataIndex -> {
                Tensor deltaTensor = delta.get(dataIndex);
                @Nonnull final Tensor passback = new Tensor(inputData.getDimensions());
                IntStream.range(0, inputDims[2]).forEach(b -> {
                    final int[] maxCoord = maxCoords[dataIndex][b].getCoords();
                    passback.set(new int[] { maxCoord[0], maxCoord[1], b }, deltaTensor.get(0, 0, b));
                });
                deltaTensor.freeRef();
                return passback;
            }).toArray(i -> new Tensor[i]));
            inObj[0].accumulate(buffer, tensorArray);
        }
    }) {
        @Override
        protected void _free() {
            // Release the references taken at the top of eval().
            Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
            inputData.freeRef();
        }

        @Override
        public boolean isAlive() {
            return inObj[0].isAlive();
        }
    };
}
Aggregations