Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
The class FullyConnectedLayer, method eval:
@Nonnull
@Override
public Result eval(@Nonnull final Result... inObj) {
  final TensorList indata = inObj[0].getData();
  indata.addRef();
  for (@Nonnull Result result : inObj) {
    result.addRef();
  }
  FullyConnectedLayer.this.addRef();
  assert Tensor.length(indata.getDimensions()) == Tensor.length(this.inputDims) : Arrays.toString(indata.getDimensions()) + " == " + Arrays.toString(this.inputDims);
  // Forward pass: multiply each input tensor by the transposed weight matrix.
  @Nonnull DoubleMatrix doubleMatrix = new DoubleMatrix(Tensor.length(indata.getDimensions()), Tensor.length(outputDims), this.weights.getData());
  @Nonnull final DoubleMatrix matrixObj = FullyConnectedLayer.transpose(doubleMatrix);
  @Nonnull TensorArray tensorArray = TensorArray.wrap(IntStream.range(0, indata.length()).parallel().mapToObj(dataIndex -> {
    @Nullable final Tensor input = indata.get(dataIndex);
    @Nullable final Tensor output = new Tensor(outputDims);
    matrixObj.mmuli(new DoubleMatrix(input.length(), 1, input.getData()), new DoubleMatrix(output.length(), 1, output.getData()));
    input.freeRef();
    return output;
  }).toArray(i -> new Tensor[i]));
  RecycleBin.DOUBLES.recycle(matrixObj.data, matrixObj.data.length);
  this.weights.addRef();
  return new Result(tensorArray, (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    if (!isFrozen()) {
      // Weight gradient: accumulate the delta/input outer products across the batch on a fixed number of worker threads.
      final Delta<Layer> deltaBuffer = buffer.get(FullyConnectedLayer.this, this.weights.getData());
      final int threads = 4;
      IntStream.range(0, threads).parallel().mapToObj(x -> x).flatMap(thread -> {
        @Nullable Stream<Tensor> stream = IntStream.range(0, indata.length()).filter(i -> thread == i % threads).mapToObj(dataIndex -> {
          @Nonnull final Tensor weightDelta = new Tensor(Tensor.length(inputDims), Tensor.length(outputDims));
          Tensor deltaTensor = delta.get(dataIndex);
          Tensor inputTensor = indata.get(dataIndex);
          FullyConnectedLayer.crossMultiplyT(deltaTensor.getData(), inputTensor.getData(), weightDelta.getData());
          inputTensor.freeRef();
          deltaTensor.freeRef();
          return weightDelta;
        });
        return stream;
      }).reduce((a, b) -> {
        @Nullable Tensor c = a.addAndFree(b);
        b.freeRef();
        return c;
      }).map(data -> {
        @Nonnull Delta<Layer> layerDelta = deltaBuffer.addInPlace(data.getData());
        data.freeRef();
        return layerDelta;
      });
      deltaBuffer.freeRef();
    }
    if (inObj[0].isAlive()) {
      // Input gradient: project each output delta back through the weight matrix.
      @Nonnull final TensorList tensorList = TensorArray.wrap(IntStream.range(0, indata.length()).parallel().mapToObj(dataIndex -> {
        Tensor deltaTensor = delta.get(dataIndex);
        @Nonnull final Tensor passback = new Tensor(indata.getDimensions());
        FullyConnectedLayer.multiply(this.weights.getData(), deltaTensor.getData(), passback.getData());
        deltaTensor.freeRef();
        return passback;
      }).toArray(i -> new Tensor[i]));
      inObj[0].accumulate(buffer, tensorList);
    }
  }) {
    @Override
    protected void _free() {
      indata.freeRef();
      FullyConnectedLayer.this.freeRef();
      for (@Nonnull Result result : inObj) {
        result.freeRef();
      }
      FullyConnectedLayer.this.weights.freeRef();
    }

    @Override
    public boolean isAlive() {
      return !isFrozen() || Arrays.stream(inObj).anyMatch(x -> x.isAlive());
    }
  };
}
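For reference, the forward pass above amounts to a dense matrix-vector product per batch item. The plain-Java sketch below reproduces that computation for a single input vector; the method name denseForward and the column-major weight layout are assumptions made for illustration (the layout is inferred from the new DoubleMatrix(inputSize, outputSize, weights) construction in the snippet), not part of the MindsEye API.

// Minimal sketch: y = W^T * x for one input vector.
// Assumes weights are stored column-major with shape [inputSize x outputSize],
// so weights[o * inputSize + i] holds W[i][o].
static double[] denseForward(double[] weights, double[] input, int outputSize) {
  final int inputSize = input.length;
  final double[] output = new double[outputSize];
  for (int o = 0; o < outputSize; o++) {
    double sum = 0;
    for (int i = 0; i < inputSize; i++) {
      sum += weights[o * inputSize + i] * input[i];
    }
    output[o] = sum;
  }
  return output;
}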
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
The class HyperbolicActivationLayer, method eval:
@Nonnull
@Override
public Result eval(final Result... inObj) {
  final TensorList indata = inObj[0].getData();
  indata.addRef();
  inObj[0].addRef();
  weights.addRef();
  HyperbolicActivationLayer.this.addRef();
  final int itemCnt = indata.length();
  // Forward pass: element-wise hyperbolic activation with a separate scale parameter per input sign.
  return new Result(TensorArray.wrap(IntStream.range(0, itemCnt).mapToObj(dataIndex -> {
    @Nullable final Tensor input = indata.get(dataIndex);
    @Nullable Tensor map = input.map(v -> {
      final int sign = v < 0 ? negativeMode : 1;
      final double a = Math.max(0, weights.get(v < 0 ? 1 : 0));
      return sign * (Math.sqrt(Math.pow(a * v, 2) + 1) - a) / a;
    });
    input.freeRef();
    return map;
  }).toArray(i -> new Tensor[i])), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    if (!isFrozen()) {
      // Gradient with respect to the two scale parameters.
      IntStream.range(0, delta.length()).forEach(dataIndex -> {
        @Nullable Tensor deltaI = delta.get(dataIndex);
        @Nullable Tensor inputI = indata.get(dataIndex);
        @Nullable final double[] deltaData = deltaI.getData();
        @Nullable final double[] inputData = inputI.getData();
        @Nonnull final Tensor weightDelta = new Tensor(weights.getDimensions());
        for (int i = 0; i < deltaData.length; i++) {
          final double d = deltaData[i];
          final double x = inputData[i];
          final int sign = x < 0 ? negativeMode : 1;
          final double a = Math.max(0, weights.getData()[x < 0 ? 1 : 0]);
          weightDelta.add(x < 0 ? 1 : 0, -sign * d / (a * a * Math.sqrt(1 + Math.pow(a * x, 2))));
        }
        deltaI.freeRef();
        inputI.freeRef();
        buffer.get(HyperbolicActivationLayer.this, weights.getData()).addInPlace(weightDelta.getData()).freeRef();
        weightDelta.freeRef();
      });
    }
    if (inObj[0].isAlive()) {
      // Gradient with respect to the input.
      @Nonnull TensorArray tensorArray = TensorArray.wrap(IntStream.range(0, delta.length()).mapToObj(dataIndex -> {
        @Nullable Tensor inputTensor = indata.get(dataIndex);
        Tensor deltaTensor = delta.get(dataIndex);
        @Nullable final double[] deltaData = deltaTensor.getData();
        @Nonnull final int[] dims = indata.getDimensions();
        @Nonnull final Tensor passback = new Tensor(dims);
        for (int i = 0; i < passback.length(); i++) {
          final double x = inputTensor.getData()[i];
          final double d = deltaData[i];
          final int sign = x < 0 ? negativeMode : 1;
          final double a = Math.max(0, weights.getData()[x < 0 ? 1 : 0]);
          passback.set(i, sign * d * a * x / Math.sqrt(1 + a * x * a * x));
        }
        deltaTensor.freeRef();
        inputTensor.freeRef();
        return passback;
      }).toArray(i -> new Tensor[i]));
      inObj[0].accumulate(buffer, tensorArray);
    }
  }) {
    @Override
    protected void _free() {
      indata.freeRef();
      inObj[0].freeRef();
      weights.freeRef();
      HyperbolicActivationLayer.this.freeRef();
    }

    @Override
    public boolean isAlive() {
      return inObj[0].isAlive() || !isFrozen();
    }
  };
}
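Restated as formulas, with s the sign factor (negativeMode for x < 0, otherwise 1) and a = max(0, w) the scale parameter selected by the sign of x, the arithmetic in the snippet is:

f(x) = s \, \frac{\sqrt{(a x)^2 + 1} - a}{a}, \qquad
\frac{\partial f}{\partial x} = \frac{s \, a x}{\sqrt{(a x)^2 + 1}}, \qquad
\frac{\partial f}{\partial a} = \frac{-s}{a^2 \sqrt{(a x)^2 + 1}}

The backward lambda multiplies each derivative by the incoming delta d, which is exactly the passback value and the weightDelta contribution computed in the two loops above.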
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
The class ImgBandScaleLayer, method eval:
/**
 * Eval nn result.
 *
 * @param input the input
 * @return the nn result
 */
@Nonnull
public Result eval(@Nonnull final Result input) {
  @Nullable final double[] weights = getWeights();
  final TensorList inData = input.getData();
  inData.addRef();
  input.addRef();
  // Forward pass: scale every pixel by the weight assigned to its band.
  @Nullable Function<Tensor, Tensor> tensorTensorFunction = tensor -> {
    if (tensor.getDimensions().length != 3) {
      throw new IllegalArgumentException(Arrays.toString(tensor.getDimensions()));
    }
    if (tensor.getDimensions()[2] != weights.length) {
      throw new IllegalArgumentException(String.format("%s: %s does not have %s bands", getName(), Arrays.toString(tensor.getDimensions()), weights.length));
    }
    @Nullable Tensor tensor1 = tensor.mapCoords(c -> tensor.get(c) * weights[c.getCoords()[2]]);
    tensor.freeRef();
    return tensor1;
  };
  Tensor[] data = inData.stream().parallel().map(tensorTensorFunction).toArray(i -> new Tensor[i]);
  return new Result(TensorArray.wrap(data), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    if (!isFrozen()) {
      // Weight gradient: for each band, sum delta * input over that band's plane.
      final Delta<Layer> deltaBuffer = buffer.get(ImgBandScaleLayer.this, weights);
      IntStream.range(0, delta.length()).forEach(index -> {
        @Nonnull int[] dimensions = delta.getDimensions();
        int z = dimensions[2];
        int y = dimensions[1];
        int x = dimensions[0];
        final double[] array = RecycleBin.DOUBLES.obtain(z);
        Tensor deltaTensor = delta.get(index);
        @Nullable final double[] deltaArray = deltaTensor.getData();
        Tensor inputTensor = inData.get(index);
        @Nullable final double[] inputData = inputTensor.getData();
        for (int i = 0; i < z; i++) {
          for (int j = 0; j < y * x; j++) {
            // array[i] += deltaArray[i + z * j];
            array[i] += deltaArray[i * x * y + j] * inputData[i * x * y + j];
          }
        }
        inputTensor.freeRef();
        deltaTensor.freeRef();
        assert Arrays.stream(array).allMatch(v -> Double.isFinite(v));
        deltaBuffer.addInPlace(array);
        RecycleBin.DOUBLES.recycle(array, array.length);
      });
      deltaBuffer.freeRef();
    }
    if (input.isAlive()) {
      // Input gradient: scale each delta pixel by the same per-band weight.
      Tensor[] tensors = delta.stream().map(t -> {
        @Nullable Tensor tensor = t.mapCoords((c) -> t.get(c) * weights[c.getCoords()[2]]);
        t.freeRef();
        return tensor;
      }).toArray(i -> new Tensor[i]);
      @Nonnull TensorArray tensorArray = TensorArray.wrap(tensors);
      input.accumulate(buffer, tensorArray);
    }
  }) {
    @Override
    protected void _free() {
      inData.freeRef();
      input.freeRef();
    }

    @Override
    public boolean isAlive() {
      return input.isAlive() || !isFrozen();
    }
  };
}
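The backward loop above reduces to one scalar gradient per band: the sum of delta times input over that band's plane. The standalone sketch below performs the same reduction for a single image; the method name bandGradient and the band-major layout (band i occupying a contiguous width*height block) are assumptions taken from the index arithmetic in the snippet, not a guaranteed property of the Tensor class.

// Minimal sketch: per-band weight gradient for one image.
// Assumes band-major layout: band i occupies indices [i * width * height, (i + 1) * width * height).
static double[] bandGradient(double[] delta, double[] input, int width, int height, int bands) {
  final double[] gradient = new double[bands];
  for (int band = 0; band < bands; band++) {
    for (int pixel = 0; pixel < width * height; pixel++) {
      final int idx = band * width * height + pixel;
      gradient[band] += delta[idx] * input[idx];
    }
  }
  return gradient;
}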
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
The class ImgConcatLayer, method eval:
@Nullable
@Override
public Result eval(@Nonnull final Result... inObj) {
  Arrays.stream(inObj).forEach(nnResult -> nnResult.addRef());
  assert Arrays.stream(inObj).allMatch(x -> x.getData().getDimensions().length == 3) : "This component is for use with 3d image tensors only";
  final int numBatches = inObj[0].getData().length();
  assert Arrays.stream(inObj).allMatch(x -> x.getData().length() == numBatches) : "All inputs must use same batch size";
  @Nonnull final int[] outputDims = Arrays.copyOf(inObj[0].getData().getDimensions(), 3);
  outputDims[2] = Arrays.stream(inObj).mapToInt(x -> x.getData().getDimensions()[2]).sum();
  if (maxBands > 0)
    outputDims[2] = Math.min(maxBands, outputDims[2]);
  assert Arrays.stream(inObj).allMatch(x -> x.getData().getDimensions()[0] == outputDims[0]) : "Inputs must be same size";
  assert Arrays.stream(inObj).allMatch(x -> x.getData().getDimensions()[1] == outputDims[1]) : "Inputs must be same size";
  // Forward pass: copy each input's data into one concatenated output tensor per batch item.
  @Nonnull final List<Tensor> outputTensors = new ArrayList<>();
  for (int b = 0; b < numBatches; b++) {
    @Nonnull final Tensor outputTensor = new Tensor(outputDims);
    int pos = 0;
    @Nullable final double[] outputTensorData = outputTensor.getData();
    for (int i = 0; i < inObj.length; i++) {
      @Nullable Tensor tensor = inObj[i].getData().get(b);
      @Nullable final double[] data = tensor.getData();
      System.arraycopy(data, 0, outputTensorData, pos, Math.min(data.length, outputTensorData.length - pos));
      pos += data.length;
      tensor.freeRef();
    }
    outputTensors.add(outputTensor);
  }
  return new Result(TensorArray.wrap(outputTensors.toArray(new Tensor[] {})), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList data) -> {
    assert numBatches == data.length();
    // Backward pass: split the incoming delta back into one slice per input, then accumulate each slice.
    @Nonnull final List<Tensor[]> splitBatches = new ArrayList<>();
    for (int b = 0; b < numBatches; b++) {
      @Nullable final Tensor tensor = data.get(b);
      @Nonnull final Tensor[] outputTensors2 = new Tensor[inObj.length];
      int pos = 0;
      for (int i = 0; i < inObj.length; i++) {
        @Nonnull final Tensor dest = new Tensor(inObj[i].getData().getDimensions());
        @Nullable double[] tensorData = tensor.getData();
        System.arraycopy(tensorData, pos, dest.getData(), 0, Math.min(dest.length(), tensorData.length - pos));
        pos += dest.length();
        outputTensors2[i] = dest;
      }
      tensor.freeRef();
      splitBatches.add(outputTensors2);
    }
    @Nonnull final Tensor[][] splitData = new Tensor[inObj.length][];
    for (int i = 0; i < splitData.length; i++) {
      splitData[i] = new Tensor[numBatches];
    }
    for (int i = 0; i < inObj.length; i++) {
      for (int b = 0; b < numBatches; b++) {
        splitData[i][b] = splitBatches.get(b)[i];
      }
    }
    for (int i = 0; i < inObj.length; i++) {
      @Nonnull TensorArray tensorArray = TensorArray.wrap(splitData[i]);
      inObj[i].accumulate(buffer, tensorArray);
    }
  }) {
    @Override
    protected void _free() {
      Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
    }

    @Override
    public boolean isAlive() {
      for (@Nonnull final Result element : inObj) {
        if (element.isAlive()) {
          return true;
        }
      }
      return false;
    }
  };
}
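The forward loop is a straight System.arraycopy walk along the band axis; when maxBands truncates the output, the Math.min guard simply drops whatever does not fit. A simplified single-item sketch follows; the method name concatBands is hypothetical, and unlike the snippet it guards against a negative copy length once the output is full.

// Minimal sketch: concatenate several flattened image tensors into one output buffer,
// clipping any data that does not fit into the (possibly maxBands-truncated) output.
static double[] concatBands(double[][] inputs, int outputLength) {
  final double[] output = new double[outputLength];
  int pos = 0;
  for (double[] data : inputs) {
    final int count = Math.min(data.length, output.length - pos);
    if (count > 0) {
      System.arraycopy(data, 0, output, pos, count);
    }
    pos += data.length;
  }
  return output;
}

The backward lambda in the snippet performs the inverse walk: the same running offset splits the delta buffer into one slice per input, which is then accumulated on that input.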
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
The class SquareActivationLayer, method evalAndFree:
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  if (inObj.length != 1) {
    throw new IllegalArgumentException("inObj.length=" + inObj.length);
  }
  Result input = inObj[0];
  final TensorList inputData = input.getData();
  @Nonnull final int[] dimensions = inputData.getDimensions();
  final int length = inputData.length();
  if (3 != dimensions.length) {
    throw new IllegalArgumentException("dimensions=" + Arrays.toString(dimensions));
  }
  // Forward pass: multiply the input with itself on the GPU, scaled by alpha.
  return new Result(CudaSystem.run(gpu -> {
    @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
    @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
    @Nullable final CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
    // assert inputTensor.size == rPtr.size;
    @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
    CudaMemory lPtrMemory = inputTensor.getMemory(gpu);
    CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(alpha), inputTensor.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
    outputPtr.dirty();
    lPtrMemory.dirty();
    outputPtr.dirty();
    lPtrMemory.freeRef();
    inputTensor.freeRef();
    opDescriptor.freeRef();
    CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, outputDescriptor, precision);
    return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
  }, inputData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    if (input.isAlive()) {
      // Backward pass: multiply the delta with the input, scaled by 2 * alpha.
      @Nonnull TensorList data = CudaSystem.run(gpu -> {
        @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
        @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
        delta.freeRef();
        @Nullable final CudaTensor inputTensor = gpu.getTensor(input.getData(), precision, MemoryType.Device, false);
        // assert deltaTensor.size == inputTensor.size;
        @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
        CudaMemory deltaTensorMemory = deltaTensor.getMemory(gpu);
        CudaMemory rightTensorMemory = inputTensor.getMemory(gpu);
        CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(2), deltaTensor.descriptor.getPtr(), deltaTensorMemory.getPtr(), precision.getPointer(alpha), inputTensor.descriptor.getPtr(), rightTensorMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
        deltaTensorMemory.dirty();
        rightTensorMemory.dirty();
        outputPtr.dirty();
        deltaTensorMemory.freeRef();
        rightTensorMemory.freeRef();
        CudaTensor cudaTensor = new CudaTensor(outputPtr, outputDescriptor, precision);
        Arrays.stream(new ReferenceCounting[] { deltaTensor, inputTensor, opDescriptor, outputDescriptor }).forEach(ReferenceCounting::freeRef);
        outputPtr.freeRef();
        return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
      }, delta);
      input.accumulate(buffer, data);
    } else {
      delta.freeRef();
    }
  }) {
    @Override
    public void accumulate(final DeltaSet<Layer> buffer, final TensorList delta) {
      getAccumulator().accept(buffer, delta);
    }

    @Override
    protected void _free() {
      inputData.freeRef();
      input.freeRef();
    }

    @Override
    public boolean isAlive() {
      for (@Nonnull final Result element : inObj) {
        if (element.isAlive()) {
          return true;
        }
      }
      return false;
    }
  };
}
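Both GPU calls use cuDNN's element-wise tensor multiply, which computes C = (alpha1 * A) * (alpha2 * B) for CUDNN_OP_TENSOR_MUL. With the scaling factors used above, the forward call yields y = alpha * x * x and the backward call yields dx = 2 * alpha * x * delta. A CPU reference of that arithmetic is sketched below; squareForward and squareBackward are illustrative names, not MindsEye methods.

// Minimal CPU reference for the two GPU multiplies above:
// forward:  y[i]  = alpha * x[i] * x[i]
// backward: dx[i] = 2 * alpha * x[i] * delta[i]
static void squareForward(double[] x, double[] y, double alpha) {
  for (int i = 0; i < x.length; i++) {
    y[i] = alpha * x[i] * x[i];
  }
}

static void squareBackward(double[] x, double[] delta, double[] dx, double alpha) {
  for (int i = 0; i < x.length; i++) {
    dx[i] = 2 * alpha * x[i] * delta[i];
  }
}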