Use of com.simiacryptus.mindseye.lang.TensorArray in project MindsEye by SimiaCryptus: class SumInputsLayer, method eval.
@Nonnull
@Override
public Result eval(@Nonnull final Result... inObj) {
Arrays.stream(inObj).forEach(nnResult -> nnResult.addRef());
Arrays.stream(inObj).forEach(x -> x.getData().addRef());
return new Result(Arrays.stream(inObj).parallel().map(x -> {
TensorList data = x.getData();
data.addRef();
return data;
}).reduce((l, r) -> {
assert l.length() == r.length() || 1 == l.length() || 1 == r.length();
@Nonnull TensorArray sum = TensorArray.wrap(IntStream.range(0, l.length()).parallel().mapToObj(i -> {
@Nullable final Tensor left = l.get(1 == l.length() ? 0 : i);
@Nullable final Tensor right = r.get(1 == r.length() ? 0 : i);
@Nullable Tensor tensor;
if (right.length() == 1) {
tensor = left.mapParallel(v -> v + right.get(0));
} else {
tensor = left.reduceParallel(right, (v1, v2) -> v1 + v2);
}
left.freeRef();
right.freeRef();
return tensor;
}).toArray(i -> new Tensor[i]));
l.freeRef();
r.freeRef();
return sum;
}).get(), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
for (@Nonnull final Result input : inObj) {
if (input.isAlive()) {
@Nonnull TensorList projectedDelta = delta;
if (1 < projectedDelta.length() && input.getData().length() == 1) {
projectedDelta = TensorArray.wrap(projectedDelta.stream().parallel().reduce((a, b) -> {
@Nullable Tensor c = a.addAndFree(b);
b.freeRef();
return c;
}).get());
} else {
projectedDelta.addRef();
}
if (1 < Tensor.length(projectedDelta.getDimensions()) && Tensor.length(input.getData().getDimensions()) == 1) {
Tensor[] data = projectedDelta.stream().map(t -> new Tensor(new double[] { t.sum() })).toArray(i -> new Tensor[i]);
@Nonnull TensorArray data2 = TensorArray.wrap(data);
projectedDelta.freeRef();
projectedDelta = data2;
}
input.accumulate(buffer, projectedDelta);
}
}
}) {
@Override
protected void _free() {
Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
Arrays.stream(inObj).forEach(x -> x.getData().freeRef());
}
@Override
public boolean isAlive() {
for (@Nonnull final Result element : inObj) if (element.isAlive()) {
return true;
}
return false;
}
};
}
Use of com.simiacryptus.mindseye.lang.TensorArray in project MindsEye by SimiaCryptus: class AvgPoolingLayer, method eval.
@Nonnull
@SuppressWarnings("unchecked")
@Override
public Result eval(@Nonnull final Result... inObj) {
final int kernelSize = Tensor.length(kernelDims);
final TensorList data = inObj[0].getData();
@Nonnull final int[] inputDims = data.getDimensions();
final int[] newDims = IntStream.range(0, inputDims.length).map(i -> {
assert 0 == inputDims[i] % kernelDims[i] : inputDims[i] + ":" + kernelDims[i];
return inputDims[i] / kernelDims[i];
}).toArray();
final Map<Coordinate, List<int[]>> coordMap = AvgPoolingLayer.getCoordMap(kernelDims, newDims);
final Tensor[] outputValues = IntStream.range(0, data.length()).mapToObj(dataIndex -> {
@Nullable final Tensor input = data.get(dataIndex);
@Nonnull final Tensor output = new Tensor(newDims);
for (@Nonnull final Entry<Coordinate, List<int[]>> entry : coordMap.entrySet()) {
double sum = entry.getValue().stream().mapToDouble(inputCoord -> input.get(inputCoord)).sum();
if (Double.isFinite(sum)) {
output.add(entry.getKey(), sum / kernelSize);
}
}
input.freeRef();
return output;
}).toArray(i -> new Tensor[i]);
Arrays.stream(inObj).forEach(nnResult -> nnResult.addRef());
return new Result(TensorArray.wrap(outputValues), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
if (inObj[0].isAlive()) {
final Tensor[] passback = IntStream.range(0, delta.length()).mapToObj(dataIndex -> {
@Nullable Tensor tensor = delta.get(dataIndex);
@Nonnull final Tensor backSignal = new Tensor(inputDims);
for (@Nonnull final Entry<Coordinate, List<int[]>> outputMapping : coordMap.entrySet()) {
final double outputValue = tensor.get(outputMapping.getKey());
for (@Nonnull final int[] inputCoord : outputMapping.getValue()) {
backSignal.add(inputCoord, outputValue / kernelSize);
}
}
tensor.freeRef();
return backSignal;
}).toArray(i -> new Tensor[i]);
@Nonnull TensorArray tensorArray = TensorArray.wrap(passback);
inObj[0].accumulate(buffer, tensorArray);
}
}) {
@Override
protected void _free() {
Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
}
@Override
public boolean isAlive() {
return inObj[0].isAlive();
}
};
}
Use of com.simiacryptus.mindseye.lang.TensorArray in project MindsEye by SimiaCryptus: class CudaTensorList, method toHeap.
/**
 * Copies this tensor list from GPU memory into a heap-resident TensorArray,
 * timing and logging the transfer.
 *
 * @param avoidAllocations forwarded to {@code CudaTensor.read}; when true,
 *                         read paths that avoid fresh allocations are preferred
 * @return the heap copy of the GPU data
 * @throws IllegalStateException if there is no GPU copy to read from
 */
private TensorArray toHeap(final boolean avoidAllocations) {
  CudaTensor gpuCopy = this.gpuCopy;
  TimedResult<TensorArray> timedResult = TimedResult.time(() -> CudaDevice.run(gpu -> {
    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
    if (null == gpuCopy) {
      // Without a GPU copy this method cannot proceed; report a descriptive
      // error in every branch. (Previously, when a valid heap copy existed,
      // control fell through and crashed with a NullPointerException on
      // gpuCopy.addRef() below.)
      if (null == heapCopy) {
        throw new IllegalStateException("No data");
      } else if (heapCopy.isFinalized()) {
        throw new IllegalStateException("Local data has been freed");
      } else {
        throw new IllegalStateException("No GPU data to copy to heap");
      }
    }
    // Hold the GPU tensor while reading; released in the finally block.
    gpuCopy.addRef();
    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
    try {
      assert getPrecision() == gpuCopy.getPrecision();
      assert getPrecision() == gpuCopy.descriptor.dataType;
      assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
      // Allocate one heap tensor per batch element, then read each slice off the GPU.
      final Tensor[] output = IntStream.range(0, getLength()).mapToObj(dataIndex -> new Tensor(getDimensions())).toArray(i -> new Tensor[i]);
      assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
      for (int i = 0; i < getLength(); i++) {
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
        gpuCopy.read(gpu, i, output[i], avoidAllocations);
        assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
      }
      return TensorArray.wrap(output);
    } finally {
      gpuCopy.freeRef();
    }
  }, this));
  // NOTE(review): gpuCopy.size() is read after the freeRef above; this relies
  // on this.gpuCopy still holding a live reference — confirm.
  CudaTensorList.logger.debug(String.format("Read %s bytes in %.4f from Tensor %s on GPU at %s, created by %s", gpuCopy.size(), timedResult.seconds(), Integer.toHexString(System.identityHashCode(timedResult.result)), TestUtil.toString(TestUtil.getStackTrace()).replaceAll("\n", "\n\t"), TestUtil.toString(createdBy).replaceAll("\n", "\n\t")));
  return timedResult.result;
}
Use of com.simiacryptus.mindseye.lang.TensorArray in project MindsEye by SimiaCryptus: class CudaTensorList, method heapCopy.
/**
 * Lazily materializes, caches, and returns the heap-resident copy of this
 * tensor list, using double-checked locking so that concurrent callers do not
 * both install a transfer result.
 *
 * @param avoidAllocations forwarded to {@link #toHeap}
 * @return the cached heap copy
 */
@Nullable
private TensorArray heapCopy(final boolean avoidAllocations) {
  TensorArray heapCopy;
  heapCopy = this.heapCopy;
  if (null == heapCopy || heapCopy.isFinalized()) {
    // Perform the (expensive) GPU-to-heap transfer outside the lock.
    TensorArray copy = toHeap(avoidAllocations);
    final TensorArray prev;
    final boolean installed;
    synchronized (this) {
      // Re-check under the lock: another thread may have installed a valid copy.
      heapCopy = this.heapCopy;
      if (null == heapCopy || heapCopy.isFinalized()) {
        prev = this.heapCopy;
        this.heapCopy = copy;
        heapCopy = copy;
        installed = true;
      } else {
        prev = null;
        installed = false;
      }
    }
    if (!installed) {
      // Another thread won the race; release our unused transfer result.
      // (The original code leaked this reference.)
      copy.freeRef();
    }
    if (null != prev)
      prev.freeRef();
  }
  return heapCopy;
}
Use of com.simiacryptus.mindseye.lang.TensorArray in project MindsEye by SimiaCryptus: class GaussianNoiseLayer, method eval.
@Nonnull
@Override
public Result eval(final Result... inObj) {
final Result in0 = inObj[0];
final TensorList inputData = in0.getData();
final int itemCnt = inputData.length();
in0.addRef();
inputData.addRef();
final Tensor[] outputA = IntStream.range(0, itemCnt).mapToObj(dataIndex -> {
@Nonnull final Random random = new Random(seed);
@Nullable final Tensor input = inputData.get(dataIndex);
@Nullable final Tensor output = input.map(x -> {
return x + random.nextGaussian() * getValue();
});
input.freeRef();
return output;
}).toArray(i -> new Tensor[i]);
return new Result(TensorArray.wrap(outputA), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
if (in0.isAlive()) {
@Nonnull TensorArray tensorArray = TensorArray.wrap(IntStream.range(0, delta.length()).mapToObj(dataIndex -> {
Tensor tensor = delta.get(dataIndex);
@Nullable final double[] deltaData = tensor.getData();
@Nonnull final Tensor passback = new Tensor(inputData.getDimensions());
for (int i = 0; i < passback.length(); i++) {
passback.set(i, deltaData[i]);
}
tensor.freeRef();
return passback;
}).toArray(i -> new Tensor[i]));
in0.accumulate(buffer, tensorArray);
}
}) {
@Override
protected void _free() {
inputData.freeRef();
in0.freeRef();
}
@Override
public boolean isAlive() {
return in0.isAlive() || !isFrozen();
}
};
}
Aggregations