Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
The class BandReducerLayer, method evalAndFree:
/**
 * Evaluates the input by delegating to a {@link PoolingLayer} whose window
 * covers the entire input plane, reducing each band accordingly.
 *
 * @param inObj the input results; the first entry supplies the data
 * @return the pooled result, or the compatibility-layer result when CUDA is disabled
 */
@Nullable
@Override
public Result evalAndFree(final Result... inObj) {
  // Fall back to the CPU-compatible implementation when CUDA is unavailable.
  if (!CudaSystem.isEnabled()) {
    return getCompatibilityLayer().evalAndFree(inObj);
  }
  @Nonnull final int[] dimensions = inObj[0].getData().getDimensions();
  // Size the pooling window to span the full plane of the input.
  @Nonnull final PoolingLayer pooling = new PoolingLayer()
      .setMode(mode)
      .setPrecision(precision)
      .setWindowX(dimensions[1])
      .setWindowY(dimensions[0])
      .setAlpha(alpha);
  @Nullable final Result pooled = pooling.evalAndFree(inObj);
  pooling.freeRef();
  return pooled;
}
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
The class ConvolutionLayer, method evalAndFree:
/**
 * Evaluates the input through the exploded convolution network, wiring the
 * backward pass so kernel gradients are read back into this layer's delta.
 *
 * @param inObj a single input result whose data has exactly 3 dimensions,
 *              with band count matching {@code inputBands}
 * @return the convolution result, or the compatibility-layer result when CUDA is disabled
 */
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  final Tensor kernel = getKernel();
  kernel.addRef();
  assert kernel.isValid();
  assert 1 == inObj.length;
  assert 3 == inObj[0].getData().getDimensions().length;
  assert inputBands == inObj[0].getData().getDimensions()[2] : Arrays.toString(inObj[0].getData().getDimensions()) + "[2] != " + inputBands;
  if (!CudaSystem.isEnabled()) {
    // BUGFIX: release the kernel reference acquired above; the original early
    // return leaked one kernel reference on the CPU-fallback path.
    kernel.freeRef();
    return getCompatibilityLayer().evalAndFree(inObj);
  }
  @Nonnull ExplodedConvolutionGrid grid = getExplodedNetwork();
  @Nonnull PipelineNetwork network = grid.getNetwork();
  if (isFrozen()) {
    network.freeze();
  }
  final Result result = network.evalAndFree(inObj);
  network.freeRef();
  final TensorList resultData = result.getData();
  assert inObj[0].getData().length() == resultData.length();
  assert 3 == resultData.getDimensions().length;
  assert outputBands == resultData.getDimensions()[2];
  // Held on behalf of the returned Result; released in _free() below.
  ConvolutionLayer.this.addRef();
  return new Result(resultData, (@Nonnull final DeltaSet<Layer> deltaSet, @Nonnull final TensorList delta) -> {
    result.accumulate(deltaSet, delta);
    if (!isFrozen()) {
      // Read the accumulated kernel gradient out of the exploded grid and
      // fold it into this layer's kernel delta buffer.
      Tensor read = grid.read(deltaSet, true);
      deltaSet.get(ConvolutionLayer.this, kernel.getData()).addInPlace(read.getData()).freeRef();
      read.freeRef();
    }
  }) {
    @Override
    public void accumulate(final DeltaSet<Layer> buffer, final TensorList delta) {
      getAccumulator().accept(buffer, delta);
    }

    @Override
    protected void _free() {
      // Release everything captured by this Result: the grid, the inner
      // network result, the kernel reference, and the layer itself.
      grid.freeRef();
      result.freeRef();
      kernel.freeRef();
      ConvolutionLayer.this.freeRef();
    }

    @Override
    public boolean isAlive() {
      return result.isAlive();
    }
  };
}
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
The class BasicTrainable, method eval:
/**
 * Eval point sample.
 * <p>
 * Feeds {@code list} through the network, sums the resulting output values,
 * and accumulates a unit delta to build the gradient and weight-state buffers.
 *
 * @param list the list of input tensor rows to evaluate
 * @param monitor the training monitor (may be null)
 * @return the point sample, normalized by the item count
 */
@Nonnull
protected PointSample eval(@Nonnull final List<Tensor[]> list, @Nullable final TrainingMonitor monitor) {
  @Nonnull final TimedResult<PointSample> timedResult = TimedResult.time(() -> {
    final Result[] nnContext = BasicTrainable.getNNContext(list, mask);
    final Result result = network.eval(nnContext);
    // The context wrappers (and their data) are no longer needed once evaluated.
    for (@Nonnull Result nnResult : nnContext) {
      nnResult.getData().freeRef();
      nnResult.freeRef();
    }
    final TensorList resultData = result.getData();
    @Nonnull final DeltaSet<Layer> deltaSet = new DeltaSet<Layer>();
    // BUGFIX: was annotated @Nonnull despite being initialized to null and
    // null-checked in the finally block; @Nullable matches the actual lifecycle.
    @Nullable StateSet<Layer> stateSet = null;
    try {
      // Sum every output element, releasing each tensor as soon as it is read.
      final DoubleSummaryStatistics statistics = resultData.stream().flatMapToDouble(x -> {
        double[] array = Arrays.stream(x.getData()).toArray();
        x.freeRef();
        return Arrays.stream(array);
      }).summaryStatistics();
      final double sum = statistics.getSum();
      result.accumulate(deltaSet, 1.0);
      stateSet = new StateSet<>(deltaSet);
      return new PointSample(deltaSet, stateSet, sum, 0.0, list.size());
    } finally {
      if (null != stateSet)
        stateSet.freeRef();
      resultData.freeRefAsync();
      result.freeRefAsync();
      deltaSet.freeRefAsync();
    }
  });
  if (null != monitor && verbosity() > 0) {
    monitor.log(String.format("Device completed %s items in %.3f sec", list.size(), timedResult.timeNanos / 1e9));
  }
  @Nonnull PointSample normalize = timedResult.result.normalize();
  timedResult.result.freeRef();
  return normalize;
}
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
The class TensorListTrainable, method eval:
/**
 * Eval point sample.
 * <p>
 * Runs the network over the supplied tensor lists, sums the output, and
 * accumulates a unit delta to produce the gradient and weight-state buffers.
 *
 * @param list the input tensor lists to evaluate
 * @param monitor the training monitor (may be null)
 * @return the point sample, normalized by the item count
 */
@Nonnull
protected PointSample eval(@Nonnull final TensorList[] list, @Nullable final TrainingMonitor monitor) {
  final int inputCount = data.length;
  assert 0 < inputCount;
  final int itemCount = data[0].length();
  assert 0 < itemCount;
  @Nonnull final TimedResult<PointSample> timed = TimedResult.time(() -> {
    final Result[] inputContext = TensorListTrainable.getNNContext(list, mask);
    final Result evalResult = network.eval(inputContext);
    // The context wrappers (and their data) are done once evaluation completes.
    for (@Nonnull final Result ctx : inputContext) {
      ctx.getData().freeRef();
      ctx.freeRef();
    }
    final TensorList outputData = evalResult.getData();
    // Sum every output element, releasing each tensor as soon as it is read.
    final DoubleSummaryStatistics stats = outputData.stream().flatMapToDouble(tensor -> {
      final double[] values = Arrays.stream(tensor.getData()).toArray();
      tensor.freeRef();
      return Arrays.stream(values);
    }).summaryStatistics();
    final double total = stats.getSum();
    @Nonnull final DeltaSet<Layer> deltas = new DeltaSet<Layer>();
    @Nonnull final PointSample sample;
    try {
      evalResult.accumulate(deltas, 1.0);
      @Nonnull final StateSet<Layer> weights = new StateSet<>(deltas);
      sample = new PointSample(deltas, weights, total, 0.0, itemCount);
      weights.freeRef();
    } finally {
      outputData.freeRef();
      evalResult.freeRef();
      deltas.freeRef();
    }
    return sample;
  });
  if (null != monitor && verbosity() > 0) {
    monitor.log(String.format("Device completed %s items in %.3f sec", itemCount, timed.timeNanos / 1e9));
  }
  @Nonnull final PointSample normalized = timed.result.normalize();
  timed.result.freeRef();
  return normalized;
}
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
The class CudaLayerTester, method testNonstandardBoundsBackprop:
/**
 * Test nonstandard bounds backprop tolerance statistics.
 * <p>
 * Evaluates the layer twice on identical randomized inputs — once feeding an
 * irregular (non-dense) CUDA tensor as the backward delta, once through the
 * standard dense path — and compares outputs and derivatives within tolerance.
 *
 * @param log the log
 * @param layer the layer
 * @param inputPrototype the input prototype
 * @return the tolerance statistics
 */
@Nonnull
public ToleranceStatistics testNonstandardBoundsBackprop(final NotebookOutput log, @Nullable final Layer layer, @Nonnull final Tensor[] inputPrototype) {
log.h2("Irregular Backprop");
log.p("This layer should accept non-dense tensors as delta input.");
return log.code(() -> {
// Randomize the prototypes so the comparison is not trivially zero.
Tensor[] randomized = Arrays.stream(inputPrototype).map(x -> x.map(v -> getRandom())).toArray(i -> new Tensor[i]);
logger.info("Input: " + Arrays.stream(randomized).map(Tensor::prettyPrint).collect(Collectors.toList()));
Precision precision = Precision.Double;
// NOTE(review): TensorArray.wrap presumably takes ownership of each randomized
// tensor, so freeing controlInput below releases them — confirm against wrap's contract.
TensorList[] controlInput = Arrays.stream(randomized).map(original -> {
return TensorArray.wrap(original);
}).toArray(i -> new TensorList[i]);
// Test run: the backward feedback is rebuilt as an irregular CudaTensorList.
@Nonnull final SimpleResult testResult = CudaSystem.run(gpu -> {
TensorList[] copy = copy(controlInput);
SimpleResult result = new SimpleGpuEval(layer, gpu, copy) {
@Nonnull
@Override
public TensorList getFeedback(@Nonnull final TensorList original) {
// All-ones delta, repackaged with nonstandard memory bounds.
Tensor originalTensor = original.get(0).mapAndFree(x -> 1);
CudaTensorList cudaTensorList = buildIrregularCudaTensor(gpu, precision, originalTensor);
originalTensor.freeRef();
return cudaTensorList;
}
}.call();
Arrays.stream(copy).forEach(ReferenceCounting::freeRef);
return result;
});
// Control run: same inputs through the standard dense evaluation path.
@Nonnull final SimpleResult controlResult = CudaSystem.run(gpu -> {
TensorList[] copy = copy(controlInput);
SimpleResult result = SimpleGpuEval.run(layer, gpu, copy);
Arrays.stream(copy).forEach(ReferenceCounting::freeRef);
return result;
}, 1);
try {
ToleranceStatistics compareOutput = compareOutput(controlResult, testResult);
ToleranceStatistics compareDerivatives = compareDerivatives(controlResult, testResult);
return compareDerivatives.combine(compareOutput);
} finally {
// Release all retained buffers regardless of the comparison outcome.
Arrays.stream(controlInput).forEach(ReferenceCounting::freeRef);
controlResult.freeRef();
testResult.freeRef();
}
});
}
Aggregations