Use of com.simiacryptus.util.io.NotebookOutput in project MindsEye by SimiaCryptus: the class BatchDerivativeTester, method test.
/**
 * Validates the analytic derivatives of the given layer against finite-difference
 * estimates, writing a structured report to the notebook: feedback (input)
 * derivative agreement, learning (weight) derivative agreement, total accuracy,
 * and frozen/alive status checks.
 *
 * @param log            the notebook output the validation report is written to
 * @param component      the component (layer) under test
 * @param inputPrototype the input prototype; only the first tensor is used to build the reference I/O pair
 * @return the tolerance statistics accumulated across the enabled validation phases
 */
@Override
public ToleranceStatistics test(@Nonnull final NotebookOutput log, @Nonnull final Layer component, @Nonnull final Tensor... inputPrototype) {
log.h1("Differential Validation");
// Evaluate the layer once on the first prototype to obtain a reference input/output pair.
@Nonnull IOPair ioPair = new IOPair(component, inputPrototype[0]).invoke();
if (verbose) {
log.code(() -> {
// NOTE: .get() on the reductions assumes inputPrototype is non-empty; the
// ioPair construction above would already have failed otherwise.
BatchDerivativeTester.log.info(String.format("Inputs: %s", Arrays.stream(inputPrototype).map(t -> t.prettyPrint()).reduce((a, b) -> a + ",\n" + b).get()));
BatchDerivativeTester.log.info(String.format("Inputs Statistics: %s", Arrays.stream(inputPrototype).map(x -> new ScalarStatistics().add(x.getData()).toString()).reduce((a, b) -> a + ",\n" + b).get()));
BatchDerivativeTester.log.info(String.format("Output: %s", ioPair.getOutputPrototype().prettyPrint()));
BatchDerivativeTester.log.info(String.format("Outputs Statistics: %s", new ScalarStatistics().add(ioPair.getOutputPrototype().getData())));
});
}
// Accumulates agreement statistics across whichever validation phases are enabled.
ToleranceStatistics _statistics = new ToleranceStatistics();
if (isTestFeedback()) {
log.h2("Feedback Validation");
log.p("We validate the agreement between the implemented derivative _of the inputs_ apply finite difference estimations:");
// Copy into an effectively-final local so the lambda below can capture it.
ToleranceStatistics statistics = _statistics;
_statistics = log.code(() -> {
return testFeedback(component, ioPair, statistics);
});
}
if (isTestLearning()) {
log.h2("Learning Validation");
log.p("We validate the agreement between the implemented derivative _of the internal weights_ apply finite difference estimations:");
// Same capture pattern: the feedback-phase result feeds into the learning phase.
ToleranceStatistics statistics = _statistics;
_statistics = log.code(() -> {
return testLearning(component, ioPair, statistics);
});
}
log.h2("Total Accuracy");
log.p("The overall agreement accuracy between the implemented derivative and the finite difference estimations:");
ToleranceStatistics statistics = _statistics;
log.code(() -> {
// log.info(String.format("Component: %s\nInputs: %s\noutput=%s", component, Arrays.toString(inputPrototype), outputPrototype));
BatchDerivativeTester.log.info(String.format("Finite-Difference Derivative Accuracy:"));
BatchDerivativeTester.log.info(String.format("absoluteTol: %s", statistics.absoluteTol));
BatchDerivativeTester.log.info(String.format("relativeTol: %s", statistics.relativeTol));
});
log.h2("Frozen and Alive Status");
log.code(() -> {
// Verify the layer honors its frozen flag in both states.
testFrozen(component, ioPair.getInputPrototype());
testUnFrozen(component, ioPair.getInputPrototype());
});
return _statistics;
}
Use of com.simiacryptus.util.io.NotebookOutput in project MindsEye by SimiaCryptus: the class StandardLayerTests, method tests.
/**
 * Runs every non-null component test against a fresh copy of the invocation's
 * layer, recording any failure as a TestError instead of aborting the run
 * (LifecycleException excepted, which always propagates).
 *
 * @param log        the notebook output passed through to each test
 * @param tests      the component tests to run; null entries are skipped
 * @param invocation supplies the layer to copy and the input dimensions
 * @param exceptions collects one TestError per failed test
 */
private void tests(final NotebookOutput log, final List<ComponentTest<?>> tests, @Nonnull final Invocation invocation, @Nonnull final ArrayList<TestError> exceptions) {
for (final ComponentTest<?> componentTest : tests) {
if (null == componentTest) continue;
// Each test gets its own copy of the layer and its own randomized inputs.
@Nonnull final Layer testSubject = invocation.getLayer().copy();
final Tensor[] testInputs = randomize(invocation.getDims());
try {
componentTest.test(log, testSubject, testInputs);
} catch (final LifecycleException fatal) {
// Lifecycle failures abort the whole run rather than being recorded.
throw fatal;
} catch (final Throwable failure) {
exceptions.add(new TestError(failure, componentTest, testSubject));
} finally {
// Release all per-test resources whether the test passed or failed.
for (@Nonnull final Tensor tensor : testInputs) {
tensor.freeRef();
}
testSubject.freeRef();
componentTest.freeRef();
System.gc();
}
}
}
Use of com.simiacryptus.util.io.NotebookOutput in project MindsEye by SimiaCryptus: the class StandardLayerTests, method bigTests.
/**
 * Big tests: runs each configured "big" component test against a fresh copy of
 * the given layer, using large input dimensions derived from the seed so runs
 * are reproducible. Failures are collected rather than aborting the run, except
 * for lifecycle and CUDA errors, which always propagate.
 *
 * @param log        the log to pass through to each test
 * @param seed       the seed used to generate the large input dimensions
 * @param perfLayer  the perf layer; each test runs against its own copy
 * @param exceptions collects one TestError per failed test
 */
public void bigTests(NotebookOutput log, long seed, @Nonnull Layer perfLayer, @Nonnull ArrayList<TestError> exceptions) {
getBigTests().stream().filter(x -> null != x).forEach(test -> {
@Nonnull Layer layer = perfLayer.copy();
try {
// Inputs are allocated inside the outer try so the layer copy is still
// released (outer finally) even if randomize itself throws.
Tensor[] input = randomize(getLargeDims(new Random(seed)));
try {
test.test(log, layer, input);
} finally {
// Release input tensors regardless of the test outcome.
for (@Nonnull Tensor t : input) {
t.freeRef();
}
}
} catch (LifecycleException e) {
// Lifecycle failures abort the whole run rather than being recorded.
throw e;
} catch (CudaError e) {
// GPU faults are likewise fatal; continuing after one is unsafe.
throw e;
} catch (Throwable e) {
// Any other failure is recorded and the remaining tests continue.
exceptions.add(new TestError(e, test, layer));
} finally {
layer.freeRef();
test.freeRef();
System.gc();
}
});
}
Use of com.simiacryptus.util.io.NotebookOutput in project MindsEye by SimiaCryptus: the class TrainingTester, method trainLBFGS.
/**
 * Train lbfgs list: optimizes the given trainable with the L-BFGS quasi-Newton
 * method (Armijo-Wolfe line search) and records the optimization trajectory.
 *
 * @param log       the log the run is documented in
 * @param trainable the trainable to optimize
 * @return the list of step records captured by the training monitor
 */
@Nonnull
public List<StepRecord> trainLBFGS(@Nonnull final NotebookOutput log, final Trainable trainable) {
log.p("Next, we apply the same optimization using L-BFGS, which is nearly ideal for purely second-order or quadratic functions.");
@Nonnull final List<StepRecord> stepHistory = new ArrayList<>();
@Nonnull final TrainingMonitor stepMonitor = TrainingTester.getMonitor(stepHistory);
try {
log.code(() -> new IterativeTrainer(trainable)
.setLineSearchFactory(label -> new ArmijoWolfeSearch())
.setOrientation(new LBFGS())
.setMonitor(stepMonitor)
.setTimeout(30, TimeUnit.SECONDS)
.setIterationsPerSample(100)
.setMaxIterations(250)
.setTerminateThreshold(0)
.runAndFree());
} catch (final Throwable e) {
// Training failures are tolerated by default; rethrow only when configured to.
if (isThrowExceptions()) {
throw new RuntimeException(e);
}
}
return stepHistory;
}
Use of com.simiacryptus.util.io.NotebookOutput in project MindsEye by SimiaCryptus: the class TrainingTester, method testModelLearning.
/**
 * Test model learning: trains a copy of the component to emulate a frozen,
 * randomly-shuffled "target" copy of the same component, given example
 * input/output pairs produced by evaluating the target.
 *
 * @param log            the log the experiment is documented in
 * @param component      the component under test; trained and target networks are both copies of it
 * @param random         the randomize source used to shuffle weights and inputs
 * @param inputPrototype the input prototype used to generate training inputs
 * @return the apply result, or null if the layer changes the batch size (batch layers unsupported)
 */
public TestResult testModelLearning(@Nonnull final NotebookOutput log, @Nonnull final Layer component, final Random random, final Tensor[] inputPrototype) {
@Nonnull final Layer network_target = shuffle(random, component.copy()).freeze();
final Tensor[][] input_target = shuffleCopy(random, inputPrototype);
log.p("In this apply, attempt to train a network to emulate a randomized network given an example input/output. The target state is:");
log.code(() -> {
return network_target.state().stream().map(Arrays::toString).reduce((a, b) -> a + "\n" + b).orElse("");
});
Result[] array = ConstantResult.batchResultArray(input_target);
Result eval = network_target.eval(array);
Arrays.stream(array).forEach(ReferenceCounting::freeRef);
TensorList result = eval.getData();
eval.freeRef();
final Tensor[] output_target = result.stream().toArray(i -> new Tensor[i]);
result.freeRef();
// The target network is no longer needed once its outputs are captured.
// Fix: previously this local was never released (a reference leak).
network_target.freeRef();
if (output_target.length != input_target.length) {
logger.info("Batch layers not supported");
// Fix: release the tensors allocated above before bailing out; previously
// both arrays leaked their references on this early-return path.
Arrays.stream(output_target).forEach(ReferenceCounting::freeRef);
Arrays.stream(input_target).flatMap(Arrays::stream).forEach(ReferenceCounting::freeRef);
return null;
}
// trainAll takes ownership of the appended input/output tensors.
return trainAll("Model Convergence", log, append(input_target, output_target), shuffle(random, component.copy()));
}
Aggregations