Use of com.simiacryptus.mindseye.eval.Trainable in project MindsEye by SimiaCryptus.
Class RecursiveSubspace, method buildSubspace.
/**
 * Builds a synthetic "subspace" layer whose weights parameterize steps along
 * the current gradient directions, so an inner optimizer can search that
 * low-dimensional subspace.
 *
 * @param subject     the subject
 * @param measurement the measurement
 * @param monitor     the monitor
 * @return the subspace layer, as a generic nn layer
 */
@Nullable
public Layer buildSubspace(@Nonnull Trainable subject, @Nonnull PointSample measurement, @Nonnull TrainingMonitor monitor) {
  @Nonnull PointSample origin = measurement.copyFull().backup();
  @Nonnull final DeltaSet<Layer> direction = measurement.delta.scale(-1);
  final double magnitude = direction.getMagnitude();
  if (Math.abs(magnitude) < 1e-10) {
    monitor.log(String.format("Zero gradient: %s", magnitude));
  } else if (Math.abs(magnitude) < 1e-5) {
    monitor.log(String.format("Low gradient: %s", magnitude));
  }
  // Placeholder layers share a single subspace weight (index 0); every other
  // layer with a delta gets its own weight.
  boolean hasPlaceholders = direction.getMap().keySet().stream().anyMatch(x -> x instanceof PlaceholderLayer);
  List<Layer> deltaLayers = direction.getMap().keySet().stream().filter(x -> !(x instanceof PlaceholderLayer)).collect(Collectors.toList());
  int size = deltaLayers.size() + (hasPlaceholders ? 1 : 0);
  if (null == weights || weights.length != size) {
    weights = new double[size];
  }
  return new LayerBase() {
    @Nonnull
    Layer self = this;

    @Nonnull
    @Override
    public Result eval(Result... array) {
      assertAlive();
      // Reset to the original weights, then step along each stored direction
      // scaled by the corresponding subspace weight.
      origin.restore();
      IntStream.range(0, deltaLayers.size()).forEach(i -> {
        direction.getMap().get(deltaLayers.get(i)).accumulate(weights[hasPlaceholders ? (i + 1) : i]);
      });
      if (hasPlaceholders) {
        direction.getMap().entrySet().stream().filter(x -> x.getKey() instanceof PlaceholderLayer).distinct().forEach(entry -> entry.getValue().accumulate(weights[0]));
      }
      PointSample measure = subject.measure(monitor);
      double mean = measure.getMean();
      monitor.log(String.format("RecursiveSubspace: %s <- %s", mean, Arrays.toString(weights)));
      direction.addRef();
      return new Result(TensorArray.wrap(new Tensor(mean)), (DeltaSet<Layer> buffer, TensorList data) -> {
        // Backward pass: project the measured gradient onto each stored
        // direction, normalized by the direction's magnitude.
        DoubleStream deltaStream = deltaLayers.stream().mapToDouble(layer -> {
          Delta<Layer> a = direction.getMap().get(layer);
          Delta<Layer> b = measure.delta.getMap().get(layer);
          return b.dot(a) / Math.max(Math.sqrt(a.dot(a)), 1e-8);
        });
        if (hasPlaceholders) {
          deltaStream = DoubleStream.concat(DoubleStream.of(direction.getMap().keySet().stream().filter(x -> x instanceof PlaceholderLayer).distinct().mapToDouble(layer -> {
            Delta<Layer> a = direction.getMap().get(layer);
            Delta<Layer> b = measure.delta.getMap().get(layer);
            return b.dot(a) / Math.max(Math.sqrt(a.dot(a)), 1e-8);
          }).sum()), deltaStream);
        }
        buffer.get(self, weights).addInPlace(deltaStream.toArray()).freeRef();
      }) {
        @Override
        protected void _free() {
          measure.freeRef();
          direction.freeRef();
        }

        @Override
        public boolean isAlive() {
          return true;
        }
      };
    }

    @Override
    protected void _free() {
      direction.freeRef();
      origin.freeRef();
      super._free();
    }

    @Nonnull
    @Override
    public JsonObject getJson(Map<CharSequence, byte[]> resources, DataSerializer dataSerializer) {
      // This synthetic layer exists only for the inner optimization and is
      // never serialized.
      throw new IllegalStateException();
    }

    @Nullable
    @Override
    public List<double[]> state() {
      return null;
    }
  };
}
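The backward pass above reduces each layer's full gradient to a single scalar per subspace weight: the dot product of the measured gradient with the stored direction, normalized by the direction's length. A minimal standalone sketch of that projection over plain arrays (the helper name is hypothetical; this is not the MindsEye Delta API):

static double subspaceCoefficient(double[] a, double[] b) {
  // Projects gradient b onto direction a, normalized by |a|; the 1e-8 floor
  // guards against division by zero for near-empty directions.
  double dotAB = 0, dotAA = 0;
  for (int i = 0; i < a.length; i++) {
    dotAB += a[i] * b[i];
    dotAA += a[i] * a[i];
  }
  return dotAB / Math.max(Math.sqrt(dotAA), 1e-8);
}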
Use of com.simiacryptus.mindseye.eval.Trainable in project MindsEye by SimiaCryptus.
Class RecursiveSubspace, method train.
/**
 * Trains the subspace weights of the given macro layer with an inner
 * L-BFGS optimizer.
 *
 * @param monitor    the monitor
 * @param macroLayer the macro layer
 */
public void train(@Nonnull TrainingMonitor monitor, Layer macroLayer) {
  @Nonnull BasicTrainable inner = new BasicTrainable(macroLayer);
  // The macro layer takes no input, so the trainable is built over a single
  // empty row.
  @Nonnull ArrayTrainable trainable = new ArrayTrainable(inner, new Tensor[][] { {} });
  inner.freeRef();
  new IterativeTrainer(trainable)
      .setOrientation(new LBFGS())
      .setLineSearchFactory(n -> new ArmijoWolfeSearch())
      .setMonitor(new TrainingMonitor() {
        @Override
        public void log(String msg) {
          monitor.log("\t" + msg);
        }
      })
      .setMaxIterations(getIterations())
      .setIterationsPerSample(getIterations())
      .setTerminateThreshold(terminateThreshold)
      .runAndFree();
  trainable.freeRef();
}
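Taken together, the two methods implement one recursive-subspace step: buildSubspace produces a layer whose weights span the current gradient directions, and train optimizes those weights. A minimal sketch of the composition (the orientation, subject, and measurement variables are assumptions, not code from the project):

// Hypothetical driver: build the subspace layer for the current measurement,
// optimize its weights with the inner trainer, then release it.
Layer macro = orientation.buildSubspace(subject, measurement, monitor);
if (null != macro) {
  orientation.train(monitor, macro);
  macro.freeRef();
}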
Use of com.simiacryptus.mindseye.eval.Trainable in project MindsEye by SimiaCryptus.
Class StaticRateTest, method train.
@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
  log.code(() -> {
    @Nonnull final SimpleLossNetwork supervisedNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
    @Nonnull final Trainable trainable = new SampledArrayTrainable(trainingData, supervisedNetwork, 1000);
    return new IterativeTrainer(trainable)
        .setMonitor(monitor)
        .setOrientation(new GradientDescent())
        .setLineSearchFactory((@Nonnull final CharSequence name) -> new StaticLearningRate(0.001))
        .setTimeout(3, TimeUnit.MINUTES)
        .setMaxIterations(500)
        .runAndFree();
  });
}
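A static line search skips step-size adaptation: conceptually, each iteration attempts the same fixed rate along the descent direction. For intuition only, over plain arrays (not the MindsEye line-search API):

// One fixed-rate gradient-descent step at rate 0.001.
static void step(double[] weights, double[] gradient) {
  final double rate = 0.001;
  for (int i = 0; i < weights.length; i++) {
    weights[i] -= rate * gradient[i];
  }
}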
Use of com.simiacryptus.mindseye.eval.Trainable in project MindsEye by SimiaCryptus.
Class L1NormalizationTest, method train.
@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
  log.code(() -> {
    @Nonnull final SimpleLossNetwork supervisedNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
    // Wrap the sampled trainable in an L1/L2 normalizer with a constant L1
    // weight of 1.0 and no L2 term.
    @Nonnull final Trainable trainable = new L12Normalizer(new SampledArrayTrainable(trainingData, supervisedNetwork, 1000)) {
      @Override
      public Layer getLayer() {
        return inner.getLayer();
      }

      @Override
      protected double getL1(final Layer layer) {
        return 1.0;
      }

      @Override
      protected double getL2(final Layer layer) {
        return 0;
      }
    };
    return new IterativeTrainer(trainable)
        .setMonitor(monitor)
        .setTimeout(3, TimeUnit.MINUTES)
        .setMaxIterations(500)
        .runAndFree();
  });
}
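Conceptually, an L1/L2 normalizer adds a weighted penalty on the weights to the raw loss; with the coefficients above that is loss + 1.0 * Σ|w|. A minimal sketch of the penalty itself over plain arrays (illustrative only, not the L12Normalizer internals):

// Elastic-net style penalty: l1 * sum(|w|) + l2 * sum(w^2).
static double penalty(double[] w, double l1, double l2) {
  double sumAbs = 0, sumSq = 0;
  for (double x : w) {
    sumAbs += Math.abs(x);
    sumSq += x * x;
  }
  return l1 * sumAbs + l2 * sumSq;
}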
Use of com.simiacryptus.mindseye.eval.Trainable in project MindsEye by SimiaCryptus.
Class SimpleGradientDescentTest, method train.
@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
  log.p("Training a model involves a few different components. First, our model is combined with a loss function. "
      + "Then we take that model and combine it with our training data to define a trainable object. "
      + "Finally, we use a simple iterative scheme to refine the weights of our model. "
      + "The final output is the last output value of the loss function when evaluating the last batch.");
  log.code(() -> {
    @Nonnull final SimpleLossNetwork supervisedNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
    // Shuffle the data and train on a fixed random subset of 10,000 rows.
    @Nonnull final ArrayList<Tensor[]> trainingList = new ArrayList<>(Arrays.asList(trainingData));
    Collections.shuffle(trainingList);
    @Nonnull final Tensor[][] randomSelection = trainingList.subList(0, 10000).toArray(new Tensor[][] {});
    @Nonnull final Trainable trainable = new ArrayTrainable(randomSelection, supervisedNetwork);
    return new IterativeTrainer(trainable)
        .setMonitor(monitor)
        .setTimeout(3, TimeUnit.MINUTES)
        .setMaxIterations(500)
        .runAndFree();
  });
}
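All of these tests accept a TrainingMonitor for progress logging. A minimal implementation, mirroring the log(...) override used in RecursiveSubspace.train above:

// Logs each trainer message to stdout; other callbacks keep their defaults.
TrainingMonitor monitor = new TrainingMonitor() {
  @Override
  public void log(String msg) {
    System.out.println(msg);
  }
};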