Search in sources :

Example 1 with SimpleLossNetwork

use of com.simiacryptus.mindseye.network.SimpleLossNetwork in project MindsEye by SimiaCryptus.

The following example is taken from the class GDTest, method train().

@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
    log.code(() -> {
        // Pair the model with an entropy loss to form the supervised objective.
        @Nonnull final SimpleLossNetwork lossNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
        // Resample 1000 rows from the training data for each training pass.
        @Nonnull final Trainable sampled = new SampledArrayTrainable(trainingData, lossNetwork, 1000);
        // Plain gradient descent, bounded by a 5-minute / 500-iteration budget.
        return new IterativeTrainer(sampled)
            .setMonitor(monitor)
            .setOrientation(new GradientDescent())
            .setTimeout(5, TimeUnit.MINUTES)
            .setMaxIterations(500)
            .runAndFree();
    });
}
Also used : IterativeTrainer(com.simiacryptus.mindseye.opt.IterativeTrainer) Nonnull(javax.annotation.Nonnull) SampledArrayTrainable(com.simiacryptus.mindseye.eval.SampledArrayTrainable) EntropyLossLayer(com.simiacryptus.mindseye.layers.java.EntropyLossLayer) SimpleLossNetwork(com.simiacryptus.mindseye.network.SimpleLossNetwork) Trainable(com.simiacryptus.mindseye.eval.Trainable) SampledArrayTrainable(com.simiacryptus.mindseye.eval.SampledArrayTrainable)

Example 2 with SimpleLossNetwork

use of com.simiacryptus.mindseye.network.SimpleLossNetwork in project MindsEye by SimiaCryptus.

The following example is taken from the class MomentumTest, method train().

@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
    log.code(() -> {
        // Wrap the model with an entropy loss so training minimizes classification error.
        @Nonnull final SimpleLossNetwork lossNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
        // Resample 1000 rows from the training data for each training pass.
        @Nonnull final Trainable sampled = new SampledArrayTrainable(trainingData, lossNetwork, 1000);
        // Momentum (carry-over 0.8) layered over plain gradient descent; the
        // validating wrapper sanity-checks each proposed search direction.
        final MomentumStrategy momentum = new MomentumStrategy(new GradientDescent()).setCarryOver(0.8);
        return new IterativeTrainer(sampled)
            .setMonitor(monitor)
            .setOrientation(new ValidatingOrientationWrapper(momentum))
            .setTimeout(5, TimeUnit.MINUTES)
            .setMaxIterations(500)
            .runAndFree();
    });
}
Also used : IterativeTrainer(com.simiacryptus.mindseye.opt.IterativeTrainer) Nonnull(javax.annotation.Nonnull) SampledArrayTrainable(com.simiacryptus.mindseye.eval.SampledArrayTrainable) EntropyLossLayer(com.simiacryptus.mindseye.layers.java.EntropyLossLayer) SimpleLossNetwork(com.simiacryptus.mindseye.network.SimpleLossNetwork) Trainable(com.simiacryptus.mindseye.eval.Trainable) SampledArrayTrainable(com.simiacryptus.mindseye.eval.SampledArrayTrainable)

Example 3 with SimpleLossNetwork

use of com.simiacryptus.mindseye.network.SimpleLossNetwork in project MindsEye by SimiaCryptus.

The following example is taken from the class QQNTest, method train().

@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
    log.code(() -> {
        // Supervised objective: the model followed by an entropy loss.
        @Nonnull final SimpleLossNetwork lossNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
        // Train on growing samples (1000 up to 10000 rows) while validating
        // against the full training set.
        @Nonnull final ValidatingTrainer trainer = new ValidatingTrainer(
            new SampledArrayTrainable(trainingData, lossNetwork, 1000, 10000),
            new ArrayTrainable(trainingData, lossNetwork)).setMonitor(monitor);
        // Drive the first (and only) training phase with the QQN orientation.
        trainer.getRegimen().get(0).setOrientation(new QQN());
        trainer.setTimeout(5, TimeUnit.MINUTES);
        trainer.setMaxIterations(500);
        return trainer.run();
    });
}
Also used : Nonnull(javax.annotation.Nonnull) SampledArrayTrainable(com.simiacryptus.mindseye.eval.SampledArrayTrainable) EntropyLossLayer(com.simiacryptus.mindseye.layers.java.EntropyLossLayer) ValidatingTrainer(com.simiacryptus.mindseye.opt.ValidatingTrainer) ArrayTrainable(com.simiacryptus.mindseye.eval.ArrayTrainable) SampledArrayTrainable(com.simiacryptus.mindseye.eval.SampledArrayTrainable) SimpleLossNetwork(com.simiacryptus.mindseye.network.SimpleLossNetwork)

Example 4 with SimpleLossNetwork

use of com.simiacryptus.mindseye.network.SimpleLossNetwork in project MindsEye by SimiaCryptus.

The following example is taken from the class LinearSumConstraintTest, method train().

@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
    log.code(() -> {
        // Compose the model with an entropy loss to obtain the training objective.
        @Nonnull final SimpleLossNetwork lossNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
        // Resample 10000 rows from the training data for each training pass.
        @Nonnull final Trainable sampled = new SampledArrayTrainable(trainingData, lossNetwork, 10000);
        // Constrain every layer's update with a linear-sum trust region.
        @Nonnull final TrustRegionStrategy orientation = new TrustRegionStrategy() {

            @Override
            public TrustRegion getRegionPolicy(final Layer layer) {
                return new LinearSumConstraint();
            }
        };
        return new IterativeTrainer(sampled)
            .setIterationsPerSample(100)
            .setMonitor(monitor)
            .setOrientation(orientation)
            .setTimeout(3, TimeUnit.MINUTES)
            .setMaxIterations(500)
            .runAndFree();
    });
}
Also used : IterativeTrainer(com.simiacryptus.mindseye.opt.IterativeTrainer) Nonnull(javax.annotation.Nonnull) SampledArrayTrainable(com.simiacryptus.mindseye.eval.SampledArrayTrainable) EntropyLossLayer(com.simiacryptus.mindseye.layers.java.EntropyLossLayer) SimpleLossNetwork(com.simiacryptus.mindseye.network.SimpleLossNetwork) Trainable(com.simiacryptus.mindseye.eval.Trainable) SampledArrayTrainable(com.simiacryptus.mindseye.eval.SampledArrayTrainable) EntropyLossLayer(com.simiacryptus.mindseye.layers.java.EntropyLossLayer) Layer(com.simiacryptus.mindseye.lang.Layer) TrustRegionStrategy(com.simiacryptus.mindseye.opt.orient.TrustRegionStrategy)

Example 5 with SimpleLossNetwork

use of com.simiacryptus.mindseye.network.SimpleLossNetwork in project MindsEye by SimiaCryptus.

The following example is taken from the class L2NormalizationTest, method train().

@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
    // FIX: the narration string was garbled by an over-eager rename that replaced
    // the word "with" by "mapCoords" ("combined mapCoords a loss function");
    // restore readable English in the user-facing notebook text.
    log.p("Training a model involves a few different components. First, our model is combined with a loss function. " + "Then we take that model and combine it with our training data to define a trainable object. " + "Finally, we use a simple iterative scheme to refine the weights of our model. " + "The final output is the last output value of the loss function when evaluating the last batch.");
    log.code(() -> {
        // Pair the model with an entropy loss to form the supervised objective.
        @Nonnull final SimpleLossNetwork supervisedNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
        // Apply L1/L2 regularization on top of a 1000-row sampled trainable:
        // L1 is disabled (0.0) and a strong L2 penalty (1e4) is used for every layer.
        @Nonnull final Trainable trainable = new L12Normalizer(new SampledArrayTrainable(trainingData, supervisedNetwork, 1000)) {

            @Override
            public Layer getLayer() {
                return inner.getLayer();
            }

            @Override
            protected double getL1(final Layer layer) {
                // No L1 penalty.
                return 0.0;
            }

            @Override
            protected double getL2(final Layer layer) {
                // Uniform L2 weight for all layers.
                return 1e4;
            }
        };
        // Default orientation; bounded by a 3-minute / 500-iteration budget.
        return new IterativeTrainer(trainable).setMonitor(monitor).setTimeout(3, TimeUnit.MINUTES).setMaxIterations(500).runAndFree();
    });
}
Also used : IterativeTrainer(com.simiacryptus.mindseye.opt.IterativeTrainer) Nonnull(javax.annotation.Nonnull) SampledArrayTrainable(com.simiacryptus.mindseye.eval.SampledArrayTrainable) L12Normalizer(com.simiacryptus.mindseye.eval.L12Normalizer) EntropyLossLayer(com.simiacryptus.mindseye.layers.java.EntropyLossLayer) SimpleLossNetwork(com.simiacryptus.mindseye.network.SimpleLossNetwork) Trainable(com.simiacryptus.mindseye.eval.Trainable) SampledArrayTrainable(com.simiacryptus.mindseye.eval.SampledArrayTrainable) EntropyLossLayer(com.simiacryptus.mindseye.layers.java.EntropyLossLayer) Layer(com.simiacryptus.mindseye.lang.Layer)

Aggregations

SimpleLossNetwork (com.simiacryptus.mindseye.network.SimpleLossNetwork)18 Nonnull (javax.annotation.Nonnull)18 EntropyLossLayer (com.simiacryptus.mindseye.layers.java.EntropyLossLayer)17 SampledArrayTrainable (com.simiacryptus.mindseye.eval.SampledArrayTrainable)16 Trainable (com.simiacryptus.mindseye.eval.Trainable)13 IterativeTrainer (com.simiacryptus.mindseye.opt.IterativeTrainer)13 Layer (com.simiacryptus.mindseye.lang.Layer)6 ArrayTrainable (com.simiacryptus.mindseye.eval.ArrayTrainable)5 ValidatingTrainer (com.simiacryptus.mindseye.opt.ValidatingTrainer)4 GradientDescent (com.simiacryptus.mindseye.opt.orient.GradientDescent)4 TrustRegionStrategy (com.simiacryptus.mindseye.opt.orient.TrustRegionStrategy)3 L12Normalizer (com.simiacryptus.mindseye.eval.L12Normalizer)2 TrainingMonitor (com.simiacryptus.mindseye.opt.TrainingMonitor)2 QuadraticSearch (com.simiacryptus.mindseye.opt.line.QuadraticSearch)2 ArrayList (java.util.ArrayList)2 Lists (com.google.common.collect.Lists)1 ConstantResult (com.simiacryptus.mindseye.lang.ConstantResult)1 Tensor (com.simiacryptus.mindseye.lang.Tensor)1 TensorArray (com.simiacryptus.mindseye.lang.TensorArray)1 TensorList (com.simiacryptus.mindseye.lang.TensorList)1