Use of com.simiacryptus.mindseye.opt.orient.TrustRegionStrategy in project MindsEye by SimiaCryptus.
From the class LinearSumConstraintTest, method train.
@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
  log.code(() -> {
    @Nonnull final SimpleLossNetwork supervisedNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
    @Nonnull final Trainable trainable = new SampledArrayTrainable(trainingData, supervisedNetwork, 10000);
    @Nonnull final TrustRegionStrategy trustRegionStrategy = new TrustRegionStrategy() {
      @Override
      public TrustRegion getRegionPolicy(final Layer layer) {
        return new LinearSumConstraint();
      }
    };
    return new IterativeTrainer(trainable)
      .setIterationsPerSample(100)
      .setMonitor(monitor)
      .setOrientation(trustRegionStrategy)
      .setTimeout(3, TimeUnit.MINUTES)
      .setMaxIterations(500)
      .runAndFree();
  });
}
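The no-argument TrustRegionStrategy constructor above relies on a default inner orientation. As a minimal sketch, and assuming TrustRegionStrategy also exposes a constructor that wraps an explicit inner OrientationStrategy such as LBFGS (an assumption not shown in the snippet above), the same region policy can be paired with a chosen optimizer:

  // Sketch only: the single-argument constructor and the LBFGS pairing are assumptions,
  // not taken from the snippet above.
  @Nonnull final TrustRegionStrategy constrainedLbfgs = new TrustRegionStrategy(new LBFGS()) {
    @Override
    public TrustRegion getRegionPolicy(final Layer layer) {
      return new LinearSumConstraint();
    }
  };
  new IterativeTrainer(trainable)
    .setOrientation(constrainedLbfgs)
    .setMonitor(monitor)
    .setTimeout(3, TimeUnit.MINUTES)
    .runAndFree();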
Use of com.simiacryptus.mindseye.opt.orient.TrustRegionStrategy in project MindsEye by SimiaCryptus.
From the class SingleOrthantTrustRegionTest, method train.
@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
  log.code(() -> {
    @Nonnull final SimpleLossNetwork supervisedNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
    @Nonnull final Trainable trainable = new SampledArrayTrainable(trainingData, supervisedNetwork, 10000);
    @Nonnull final TrustRegionStrategy trustRegionStrategy = new TrustRegionStrategy() {
      @Override
      public TrustRegion getRegionPolicy(final Layer layer) {
        return new SingleOrthant();
      }
    };
    return new IterativeTrainer(trainable)
      .setIterationsPerSample(100)
      .setMonitor(monitor)
      .setOrientation(trustRegionStrategy)
      .setTimeout(3, TimeUnit.MINUTES)
      .setMaxIterations(500)
      .runAndFree();
  });
}
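A TrustRegionStrategy does not have to apply the same region to every layer: getRegionPolicy receives the layer being updated. Below is a sketch of a per-layer policy; the FullyConnectedLayer check and the treatment of a null return as "no constraint" are assumptions, not shown in the snippet above.

  // Sketch only: the FullyConnectedLayer check and the meaning of a null policy
  // (treated here as "no constraint") are assumptions.
  @Nonnull final TrustRegionStrategy perLayerStrategy = new TrustRegionStrategy() {
    @Override
    public TrustRegion getRegionPolicy(final Layer layer) {
      if (layer instanceof FullyConnectedLayer) {
        return new SingleOrthant();
      }
      return null;
    }
  };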
Use of com.simiacryptus.mindseye.opt.orient.TrustRegionStrategy in project MindsEye by SimiaCryptus.
From the class DeepDream, method train.
/**
 * Trains the canvas image against the given network and returns the resulting buffered image.
 *
 * @param server          the server
 * @param log             the log
 * @param canvasImage     the canvas image
 * @param network         the network
 * @param precision       the precision
 * @param trainingMinutes the training time budget, in minutes
 * @return the buffered image
 */
@Nonnull
public BufferedImage train(final StreamNanoHTTPD server, @Nonnull final NotebookOutput log, final BufferedImage canvasImage, final PipelineNetwork network, final Precision precision, final int trainingMinutes) {
  System.gc();
  Tensor canvas = Tensor.fromRGB(canvasImage);
  TestUtil.monitorImage(canvas, false, false);
  network.setFrozen(true);
  ArtistryUtil.setPrecision(network, precision);
  @Nonnull Trainable trainable = new ArrayTrainable(network, 1)
    .setVerbose(true)
    .setMask(true)
    .setData(Arrays.asList(new Tensor[][]{{canvas}}));
  TestUtil.instrumentPerformance(network);
  if (null != server) {
    ArtistryUtil.addLayersHandler(network, server);
  }
  log.code(() -> {
    @Nonnull ArrayList<StepRecord> history = new ArrayList<>();
    new IterativeTrainer(trainable)
      .setMonitor(TestUtil.getMonitor(history))
      .setIterationsPerSample(100)
      .setOrientation(new TrustRegionStrategy() {
        @Override
        public TrustRegion getRegionPolicy(final Layer layer) {
          return new RangeConstraint();
        }
      })
      .setLineSearchFactory(name -> new BisectionSearch().setSpanTol(1e-1).setCurrentRate(1e3))
      .setTimeout(trainingMinutes, TimeUnit.MINUTES)
      .setTerminateThreshold(Double.NEGATIVE_INFINITY)
      .runAndFree();
    return TestUtil.plot(history);
  });
  return canvas.toImage();
}
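Here the region policy is a plain RangeConstraint with its default bounds. A hedged variant, assuming the intent is to keep the optimized canvas within the standard 8-bit RGB range, uses the same setMin/setMax builders that appear in the StyleTransfer example below:

  // Sketch only: the 0..255 bounds are an assumption about the intended pixel range;
  // setMin and setMax are the builders used by the StyleTransfer example below.
  @Nonnull final TrustRegionStrategy rgbBounded = new TrustRegionStrategy() {
    @Override
    public TrustRegion getRegionPolicy(final Layer layer) {
      return new RangeConstraint().setMin(0).setMax(255);
    }
  };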
Use of com.simiacryptus.mindseye.opt.orient.TrustRegionStrategy in project MindsEye by SimiaCryptus.
From the class StyleTransfer, method styleTransfer.
/**
 * Runs style transfer on the canvas image and returns the resulting buffered image.
 *
 * @param server          the server
 * @param log             the log
 * @param canvasImage     the canvas image
 * @param styleParameters the style parameters
 * @param trainingMinutes the training time budget, in minutes
 * @param measureStyle    the measured style setup
 * @return the buffered image
 */
public BufferedImage styleTransfer(final StreamNanoHTTPD server, @Nonnull final NotebookOutput log, final BufferedImage canvasImage, final StyleSetup<T> styleParameters, final int trainingMinutes, final NeuralSetup measureStyle) {
  BufferedImage result = ArtistryUtil.logExceptionWithDefault(log, () -> {
    log.p("Input Content:");
    log.p(log.image(styleParameters.contentImage, "Content Image"));
    log.p("Style Content:");
    styleParameters.styleImages.forEach((file, styleImage) -> {
      log.p(log.image(styleImage, file));
    });
    log.p("Input Canvas:");
    log.p(log.image(canvasImage, "Input Canvas"));
    System.gc();
    Tensor canvas = Tensor.fromRGB(canvasImage);
    TestUtil.monitorImage(canvas, false, false);
    log.p("Input Parameters:");
    log.code(() -> {
      return ArtistryUtil.toJson(styleParameters);
    });
    Trainable trainable = log.code(() -> {
      PipelineNetwork network = fitnessNetwork(measureStyle);
      network.setFrozen(true);
      ArtistryUtil.setPrecision(network, styleParameters.precision);
      TestUtil.instrumentPerformance(network);
      if (null != server) {
        ArtistryUtil.addLayersHandler(network, server);
      }
      return new ArrayTrainable(network, 1)
        .setVerbose(true)
        .setMask(true)
        .setData(Arrays.asList(new Tensor[][]{{canvas}}));
    });
    log.code(() -> {
      @Nonnull ArrayList<StepRecord> history = new ArrayList<>();
      new IterativeTrainer(trainable)
        .setMonitor(TestUtil.getMonitor(history))
        .setOrientation(new TrustRegionStrategy() {
          @Override
          public TrustRegion getRegionPolicy(final Layer layer) {
            return new RangeConstraint().setMin(1e-2).setMax(256);
          }
        })
        .setIterationsPerSample(100)
        .setLineSearchFactory(name -> new BisectionSearch().setSpanTol(1e-1).setCurrentRate(1e6))
        .setTimeout(trainingMinutes, TimeUnit.MINUTES)
        .setTerminateThreshold(Double.NEGATIVE_INFINITY)
        .runAndFree();
      return TestUtil.plot(history);
    });
    return canvas.toImage();
  }, canvasImage);
  log.p("Output Canvas:");
  log.p(log.image(result, "Output Canvas"));
  return result;
}
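The DeepDream and StyleTransfer examples differ only in the bounds passed to RangeConstraint (defaults versus setMin(1e-2).setMax(256)). A small hypothetical helper, not part of the MindsEye API, makes that choice explicit while using only the builders shown above:

  // Hypothetical helper: builds a pixel-range TrustRegionStrategy from explicit bounds,
  // using only the RangeConstraint builders shown above.
  private static TrustRegionStrategy pixelRangeStrategy(final double min, final double max) {
    return new TrustRegionStrategy() {
      @Override
      public TrustRegion getRegionPolicy(final Layer layer) {
        return new RangeConstraint().setMin(min).setMax(max);
      }
    };
  }
  // Example: the StyleTransfer settings above would correspond to pixelRangeStrategy(1e-2, 256).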
Use of com.simiacryptus.mindseye.opt.orient.TrustRegionStrategy in project MindsEye by SimiaCryptus.
From the class TrustSphereTest, method train.
@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
  log.code(() -> {
    @Nonnull final SimpleLossNetwork supervisedNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
    @Nonnull final Trainable trainable = new SampledArrayTrainable(trainingData, supervisedNetwork, 10000);
    @Nonnull final TrustRegionStrategy trustRegionStrategy = new TrustRegionStrategy() {
      @Override
      public TrustRegion getRegionPolicy(final Layer layer) {
        return new AdaptiveTrustSphere();
      }
    };
    return new IterativeTrainer(trainable)
      .setIterationsPerSample(100)
      .setMonitor(monitor)
      .setOrientation(trustRegionStrategy)
      .setTimeout(3, TimeUnit.MINUTES)
      .setMaxIterations(500)
      .runAndFree();
  });
}
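The three test methods above (LinearSumConstraintTest, SingleOrthantTrustRegionTest, TrustSphereTest) differ only in the TrustRegion they return. A hypothetical refactoring, assuming java.util.function.Supplier is available and that the IterativeTrainer setters return the trainer for chaining (as the usage above suggests):

  // Hypothetical helper, not present in the repository: shared trainer setup
  // parameterized by the region policy.
  private IterativeTrainer trainerWithRegion(final Trainable trainable, final TrainingMonitor monitor, final Supplier<TrustRegion> region) {
    return new IterativeTrainer(trainable)
      .setIterationsPerSample(100)
      .setMonitor(monitor)
      .setOrientation(new TrustRegionStrategy() {
        @Override
        public TrustRegion getRegionPolicy(final Layer layer) {
          return region.get();
        }
      })
      .setTimeout(3, TimeUnit.MINUTES)
      .setMaxIterations(500);
  }
  // Usage: trainerWithRegion(trainable, monitor, AdaptiveTrustSphere::new).runAndFree();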