Use of com.simiacryptus.mindseye.network.PipelineNetwork in project MindsEye by SimiaCryptus.
The class DeepDream, method train.
/**
* Train buffered image.
*
* @param server the server
* @param log the log
* @param canvasImage the canvas image
* @param network the network
* @param precision the precision
* @param trainingMinutes the training minutes
* @return the buffered image
*/
@Nonnull
public BufferedImage train(final StreamNanoHTTPD server, @Nonnull final NotebookOutput log, final BufferedImage canvasImage,
                           final PipelineNetwork network, final Precision precision, final int trainingMinutes) {
  System.gc();
  // The canvas image is the only thing being optimized; the network weights stay frozen.
  Tensor canvas = Tensor.fromRGB(canvasImage);
  TestUtil.monitorImage(canvas, false, false);
  network.setFrozen(true);
  ArtistryUtil.setPrecision(network, precision);
  // Mask the single input so gradients flow into the canvas pixels rather than the weights.
  @Nonnull Trainable trainable = new ArrayTrainable(network, 1)
      .setVerbose(true)
      .setMask(true)
      .setData(Arrays.asList(new Tensor[][] { { canvas } }));
  TestUtil.instrumentPerformance(network);
  if (null != server)
    ArtistryUtil.addLayersHandler(network, server);
  log.code(() -> {
    @Nonnull ArrayList<StepRecord> history = new ArrayList<>();
    new IterativeTrainer(trainable)
        .setMonitor(TestUtil.getMonitor(history))
        .setIterationsPerSample(100)
        // Constrain each step with a simple range-bounded trust region.
        .setOrientation(new TrustRegionStrategy() {
          @Override
          public TrustRegion getRegionPolicy(final Layer layer) {
            return new RangeConstraint();
          }
        })
        .setLineSearchFactory(name -> new BisectionSearch().setSpanTol(1e-1).setCurrentRate(1e3))
        .setTimeout(trainingMinutes, TimeUnit.MINUTES)
        .setTerminateThreshold(Double.NEGATIVE_INFINITY)
        .runAndFree();
    return TestUtil.plot(history);
  });
  return canvas.toImage();
}
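The RangeConstraint trust region used above keeps the optimized values inside a bounded interval at every step. As a rough illustration of what such a box constraint does (a plain-Java sketch, not MindsEye code; the class, method, and the [0, 255] range are invented for the example), projecting onto a range simply clamps each coordinate:

// Minimal sketch of a box projection, as a range-style trust region applies conceptually.
public final class RangeProjectionSketch {

  /** Projects each coordinate of x onto the closed interval [min, max]. */
  static double[] projectOntoRange(double[] x, double min, double max) {
    double[] out = new double[x.length];
    for (int i = 0; i < x.length; i++) {
      out[i] = Math.max(min, Math.min(max, x[i]));
    }
    return out;
  }

  public static void main(String[] args) {
    double[] step = { -12.5, 63.0, 300.2 };
    double[] projected = projectOntoRange(step, 0.0, 255.0);
    System.out.println(java.util.Arrays.toString(projected)); // [0.0, 63.0, 255.0]
  }
}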
Use of com.simiacryptus.mindseye.network.PipelineNetwork in project MindsEye by SimiaCryptus.
The class DeepDream, method deepDream.
/**
* Deep dream buffered image.
*
* @param server the server
* @param log the log
* @param canvasImage the canvas image
* @param styleParameters the style parameters
* @param trainingMinutes the training minutes
* @return the buffered image
*/
@Nonnull
public BufferedImage deepDream(final StreamNanoHTTPD server, @Nonnull final NotebookOutput log, final BufferedImage canvasImage,
                               final StyleSetup<T> styleParameters, final int trainingMinutes) {
  PipelineNetwork network = fitnessNetwork(processStats(styleParameters));
  log.p("Input Parameters:");
  log.code(() -> {
    return ArtistryUtil.toJson(styleParameters);
  });
  log.p("Input Content:");
  log.p(log.image(styleParameters.contentImage, "Content Image"));
  log.p("Input Canvas:");
  log.p(log.image(canvasImage, "Input Canvas"));
  BufferedImage result = train(server, log, canvasImage, network, styleParameters.precision, trainingMinutes);
  log.p("Output Canvas:");
  log.p(log.image(result, "Output Canvas"));
  return result;
}
Use of com.simiacryptus.mindseye.network.PipelineNetwork in project MindsEye by SimiaCryptus.
The class ImageClassifier, method deepDream.
/**
* Deep dream.
*
* @param log the log
* @param image the image
*/
public void deepDream(@Nonnull final NotebookOutput log, final Tensor image) {
  log.code(() -> {
    @Nonnull ArrayList<StepRecord> history = new ArrayList<>();
    // Two ReLU / negated-offset pairs compose to clamp the canvas into [0, 255]:
    // y = 255 - relu(255 - relu(x)).
    @Nonnull PipelineNetwork clamp = new PipelineNetwork(1);
    clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
    clamp.add(new LinearActivationLayer().setBias(255).setScale(-1).freeze());
    clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
    clamp.add(new LinearActivationLayer().setBias(255).setScale(-1).freeze());
    // Feed the clamped canvas into the frozen classifier network.
    @Nonnull PipelineNetwork supervised = new PipelineNetwork(1);
    supervised.add(getNetwork().freeze(), supervised.wrap(clamp, supervised.getInput(0)));
    // CudaTensorList gpuInput = CudnnHandle.apply(gpu -> {
    //   Precision precision = Precision.Float;
    //   return CudaTensorList.wrap(gpu.getPtr(TensorArray.wrap(image), precision, MemoryType.Managed), 1, image.getDimensions(), precision);
    // });
    // @Nonnull Trainable trainable = new TensorListTrainable(supervised, gpuInput).setVerbosity(1).setMask(true);
    @Nonnull Trainable trainable = new ArrayTrainable(supervised, 1)
        .setVerbose(true)
        .setMask(true, false)
        .setData(Arrays.<Tensor[]>asList(new Tensor[] { image }));
    new IterativeTrainer(trainable)
        .setMonitor(getTrainingMonitor(history, supervised))
        .setOrientation(new QQN())
        .setLineSearchFactory(name -> new ArmijoWolfeSearch())
        .setTimeout(60, TimeUnit.MINUTES)
        .runAndFree();
    return TestUtil.plot(history);
  });
}
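The two ReLU plus negated-offset LinearActivationLayer pairs in the clamp network compose to the piecewise-linear function 255 - relu(255 - relu(x)), which pins the canvas into [0, 255]. The standalone sketch below (plain Java; the class and method names are invented for illustration) checks that identity against a direct clamp:

// Plain-Java check of the clamp identity built by the two ReLU / LinearActivationLayer pairs above.
public final class ClampIdentitySketch {

  static double relu(double x) {
    return Math.max(0.0, x);
  }

  /** The composition of the 'clamp' PipelineNetwork, written out directly. */
  static double clampViaRelu(double x) {
    return 255.0 - relu(255.0 - relu(x));
  }

  public static void main(String[] args) {
    for (double x : new double[] { -40.0, 0.0, 127.5, 255.0, 400.0 }) {
      double expected = Math.max(0.0, Math.min(255.0, x));
      System.out.printf("x=%7.1f  clampViaRelu=%7.1f  expected=%7.1f%n", x, clampViaRelu(x), expected);
    }
  }
}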
Use of com.simiacryptus.mindseye.network.PipelineNetwork in project MindsEye by SimiaCryptus.
The class StyleTransfer, method measureStyle.
/**
* Measure style neural setup.
*
* @param style the style
* @return the neural setup
*/
public NeuralSetup measureStyle(final StyleSetup<T> style) {
  NeuralSetup<T> self = new NeuralSetup(style);
  List<CharSequence> keyList = style.styleImages.keySet().stream().collect(Collectors.toList());
  Tensor contentInput = Tensor.fromRGB(style.contentImage);
  List<Tensor> styleInputs = keyList.stream()
      .map(x -> style.styleImages.get(x))
      .map(img -> Tensor.fromRGB(img))
      .collect(Collectors.toList());
  IntStream.range(0, keyList.size()).forEach(i -> {
    self.styleTargets.put(keyList.get(i), new StyleTarget());
  });
  self.contentTarget = new ContentTarget();
  for (final T layerType : getLayerTypes()) {
    System.gc();
    final PipelineNetwork network = layerType.texture();
    ArtistryUtil.setPrecision(network, style.precision);
    // Content target: this layer's raw feature response to the content image.
    Tensor content = network.eval(contentInput).getDataAndFree().getAndFree(0);
    self.contentTarget.content.put(layerType, content);
    logger.info(String.format("%s : target content = %s", layerType.name(), content.prettyPrint()));
    logger.info(String.format("%s : content statistics = %s", layerType.name(), JsonUtil.toJson(new ScalarStatistics().add(content.getData()).getMetrics())));
    for (int i = 0; i < styleInputs.size(); i++) {
      Tensor styleInput = styleInputs.get(i);
      CharSequence key = keyList.get(i);
      StyleTarget<T> styleTarget = self.styleTargets.get(key);
      // Skip style images whose parameters give this layer no mean or covariance weight.
      if (0 == self.style.styles.entrySet().stream()
          .filter(e1 -> e1.getKey().contains(key))
          .map(x -> (LayerStyleParams) x.getValue().params.get(layerType))
          .filter(x -> null != x)
          .filter(x -> x.mean != 0 || x.cov != 0)
          .count())
        continue;
      System.gc();
      Tensor mean = ArtistryUtil.wrapTilesAvg(ArtistryUtil.avg(network.copy())).eval(styleInput).getDataAndFree().getAndFree(0);
      styleTarget.mean.put(layerType, mean);
      logger.info(String.format("%s : style mean = %s", layerType.name(), mean.prettyPrint()));
      logger.info(String.format("%s : mean statistics = %s", layerType.name(), JsonUtil.toJson(new ScalarStatistics().add(mean.getData()).getMetrics())));
      // Skip the more expensive covariance statistics when no layer weights them.
      if (0 == self.style.styles.entrySet().stream()
          .filter(e1 -> e1.getKey().contains(key))
          .map(x -> (LayerStyleParams) x.getValue().params.get(layerType))
          .filter(x -> null != x)
          .filter(x -> x.cov != 0)
          .count())
        continue;
      System.gc();
      // cov0: Gram statistic of the raw features; cov1: Gram statistic computed relative to the style mean.
      Tensor cov0 = ArtistryUtil.wrapTilesAvg(ArtistryUtil.gram(network.copy())).eval(styleInput).getDataAndFree().getAndFree(0);
      Tensor cov1 = ArtistryUtil.wrapTilesAvg(ArtistryUtil.gram(network.copy(), mean)).eval(styleInput).getDataAndFree().getAndFree(0);
      styleTarget.cov0.put(layerType, cov0);
      styleTarget.cov1.put(layerType, cov1);
      int featureBands = mean.getDimensions()[2];
      int covarianceElements = cov1.getDimensions()[2];
      int selectedBands = covarianceElements / featureBands;
      logger.info(String.format("%s : target cov0 = %s", layerType.name(), cov0.reshapeCast(featureBands, selectedBands, 1).prettyPrint()));
      logger.info(String.format("%s : cov0 statistics = %s", layerType.name(), JsonUtil.toJson(new ScalarStatistics().add(cov0.getData()).getMetrics())));
      logger.info(String.format("%s : target cov1 = %s", layerType.name(), cov1.reshapeCast(featureBands, selectedBands, 1).prettyPrint()));
      logger.info(String.format("%s : cov1 statistics = %s", layerType.name(), JsonUtil.toJson(new ScalarStatistics().add(cov1.getData()).getMetrics())));
    }
  }
  return self;
}
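The cov0 and cov1 targets above are Gram-style second-moment statistics of a layer's feature maps, computed in MindsEye via ArtistryUtil.gram. The following plain-Java sketch shows the underlying idea only, averaging outer products of per-position feature vectors; the class name, method name, and toy data are invented for illustration:

// Sketch of the Gram statistic that style covariance targets are built from.
public final class GramSketch {

  /**
   * @param features N x C matrix of per-position feature vectors (N spatial positions, C bands)
   * @return C x C Gram matrix, averaged over positions
   */
  static double[][] gram(double[][] features) {
    int n = features.length;
    int c = features[0].length;
    double[][] g = new double[c][c];
    for (double[] f : features) {
      for (int i = 0; i < c; i++) {
        for (int j = 0; j < c; j++) {
          g[i][j] += f[i] * f[j] / n;
        }
      }
    }
    return g;
  }

  public static void main(String[] args) {
    double[][] features = { { 1, 2 }, { 3, 4 }, { 5, 6 } }; // 3 positions, 2 bands
    System.out.println(java.util.Arrays.deepToString(gram(features)));
  }
}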
Use of com.simiacryptus.mindseye.network.PipelineNetwork in project MindsEye by SimiaCryptus.
The class StyleTransfer, method styleTransfer.
/**
* Style transfer buffered image.
*
* @param server the server
* @param log the log
* @param canvasImage the canvas image
* @param styleParameters the style parameters
* @param trainingMinutes the training minutes
* @param measureStyle the measure style
* @return the buffered image
*/
public BufferedImage styleTransfer(final StreamNanoHTTPD server, @Nonnull final NotebookOutput log, final BufferedImage canvasImage,
                                   final StyleSetup<T> styleParameters, final int trainingMinutes, final NeuralSetup measureStyle) {
  BufferedImage result = ArtistryUtil.logExceptionWithDefault(log, () -> {
    log.p("Input Content:");
    log.p(log.image(styleParameters.contentImage, "Content Image"));
    log.p("Style Content:");
    styleParameters.styleImages.forEach((file, styleImage) -> {
      log.p(log.image(styleImage, file));
    });
    log.p("Input Canvas:");
    log.p(log.image(canvasImage, "Input Canvas"));
    System.gc();
    Tensor canvas = Tensor.fromRGB(canvasImage);
    TestUtil.monitorImage(canvas, false, false);
    log.p("Input Parameters:");
    log.code(() -> {
      return ArtistryUtil.toJson(styleParameters);
    });
    // Build the fitness network from the measured style statistics; only the canvas is trainable.
    Trainable trainable = log.code(() -> {
      PipelineNetwork network = fitnessNetwork(measureStyle);
      network.setFrozen(true);
      ArtistryUtil.setPrecision(network, styleParameters.precision);
      TestUtil.instrumentPerformance(network);
      if (null != server)
        ArtistryUtil.addLayersHandler(network, server);
      return new ArrayTrainable(network, 1)
          .setVerbose(true)
          .setMask(true)
          .setData(Arrays.asList(new Tensor[][] { { canvas } }));
    });
    log.code(() -> {
      @Nonnull ArrayList<StepRecord> history = new ArrayList<>();
      new IterativeTrainer(trainable)
          .setMonitor(TestUtil.getMonitor(history))
          // Keep canvas values roughly within pixel range while optimizing.
          .setOrientation(new TrustRegionStrategy() {
            @Override
            public TrustRegion getRegionPolicy(final Layer layer) {
              return new RangeConstraint().setMin(1e-2).setMax(256);
            }
          })
          .setIterationsPerSample(100)
          .setLineSearchFactory(name -> new BisectionSearch().setSpanTol(1e-1).setCurrentRate(1e6))
          .setTimeout(trainingMinutes, TimeUnit.MINUTES)
          .setTerminateThreshold(Double.NEGATIVE_INFINITY)
          .runAndFree();
      return TestUtil.plot(history);
    });
    return canvas.toImage();
  }, canvasImage);
  log.p("Output Canvas:");
  log.p(log.image(result, "Output Canvas"));
  return result;
}
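Both training loops above plug a BisectionSearch into setLineSearchFactory. As a rough sketch of the bisection idea behind such a line search (plain Java, not the MindsEye BisectionSearch implementation; the function, interval, and tolerance are invented for the example), one can repeatedly halve a bracketing interval on the step size using the sign of an estimated slope:

import java.util.function.DoubleUnaryOperator;

// Sketch of a bisection-style line search on a one-dimensional step-size function.
public final class BisectionLineSearchSketch {

  /** Approximately minimizes a unimodal f on [lo, hi] by bisecting on the sign of the slope. */
  static double bisect(DoubleUnaryOperator f, double lo, double hi, double tol) {
    while (hi - lo > tol) {
      double mid = 0.5 * (lo + hi);
      double eps = 1e-6 * (hi - lo);
      // Central-difference slope estimate; keep the half-interval containing descent.
      double slope = (f.applyAsDouble(mid + eps) - f.applyAsDouble(mid - eps)) / (2 * eps);
      if (slope > 0) hi = mid; else lo = mid;
    }
    return 0.5 * (lo + hi);
  }

  public static void main(String[] args) {
    // Minimize (t - 3)^2 over [0, 10]; the minimizer is t = 3.
    double t = bisect(x -> (x - 3) * (x - 3), 0, 10, 1e-4);
    System.out.println(t);
  }
}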