Search in sources:

Example 1 with SumInputsLayer

use of com.simiacryptus.mindseye.layers.java.SumInputsLayer in project MindsEye by SimiaCryptus.

From the class BinarySumLayer, method getCompatibilityLayer:

/**
 * Gets compatibility layer.
 *
 * @return the compatibility layer
 */
@Nonnull
public Layer getCompatibilityLayer() {
    @Nonnull PipelineNetwork network = new PipelineNetwork(2);
    // Weighted sum: (leftFactor * input0) + (rightFactor * input1); each scale
    // is frozen so the factors are not modified during training.
    network.wrap(new SumInputsLayer(),
        network.wrap(new LinearActivationLayer().setScale(this.leftFactor).freeze(), network.getInput(0)),
        network.wrap(new LinearActivationLayer().setScale(this.rightFactor).freeze(), network.getInput(1)));
    return network;
}
Also used: Nonnull(javax.annotation.Nonnull), SumInputsLayer(com.simiacryptus.mindseye.layers.java.SumInputsLayer), PipelineNetwork(com.simiacryptus.mindseye.network.PipelineNetwork), LinearActivationLayer(com.simiacryptus.mindseye.layers.java.LinearActivationLayer)
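
This compatibility layer expresses BinarySumLayer's weighted sum, leftFactor * input0 + rightFactor * input1, as a two-input pipeline of plain Java layers. Below is a minimal standalone sketch of the same pattern, assuming the wrap/eval behavior shown in these examples; the class name, factor values, and tensor contents are hypothetical:

import java.util.Arrays;
import javax.annotation.Nonnull;
import com.simiacryptus.mindseye.lang.Tensor;
import com.simiacryptus.mindseye.layers.java.LinearActivationLayer;
import com.simiacryptus.mindseye.layers.java.SumInputsLayer;
import com.simiacryptus.mindseye.network.PipelineNetwork;

public class WeightedSumSketch {
    public static void main(String[] args) {
        // Hypothetical stand-ins for this.leftFactor / this.rightFactor.
        final double leftFactor = 0.5;
        final double rightFactor = 2.0;
        @Nonnull final PipelineNetwork network = new PipelineNetwork(2);
        // SumInputsLayer adds its argument nodes elementwise; each input is
        // pre-scaled by a frozen (non-trainable) LinearActivationLayer.
        network.wrap(new SumInputsLayer(),
            network.wrap(new LinearActivationLayer().setScale(leftFactor).freeze(), network.getInput(0)),
            network.wrap(new LinearActivationLayer().setScale(rightFactor).freeze(), network.getInput(1)));
        // 0.5 * [1, 2] + 2.0 * [10, 20] should yield [20.5, 41.0].
        final Tensor a = new Tensor(2).set(0, 1).set(1, 2);
        final Tensor b = new Tensor(2).set(0, 10).set(1, 20);
        System.out.println(Arrays.toString(network.eval(a, b).getData().get(0).getData()));
    }
}

This mirrors getCompatibilityLayer, with the BinarySumLayer fields replaced by local constants.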

Example 2 with SumInputsLayer

use of com.simiacryptus.mindseye.layers.java.SumInputsLayer in project MindsEye by SimiaCryptus.

From the class SigmoidTreeNetwork, method getHead:

@Nullable
@Override
public synchronized DAGNode getHead() {
    if (null == head) {
        synchronized (this) {
            if (null == head) {
                reset();
                final DAGNode input = getInput(0);
                switch (getMode()) {
                    case Linear:
                        // Linear mode: alpha applied to the bias-shifted input; both trainable.
                        head = add(alpha.setFrozen(false), add(alphaBias.setFrozen(false), input));
                        break;
                    case Fuzzy: {
                        // Fuzzy mode: the alpha branch is multiplied by 2 * sigmoid(gate(input)).
                        final DAGNode gateNode = add(gate.setFrozen(false),
                            null != gateBias ? add(gateBias.setFrozen(false), input) : input);
                        head = add(new ProductInputsLayer(),
                            add(alpha.setFrozen(false), add(alphaBias.setFrozen(false), input)),
                            add(new LinearActivationLayer().setScale(2).freeze(),
                                add(new SigmoidActivationLayer().setBalanced(false), gateNode)));
                        break;
                    }
                    case Bilinear: {
                        // Bilinear mode: the alpha branch is weighted by sigmoid(g) and the
                        // beta branch by sigmoid(-g); the two weights sum to 1.
                        final DAGNode gateNode = add(gate.setFrozen(false),
                            null != gateBias ? add(gateBias.setFrozen(false), input) : input);
                        head = add(new SumInputsLayer(),
                            add(new ProductInputsLayer(),
                                add(alpha.setFrozen(false), add(alphaBias.setFrozen(false), input)),
                                add(new SigmoidActivationLayer().setBalanced(false), gateNode)),
                            add(new ProductInputsLayer(),
                                add(beta.setFrozen(false), add(betaBias.setFrozen(false), input)),
                                add(new SigmoidActivationLayer().setBalanced(false),
                                    add(new LinearActivationLayer().setScale(-1).freeze(), gateNode))));
                        break;
                    }
                    case Final:
                        // Final mode: the same gated sum, but alpha and beta are applied
                        // directly (no bias layers, and their frozen state is left untouched).
                        final DAGNode gateNode = add(gate.setFrozen(false),
                            null != gateBias ? add(gateBias.setFrozen(false), input) : input);
                        head = add(new SumInputsLayer(),
                            add(new ProductInputsLayer(), add(alpha, input),
                                add(new SigmoidActivationLayer().setBalanced(false), gateNode)),
                            add(new ProductInputsLayer(), add(beta, input),
                                add(new SigmoidActivationLayer().setBalanced(false),
                                    add(new LinearActivationLayer().setScale(-1).freeze(), gateNode))));
                        break;
                }
            }
        }
    }
    return head;
}
Also used: SumInputsLayer(com.simiacryptus.mindseye.layers.java.SumInputsLayer), DAGNode(com.simiacryptus.mindseye.network.DAGNode), SigmoidActivationLayer(com.simiacryptus.mindseye.layers.java.SigmoidActivationLayer), ProductInputsLayer(com.simiacryptus.mindseye.layers.java.ProductInputsLayer), LinearActivationLayer(com.simiacryptus.mindseye.layers.java.LinearActivationLayer), Nullable(javax.annotation.Nullable)
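
In Bilinear and Final modes the head computes a sigmoid-gated mixture, roughly head(x) = alpha(x) * sigmoid(g(x)) + beta(x) * sigmoid(-g(x)); since sigmoid(g) + sigmoid(-g) = 1, the gate blends the two branches convexly. A scalar sketch of that arithmetic in plain Java (the branch outputs alphaOut, betaOut, and gateOut are hypothetical values, not the MindsEye API):

public class BilinearGateSketch {
    // Plain (non-balanced) sigmoid, matching SigmoidActivationLayer.setBalanced(false).
    static double sigmoid(double x) {
        return 1.0 / (1.0 + Math.exp(-x));
    }

    public static void main(String[] args) {
        final double alphaOut = 3.0;  // hypothetical alpha-branch output
        final double betaOut = -1.0;  // hypothetical beta-branch output
        final double gateOut = 0.75;  // hypothetical gate output g(x)

        // sigmoid(g) and sigmoid(-g) sum to 1, so this is a convex blend of the branches.
        final double head = alphaOut * sigmoid(gateOut) + betaOut * sigmoid(-gateOut);
        System.out.println("gated head = " + head);
    }
}

Fuzzy mode uses the same gate but keeps only the alpha branch, scaled by 2 * sigmoid(g) so that a neutral gate (g = 0) passes the branch through unchanged.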

Example 3 with SumInputsLayer

use of com.simiacryptus.mindseye.layers.java.SumInputsLayer in project MindsEye by SimiaCryptus.

From the class EncodingProblem, method run:

@Nonnull
@Override
public EncodingProblem run(@Nonnull final NotebookOutput log) {
    @Nonnull final TrainingMonitor monitor = TestUtil.getMonitor(history);
    Tensor[][] trainingData;
    try {
        trainingData = data.trainingData().map(labeledObject -> {
            return new Tensor[] { new Tensor(features).set(this::random), labeledObject.data };
        }).toArray(i -> new Tensor[i][]);
    } catch (@Nonnull final IOException e) {
        throw new RuntimeException(e);
    }
    @Nonnull final DAGNetwork imageNetwork = revFactory.vectorToImage(log, features);
    log.h3("Network Diagram");
    log.code(() -> {
        return Graphviz.fromGraph(TestUtil.toGraph(imageNetwork)).height(400).width(600).render(Format.PNG).toImage();
    });
    @Nonnull final PipelineNetwork trainingNetwork = new PipelineNetwork(2);
    @Nullable final DAGNode image = trainingNetwork.add(imageNetwork, trainingNetwork.getInput(0));
    @Nullable final DAGNode softmax = trainingNetwork.add(new SoftmaxActivationLayer(), trainingNetwork.getInput(0));
    // The training loss is a sum of two terms: the entropy of the softmaxed
    // latent code (a regularizer on the representation) plus sqrt(MSE) between
    // the decoded image and the target image.
    trainingNetwork.add(new SumInputsLayer(),
        trainingNetwork.add(new EntropyLossLayer(), softmax, softmax),
        trainingNetwork.add(new NthPowerActivationLayer().setPower(1.0 / 2.0),
            trainingNetwork.add(new MeanSqLossLayer(), image, trainingNetwork.getInput(1))));
    log.h3("Training");
    log.p("We start by training apply a very small population to improve initial convergence performance:");
    TestUtil.instrumentPerformance(trainingNetwork);
    @Nonnull final Tensor[][] primingData = Arrays.copyOfRange(trainingData, 0, 1000);
    @Nonnull final ValidatingTrainer preTrainer = optimizer.train(log,
        (SampledTrainable) new SampledArrayTrainable(primingData, trainingNetwork, trainingSize, batchSize)
            .setMinSamples(trainingSize).setMask(true, false),
        new ArrayTrainable(primingData, trainingNetwork, batchSize), monitor);
    log.code(() -> {
        preTrainer.setTimeout(timeoutMinutes / 2, TimeUnit.MINUTES).setMaxIterations(batchSize).run();
    });
    TestUtil.extractPerformance(log, trainingNetwork);
    log.p("Then our main training phase:");
    TestUtil.instrumentPerformance(trainingNetwork);
    @Nonnull final ValidatingTrainer mainTrainer = optimizer.train(log,
        (SampledTrainable) new SampledArrayTrainable(trainingData, trainingNetwork, trainingSize, batchSize)
            .setMinSamples(trainingSize).setMask(true, false),
        new ArrayTrainable(trainingData, trainingNetwork, batchSize), monitor);
    log.code(() -> {
        mainTrainer.setTimeout(timeoutMinutes, TimeUnit.MINUTES).setMaxIterations(batchSize).run();
    });
    TestUtil.extractPerformance(log, trainingNetwork);
    if (!history.isEmpty()) {
        log.code(() -> {
            return TestUtil.plot(history);
        });
        log.code(() -> {
            return TestUtil.plotTime(history);
        });
    }
    try {
        @Nonnull String filename = log.getName().toString() + EncodingProblem.modelNo++ + "_plot.png";
        ImageIO.write(Util.toImage(TestUtil.plot(history)), "png", log.file(filename));
        log.appendFrontMatterProperty("result_plot", filename, ";");
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    // log.file()
    @Nonnull final String modelName = "encoding_model_" + EncodingProblem.modelNo++ + ".json";
    log.appendFrontMatterProperty("result_model", modelName, ";");
    log.p("Saved model as " + log.file(trainingNetwork.getJson().toString(), modelName, modelName));
    log.h3("Results");
    @Nonnull final PipelineNetwork testNetwork = new PipelineNetwork(2);
    testNetwork.add(imageNetwork, testNetwork.getInput(0));
    log.code(() -> {
        @Nonnull final TableOutput table = new TableOutput();
        Arrays.stream(trainingData).map(tensorArray -> {
            @Nullable final Tensor predictionSignal = testNetwork.eval(tensorArray).getData().get(0);
            @Nonnull final LinkedHashMap<CharSequence, Object> row = new LinkedHashMap<>();
            row.put("Source", log.image(tensorArray[1].toImage(), ""));
            row.put("Echo", log.image(predictionSignal.toImage(), ""));
            return row;
        }).filter(x -> null != x).limit(10).forEach(table::putRow);
        return table;
    });
    log.p("Learned Model Statistics:");
    log.code(() -> {
        @Nonnull final ScalarStatistics scalarStatistics = new ScalarStatistics();
        trainingNetwork.state().stream().flatMapToDouble(x -> Arrays.stream(x)).forEach(v -> scalarStatistics.add(v));
        return scalarStatistics.getMetrics();
    });
    log.p("Learned Representation Statistics:");
    log.code(() -> {
        @Nonnull final ScalarStatistics scalarStatistics = new ScalarStatistics();
        Arrays.stream(trainingData).flatMapToDouble(row -> Arrays.stream(row[0].getData())).forEach(v -> scalarStatistics.add(v));
        return scalarStatistics.getMetrics();
    });
    log.p("Some rendered unit vectors:");
    for (int featureNumber = 0; featureNumber < features; featureNumber++) {
        @Nonnull final Tensor input = new Tensor(features).set(featureNumber, 1);
        @Nullable final Tensor tensor = imageNetwork.eval(input).getData().get(0);
        TestUtil.renderToImages(tensor, true).forEach(img -> {
            log.out(log.image(img, ""));
        });
    }
    return this;
}
Also used: PipelineNetwork(com.simiacryptus.mindseye.network.PipelineNetwork), Graphviz(guru.nidi.graphviz.engine.Graphviz), EntropyLossLayer(com.simiacryptus.mindseye.layers.java.EntropyLossLayer), Arrays(java.util.Arrays), TableOutput(com.simiacryptus.util.TableOutput), Tensor(com.simiacryptus.mindseye.lang.Tensor), SumInputsLayer(com.simiacryptus.mindseye.layers.java.SumInputsLayer), SampledTrainable(com.simiacryptus.mindseye.eval.SampledTrainable), ArrayList(java.util.ArrayList), LinkedHashMap(java.util.LinkedHashMap), SoftmaxActivationLayer(com.simiacryptus.mindseye.layers.java.SoftmaxActivationLayer), Format(guru.nidi.graphviz.engine.Format), TrainingMonitor(com.simiacryptus.mindseye.opt.TrainingMonitor), ImageIO(javax.imageio.ImageIO), SampledArrayTrainable(com.simiacryptus.mindseye.eval.SampledArrayTrainable), ValidatingTrainer(com.simiacryptus.mindseye.opt.ValidatingTrainer), StepRecord(com.simiacryptus.mindseye.test.StepRecord), NotebookOutput(com.simiacryptus.util.io.NotebookOutput), Nonnull(javax.annotation.Nonnull), Nullable(javax.annotation.Nullable), Util(com.simiacryptus.util.Util), MeanSqLossLayer(com.simiacryptus.mindseye.layers.java.MeanSqLossLayer), NthPowerActivationLayer(com.simiacryptus.mindseye.layers.java.NthPowerActivationLayer), IOException(java.io.IOException), TestUtil(com.simiacryptus.mindseye.test.TestUtil), DAGNode(com.simiacryptus.mindseye.network.DAGNode), TimeUnit(java.util.concurrent.TimeUnit), List(java.util.List), ArrayTrainable(com.simiacryptus.mindseye.eval.ArrayTrainable), ScalarStatistics(com.simiacryptus.util.data.ScalarStatistics), DAGNetwork(com.simiacryptus.mindseye.network.DAGNetwork)
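
The training objective above wires two terms into one SumInputsLayer: an entropy term over the softmaxed latent vector (EntropyLossLayer applied to the softmax output against itself) and the square root of the mean-squared reconstruction error. A scalar sketch of that arithmetic in plain Java (the latent, decoded, and target values are hypothetical):

import java.util.Arrays;

public class EncodingLossSketch {
    public static void main(String[] args) {
        final double[] latent = {2.0, -1.0, 0.5};       // hypothetical latent code
        final double[] decoded = {0.2, 0.8, 0.5, 0.1};  // hypothetical decoded pixels
        final double[] target = {0.0, 1.0, 0.5, 0.0};   // hypothetical target pixels

        // softmax over the latent code (shifted by the max for numerical stability)
        final double max = Arrays.stream(latent).max().getAsDouble();
        final double[] exp = Arrays.stream(latent).map(v -> Math.exp(v - max)).toArray();
        final double sum = Arrays.stream(exp).sum();
        final double[] p = Arrays.stream(exp).map(v -> v / sum).toArray();

        // EntropyLossLayer(softmax, softmax) reduces to the entropy H(p) = -sum(p * log p)
        final double entropy = -Arrays.stream(p).map(v -> v * Math.log(v)).sum();

        // NthPowerActivationLayer(1/2) over MeanSqLossLayer is sqrt(MSE), i.e. RMSE
        double mse = 0;
        for (int i = 0; i < decoded.length; i++) {
            final double d = decoded[i] - target[i];
            mse += d * d;
        }
        mse /= decoded.length;

        // SumInputsLayer adds the two loss terms into the scalar training objective
        System.out.println("loss = " + (entropy + Math.sqrt(mse)));
    }
}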

Aggregations

SumInputsLayer (com.simiacryptus.mindseye.layers.java.SumInputsLayer): 3
LinearActivationLayer (com.simiacryptus.mindseye.layers.java.LinearActivationLayer): 2
DAGNode (com.simiacryptus.mindseye.network.DAGNode): 2
PipelineNetwork (com.simiacryptus.mindseye.network.PipelineNetwork): 2
Nonnull (javax.annotation.Nonnull): 2
Nullable (javax.annotation.Nullable): 2
ArrayTrainable (com.simiacryptus.mindseye.eval.ArrayTrainable): 1
SampledArrayTrainable (com.simiacryptus.mindseye.eval.SampledArrayTrainable): 1
SampledTrainable (com.simiacryptus.mindseye.eval.SampledTrainable): 1
Tensor (com.simiacryptus.mindseye.lang.Tensor): 1
EntropyLossLayer (com.simiacryptus.mindseye.layers.java.EntropyLossLayer): 1
MeanSqLossLayer (com.simiacryptus.mindseye.layers.java.MeanSqLossLayer): 1
NthPowerActivationLayer (com.simiacryptus.mindseye.layers.java.NthPowerActivationLayer): 1
ProductInputsLayer (com.simiacryptus.mindseye.layers.java.ProductInputsLayer): 1
SigmoidActivationLayer (com.simiacryptus.mindseye.layers.java.SigmoidActivationLayer): 1
SoftmaxActivationLayer (com.simiacryptus.mindseye.layers.java.SoftmaxActivationLayer): 1
DAGNetwork (com.simiacryptus.mindseye.network.DAGNetwork): 1
TrainingMonitor (com.simiacryptus.mindseye.opt.TrainingMonitor): 1
ValidatingTrainer (com.simiacryptus.mindseye.opt.ValidatingTrainer): 1
StepRecord (com.simiacryptus.mindseye.test.StepRecord): 1