
Example 1 with LinearActivationLayer

Use of com.simiacryptus.mindseye.layers.java.LinearActivationLayer in project MindsEye by SimiaCryptus.

The class BinarySumLayer, method getCompatibilityLayer.

/**
 * Gets compatibility layer.
 *
 * @return the compatibility layer
 */
@Nonnull
public Layer getCompatibilityLayer() {
    @Nonnull PipelineNetwork network = new PipelineNetwork(2);
    network.wrap(new SumInputsLayer(),
        network.wrap(new LinearActivationLayer().setScale(this.leftFactor).freeze(), network.getInput(0)),
        network.wrap(new LinearActivationLayer().setScale(this.rightFactor).freeze(), network.getInput(1)));
    return network;
}
Also used : Nonnull(javax.annotation.Nonnull) SumInputsLayer(com.simiacryptus.mindseye.layers.java.SumInputsLayer) PipelineNetwork(com.simiacryptus.mindseye.network.PipelineNetwork) LinearActivationLayer(com.simiacryptus.mindseye.layers.java.LinearActivationLayer)
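
For reference, the network built above is simply an element-wise weighted sum of its two inputs: out = leftFactor * left + rightFactor * right. A minimal, framework-free sketch of that arithmetic (plain Java arrays; this helper is illustrative and not part of the MindsEye API):

public static double[] weightedSum(double[] left, double[] right, double leftFactor, double rightFactor) {
    double[] out = new double[left.length];
    for (int i = 0; i < left.length; i++) {
        // LinearActivationLayer(scale=leftFactor) and LinearActivationLayer(scale=rightFactor), combined by SumInputsLayer
        out[i] = leftFactor * left[i] + rightFactor * right[i];
    }
    return out;
}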

Example 2 with LinearActivationLayer

Use of com.simiacryptus.mindseye.layers.java.LinearActivationLayer in project MindsEye by SimiaCryptus.

The class SigmoidTreeNetwork, method getHead.

@Nullable
@Override
public synchronized DAGNode getHead() {
    if (null == head) {
        synchronized (this) {
            if (null == head) {
                reset();
                final DAGNode input = getInput(0);
                switch(getMode()) {
                    case Linear:
                        head = add(alpha.setFrozen(false), add(alphaBias.setFrozen(false), input));
                        break;
                    case Fuzzy:
                        {
                            final DAGNode gateNode = add(gate.setFrozen(false), null != gateBias ? add(gateBias.setFrozen(false), input) : input);
                            head = add(new ProductInputsLayer(),
                                add(alpha.setFrozen(false), add(alphaBias.setFrozen(false), input)),
                                add(new LinearActivationLayer().setScale(2).freeze(),
                                    add(new SigmoidActivationLayer().setBalanced(false), gateNode)));
                            break;
                        }
                    case Bilinear:
                        {
                            final DAGNode gateNode = add(gate.setFrozen(false), null != gateBias ? add(gateBias.setFrozen(false), input) : input);
                            head = add(new SumInputsLayer(),
                                add(new ProductInputsLayer(),
                                    add(alpha.setFrozen(false), add(alphaBias.setFrozen(false), input)),
                                    add(new SigmoidActivationLayer().setBalanced(false), gateNode)),
                                add(new ProductInputsLayer(),
                                    add(beta.setFrozen(false), add(betaBias.setFrozen(false), input)),
                                    add(new SigmoidActivationLayer().setBalanced(false),
                                        add(new LinearActivationLayer().setScale(-1).freeze(), gateNode))));
                            break;
                        }
                    case Final:
                        final DAGNode gateNode = add(gate.setFrozen(false), null != gateBias ? add(gateBias.setFrozen(false), input) : input);
                        head = add(new SumInputsLayer(),
                            add(new ProductInputsLayer(),
                                add(alpha, input),
                                add(new SigmoidActivationLayer().setBalanced(false), gateNode)),
                            add(new ProductInputsLayer(),
                                add(beta, input),
                                add(new SigmoidActivationLayer().setBalanced(false),
                                    add(new LinearActivationLayer().setScale(-1).freeze(), gateNode))));
                        break;
                }
            }
        }
    }
    return head;
}
Also used : SumInputsLayer(com.simiacryptus.mindseye.layers.java.SumInputsLayer) DAGNode(com.simiacryptus.mindseye.network.DAGNode) SigmoidActivationLayer(com.simiacryptus.mindseye.layers.java.SigmoidActivationLayer) ProductInputsLayer(com.simiacryptus.mindseye.layers.java.ProductInputsLayer) LinearActivationLayer(com.simiacryptus.mindseye.layers.java.LinearActivationLayer) Nullable(javax.annotation.Nullable)
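
The Fuzzy and Bilinear modes above implement a soft split on the gate signal. In Fuzzy mode the head is alpha(x) * 2 * sigmoid(g(x)); in Bilinear mode it is alpha(x) * sigmoid(g(x)) + beta(x) * sigmoid(-g(x)), and since sigmoid(-g) = 1 - sigmoid(g) the two branches receive complementary weights. A scalar sketch of the Bilinear arithmetic (illustrative only; alphaOut, betaOut and gateOut stand in for the outputs of the corresponding sub-networks):

public static double bilinearHead(double alphaOut, double betaOut, double gateOut) {
    double s = 1.0 / (1.0 + Math.exp(-gateOut));  // SigmoidActivationLayer with balanced=false
    // LinearActivationLayer(scale=-1) applied to the gate before the second sigmoid yields the complementary weight 1 - s
    return alphaOut * s + betaOut * (1.0 - s);    // the two ProductInputsLayer branches, combined by SumInputsLayer
}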

Example 3 with LinearActivationLayer

Use of com.simiacryptus.mindseye.layers.java.LinearActivationLayer in project MindsEye by SimiaCryptus.

The class ImageClassifier, method deepDream.

/**
 * Deep dream.
 *
 * @param log   the log
 * @param image the image
 */
public void deepDream(@Nonnull final NotebookOutput log, final Tensor image) {
    log.code(() -> {
        @Nonnull ArrayList<StepRecord> history = new ArrayList<>();
        @Nonnull PipelineNetwork clamp = new PipelineNetwork(1);
        clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
        clamp.add(new LinearActivationLayer().setBias(255).setScale(-1).freeze());
        clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
        clamp.add(new LinearActivationLayer().setBias(255).setScale(-1).freeze());
        @Nonnull PipelineNetwork supervised = new PipelineNetwork(1);
        supervised.add(getNetwork().freeze(), supervised.wrap(clamp, supervised.getInput(0)));
        // CudaTensorList gpuInput = CudnnHandle.apply(gpu -> {
        // Precision precision = Precision.Float;
        // return CudaTensorList.wrap(gpu.getPtr(TensorArray.wrap(image), precision, MemoryType.Managed), 1, image.getDimensions(), precision);
        // });
        // @Nonnull Trainable trainable = new TensorListTrainable(supervised, gpuInput).setVerbosity(1).setMask(true);
        @Nonnull Trainable trainable = new ArrayTrainable(supervised, 1)
            .setVerbose(true)
            .setMask(true, false)
            .setData(Arrays.<Tensor[]>asList(new Tensor[] { image }));
        new IterativeTrainer(trainable)
            .setMonitor(getTrainingMonitor(history, supervised))
            .setOrientation(new QQN())
            .setLineSearchFactory(name -> new ArmijoWolfeSearch())
            .setTimeout(60, TimeUnit.MINUTES)
            .runAndFree();
        return TestUtil.plot(history);
    });
}
Also used : ActivationLayer(com.simiacryptus.mindseye.layers.cudnn.ActivationLayer) LinearActivationLayer(com.simiacryptus.mindseye.layers.java.LinearActivationLayer) Tensor(com.simiacryptus.mindseye.lang.Tensor) IterativeTrainer(com.simiacryptus.mindseye.opt.IterativeTrainer) Nonnull(javax.annotation.Nonnull) ArrayList(java.util.ArrayList) PipelineNetwork(com.simiacryptus.mindseye.network.PipelineNetwork) ArrayTrainable(com.simiacryptus.mindseye.eval.ArrayTrainable) QQN(com.simiacryptus.mindseye.opt.orient.QQN) StepRecord(com.simiacryptus.mindseye.test.StepRecord) ArmijoWolfeSearch(com.simiacryptus.mindseye.opt.line.ArmijoWolfeSearch) Trainable(com.simiacryptus.mindseye.eval.Trainable)
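
Conceptually, deepDream optimizes the pixels of the input image rather than the network weights: the network itself is frozen and setMask(true, false) marks only the image input as trainable. A toy, framework-free sketch of that "optimize the input" idea, using a finite-difference gradient on a stand-in objective (the objective f below is hypothetical and replaces the frozen MindsEye network):

public static double[] dreamStep(java.util.function.ToDoubleFunction<double[]> f, double[] image, double learningRate) {
    final double eps = 1e-4;
    double[] next = image.clone();
    double base = f.applyAsDouble(image);
    for (int i = 0; i < image.length; i++) {
        // finite-difference estimate of the objective's gradient with respect to pixel i
        double[] perturbed = image.clone();
        perturbed[i] += eps;
        double grad = (f.applyAsDouble(perturbed) - base) / eps;
        // descend on the objective; IterativeTrainer does the same in spirit, but with QQN steps and an Armijo-Wolfe line search
        next[i] = image[i] - learningRate * grad;
    }
    return next;
}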

Example 4 with LinearActivationLayer

Use of com.simiacryptus.mindseye.layers.java.LinearActivationLayer in project MindsEye by SimiaCryptus.

The class ImageClassifier, method deepDream.

/**
 * Deep dream.
 *
 * @param log                 the log
 * @param image               the image
 * @param targetCategoryIndex the target category index
 * @param totalCategories     the total categories
 * @param config              the config
 * @param network             the network
 * @param lossLayer           the loss layer
 * @param targetValue         the target value
 */
public void deepDream(@Nonnull final NotebookOutput log, final Tensor image, final int targetCategoryIndex, final int totalCategories, Function<IterativeTrainer, IterativeTrainer> config, final Layer network, final Layer lossLayer, final double targetValue) {
    @Nonnull List<Tensor[]> data = Arrays.<Tensor[]>asList(new Tensor[] {
        image,
        new Tensor(1, 1, totalCategories).set(targetCategoryIndex, targetValue)
    });
    log.code(() -> {
        for (Tensor[] tensors : data) {
            ImageClassifier.log.info(log.image(tensors[0].toImage(), "") + tensors[1]);
        }
    });
    log.code(() -> {
        @Nonnull ArrayList<StepRecord> history = new ArrayList<>();
        @Nonnull PipelineNetwork clamp = new PipelineNetwork(1);
        clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
        clamp.add(new LinearActivationLayer().setBias(255).setScale(-1).freeze());
        clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
        clamp.add(new LinearActivationLayer().setBias(255).setScale(-1).freeze());
        @Nonnull PipelineNetwork supervised = new PipelineNetwork(2);
        supervised.wrap(lossLayer,
            supervised.add(network.freeze(), supervised.wrap(clamp, supervised.getInput(0))),
            supervised.getInput(1));
        // TensorList[] gpuInput = data.stream().map(data1 -> {
        // return CudnnHandle.apply(gpu -> {
        // Precision precision = Precision.Float;
        // return CudaTensorList.wrap(gpu.getPtr(TensorArray.wrap(data1), precision, MemoryType.Managed), 1, image.getDimensions(), precision);
        // });
        // }).toArray(i -> new TensorList[i]);
        // @Nonnull Trainable trainable = new TensorListTrainable(supervised, gpuInput).setVerbosity(1).setMask(true);
        @Nonnull Trainable trainable = new ArrayTrainable(supervised, 1)
            .setVerbose(true)
            .setMask(true, false)
            .setData(data);
        config.apply(new IterativeTrainer(trainable)
                .setMonitor(getTrainingMonitor(history, supervised))
                .setOrientation(new QQN())
                .setLineSearchFactory(name -> new ArmijoWolfeSearch())
                .setTimeout(60, TimeUnit.MINUTES))
            .setTerminateThreshold(Double.NEGATIVE_INFINITY)
            .runAndFree();
        return TestUtil.plot(history);
    });
}
Also used : Tensor(com.simiacryptus.mindseye.lang.Tensor) ActivationLayer(com.simiacryptus.mindseye.layers.cudnn.ActivationLayer) LinearActivationLayer(com.simiacryptus.mindseye.layers.java.LinearActivationLayer) IterativeTrainer(com.simiacryptus.mindseye.opt.IterativeTrainer) Nonnull(javax.annotation.Nonnull) ArrayList(java.util.ArrayList) PipelineNetwork(com.simiacryptus.mindseye.network.PipelineNetwork) ArrayTrainable(com.simiacryptus.mindseye.eval.ArrayTrainable) QQN(com.simiacryptus.mindseye.opt.orient.QQN) StepRecord(com.simiacryptus.mindseye.test.StepRecord) ArmijoWolfeSearch(com.simiacryptus.mindseye.opt.line.ArmijoWolfeSearch) Trainable(com.simiacryptus.mindseye.eval.Trainable)
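
This supervised variant differs from the first overload in that the loss layer compares the frozen network's prediction against a fixed target: a 1 x 1 x totalCategories tensor holding targetValue at targetCategoryIndex. A plain-array sketch of that target construction (illustrative; not the Tensor API):

public static double[] categoryTarget(int totalCategories, int targetCategoryIndex, double targetValue) {
    double[] target = new double[totalCategories];  // all other categories stay at 0
    target[targetCategoryIndex] = targetValue;      // drive the chosen category toward targetValue
    return target;                                  // held fixed during training: setMask(true, false) optimizes only the image
}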

Example 5 with LinearActivationLayer

Use of com.simiacryptus.mindseye.layers.java.LinearActivationLayer in project MindsEye by SimiaCryptus.

The class ArtistryUtil, method getClamp.

/**
 * Gets clamp.
 *
 * @param max the max
 * @return the clamp
 */
@Nonnull
public static PipelineNetwork getClamp(final int max) {
    @Nonnull PipelineNetwork clamp = new PipelineNetwork(1);
    clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
    clamp.add(new LinearActivationLayer().setBias(max).setScale(-1).freeze());
    clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
    clamp.add(new LinearActivationLayer().setBias(max).setScale(-1).freeze());
    return clamp;
}
Also used : ActivationLayer(com.simiacryptus.mindseye.layers.cudnn.ActivationLayer) SquareActivationLayer(com.simiacryptus.mindseye.layers.cudnn.SquareActivationLayer) LinearActivationLayer(com.simiacryptus.mindseye.layers.java.LinearActivationLayer) Nonnull(javax.annotation.Nonnull) PipelineNetwork(com.simiacryptus.mindseye.network.PipelineNetwork)
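
The two ReLU / negating-linear pairs in getClamp implement a clamp of each value into [0, max]: y = max - relu(max - relu(x)). A framework-free check of that identity (illustrative scalar version):

public static double clamp(double x, double max) {
    double r1 = Math.max(0, x);    // first ActivationLayer(RELU): lower bound at 0
    double l1 = max - r1;          // LinearActivationLayer: scale -1, bias max (flip and shift)
    double r2 = Math.max(0, l1);   // second RELU: removes anything that was above max (negative after the flip)
    return max - r2;               // second flip restores orientation; result equals Math.min(Math.max(x, 0), max)
}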

Aggregations

LinearActivationLayer (com.simiacryptus.mindseye.layers.java.LinearActivationLayer) 6
PipelineNetwork (com.simiacryptus.mindseye.network.PipelineNetwork) 5
Nonnull (javax.annotation.Nonnull) 4
ActivationLayer (com.simiacryptus.mindseye.layers.cudnn.ActivationLayer) 3
ArrayTrainable (com.simiacryptus.mindseye.eval.ArrayTrainable) 2
Trainable (com.simiacryptus.mindseye.eval.Trainable) 2
Tensor (com.simiacryptus.mindseye.lang.Tensor) 2
SumInputsLayer (com.simiacryptus.mindseye.layers.java.SumInputsLayer) 2
IterativeTrainer (com.simiacryptus.mindseye.opt.IterativeTrainer) 2
ArmijoWolfeSearch (com.simiacryptus.mindseye.opt.line.ArmijoWolfeSearch) 2
QQN (com.simiacryptus.mindseye.opt.orient.QQN) 2
StepRecord (com.simiacryptus.mindseye.test.StepRecord) 2
ArrayList (java.util.ArrayList) 2
Nullable (javax.annotation.Nullable) 2
SquareActivationLayer (com.simiacryptus.mindseye.layers.cudnn.SquareActivationLayer) 1
NthPowerActivationLayer (com.simiacryptus.mindseye.layers.java.NthPowerActivationLayer) 1
ProductInputsLayer (com.simiacryptus.mindseye.layers.java.ProductInputsLayer) 1
SigmoidActivationLayer (com.simiacryptus.mindseye.layers.java.SigmoidActivationLayer) 1
DAGNode (com.simiacryptus.mindseye.network.DAGNode) 1