Example 61 with ComputationGraph

Use of org.deeplearning4j.nn.graph.ComputationGraph in project deeplearning4j by deeplearning4j.

The class GradientCheckTestsComputationGraph, method testLSTMWithMerging:

@Test
public void testLSTMWithMerging() {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(0.2, 0.6))
                    .updater(Updater.NONE).learningRate(1.0)
                    .graphBuilder()
                    .addInputs("input").setOutputs("out")
                    .addLayer("lstm1", new GravesLSTM.Builder().nIn(3).nOut(4).activation(Activation.TANH).build(), "input")
                    .addLayer("lstm2", new GravesLSTM.Builder().nIn(4).nOut(4).activation(Activation.TANH).build(), "lstm1")
                    .addLayer("dense1", new DenseLayer.Builder().nIn(4).nOut(4).activation(Activation.SIGMOID).build(), "lstm1")
                    .addLayer("lstm3", new GravesLSTM.Builder().nIn(4).nOut(4).activation(Activation.TANH).build(), "dense1")
                    .addVertex("merge", new MergeVertex(), "lstm2", "lstm3")
                    .addLayer("out", new RnnOutputLayer.Builder().nIn(8).nOut(3).activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "merge")
                    .inputPreProcessor("dense1", new RnnToFeedForwardPreProcessor())
                    .inputPreProcessor("lstm3", new FeedForwardToRnnPreProcessor())
                    .pretrain(false).backprop(true).build();
    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();
    Random r = new Random(12345);
    INDArray input = Nd4j.rand(new int[] { 3, 3, 5 });
    INDArray labels = Nd4j.zeros(3, 3, 5);
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 5; j++) {
            labels.putScalar(new int[] { i, r.nextInt(3), j }, 1.0);
        }
    }
    if (PRINT_RESULTS) {
        System.out.println("testLSTMWithMerging()");
        for (int j = 0; j < graph.getNumLayers(); j++) System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
    }
    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] { input }, new INDArray[] { labels });
    String msg = "testLSTMWithMerging()";
    assertTrue(msg, gradOK);
}
Also used: UniformDistribution (org.deeplearning4j.nn.conf.distribution.UniformDistribution), RnnToFeedForwardPreProcessor (org.deeplearning4j.nn.conf.preprocessor.RnnToFeedForwardPreProcessor), Random (java.util.Random), INDArray (org.nd4j.linalg.api.ndarray.INDArray), ComputationGraphConfiguration (org.deeplearning4j.nn.conf.ComputationGraphConfiguration), FeedForwardToRnnPreProcessor (org.deeplearning4j.nn.conf.preprocessor.FeedForwardToRnnPreProcessor), ComputationGraph (org.deeplearning4j.nn.graph.ComputationGraph), Test (org.junit.Test)
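
As a usage note: once a graph like this is initialized, inference is a single call. A minimal sketch, assuming a graph built and initialized exactly as in the test above (the variable names below are illustrative, not from the test):

// ComputationGraph.output(...) returns one INDArray per declared graph output.
INDArray features = Nd4j.rand(new int[] { 3, 3, 5 });   // [minibatch, nIn, timeSeriesLength]
INDArray[] out = graph.output(features);
INDArray prediction = out[0];   // shape [3, 3, 5]: softmax over 3 classes at each of 5 time steps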

Example 62 with ComputationGraph

Use of org.deeplearning4j.nn.graph.ComputationGraph in project deeplearning4j by deeplearning4j.

The class GradientCheckTestsComputationGraph, method testL2NormalizeVertex2d:

@Test
public void testL2NormalizeVertex2d() {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .activation(Activation.TANH).updater(Updater.NONE).learningRate(1.0)
                    .graphBuilder()
                    .addInputs("in1")
                    .addLayer("d1", new DenseLayer.Builder().nIn(2).nOut(3).build(), "in1")
                    .addVertex("norm", new L2NormalizeVertex(), "d1")
                    .addLayer("out1", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2)
                                    .nIn(3).nOut(2).activation(Activation.IDENTITY).build(), "norm")
                    .setOutputs("out1").build();
    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();
    int[] mbSizes = new int[] { 1, 3, 10 };
    for (int minibatch : mbSizes) {
        INDArray in1 = Nd4j.rand(minibatch, 2);
        INDArray labels1 = Nd4j.rand(minibatch, 2);
        String testName = "testL2NormalizeVertex2d() - minibatch = " + minibatch;
        if (PRINT_RESULTS) {
            System.out.println(testName);
            for (int j = 0; j < graph.getNumLayers(); j++) System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }
        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] { in1 }, new INDArray[] { labels1 });
        assertTrue(testName, gradOK);
    }
}
Also used: INDArray (org.nd4j.linalg.api.ndarray.INDArray), NormalDistribution (org.deeplearning4j.nn.conf.distribution.NormalDistribution), ComputationGraphConfiguration (org.deeplearning4j.nn.conf.ComputationGraphConfiguration), ComputationGraph (org.deeplearning4j.nn.graph.ComputationGraph), Test (org.junit.Test)
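
The L2NormalizeVertex being gradient-checked here rescales each activation vector to unit L2 norm. A minimal stand-alone sketch of the same arithmetic in ND4J (illustrative only, using the same INDArray/Nd4j classes as above; the actual vertex also implements the corresponding backward pass):

INDArray v = Nd4j.create(new double[] { 3.0, 4.0 });
INDArray unit = v.div(v.norm2Number());   // [0.6, 0.8], L2 norm is now 1.0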

Example 63 with ComputationGraph

Use of org.deeplearning4j.nn.graph.ComputationGraph in project deeplearning4j by deeplearning4j.

The class BNGradientCheckTest, method testBatchNormCompGraphSimple:

@Test
public void testBatchNormCompGraphSimple() {
    int numClasses = 2;
    int height = 3;
    int width = 3;
    int channels = 1;
    long seed = 123;
    int minibatchSize = 3;
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(seed)
                    .updater(Updater.NONE).weightInit(WeightInit.XAVIER).regularization(false)
                    .graphBuilder()
                    .addInputs("in")
                    .setInputTypes(InputType.convolutional(height, width, channels))
                    .addLayer("bn", new BatchNormalization.Builder().build(), "in")
                    .addLayer("out", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(numClasses).build(), "bn")
                    .setOutputs("out")
                    .backprop(true).pretrain(false).build();
    ComputationGraph net = new ComputationGraph(conf);
    net.init();
    Random r = new Random(12345);
    //Order: examples, channels, height, width
    INDArray input = Nd4j.rand(new int[] { minibatchSize, channels, height, width });
    INDArray labels = Nd4j.zeros(minibatchSize, numClasses);
    for (int i = 0; i < minibatchSize; i++) {
        labels.putScalar(new int[] { i, r.nextInt(numClasses) }, 1.0);
    }
    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] { input }, new INDArray[] { labels });
    assertTrue(gradOK);
}
Also used: NeuralNetConfiguration (org.deeplearning4j.nn.conf.NeuralNetConfiguration), Random (java.util.Random), INDArray (org.nd4j.linalg.api.ndarray.INDArray), ComputationGraphConfiguration (org.deeplearning4j.nn.conf.ComputationGraphConfiguration), ComputationGraph (org.deeplearning4j.nn.graph.ComputationGraph), Test (org.junit.Test)
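
Batch normalization standardizes each feature using minibatch statistics before applying a learned scale (gamma) and shift (beta): y = gamma * (x - mean) / sqrt(var + eps) + beta. A rough ND4J sketch of just the normalization step (illustrative; it uses org.nd4j.linalg.ops.transforms.Transforms for sqrt, and a real BatchNormalization layer additionally tracks running statistics for inference):

INDArray x = Nd4j.rand(8, 4);                 // [minibatch, nFeatures]
INDArray mean = x.mean(0);                    // per-feature mean over the minibatch
INDArray var = x.var(0);                      // per-feature variance
double eps = 1e-5;                            // numerical-stability constant
INDArray xHat = x.subRowVector(mean).divRowVector(Transforms.sqrt(var.add(eps)));
// xHat now has (approximately) zero mean and unit variance per column.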

Example 64 with ComputationGraph

Use of org.deeplearning4j.nn.graph.ComputationGraph in project deeplearning4j by deeplearning4j.

The class BNGradientCheckTest, method testGradientBNWithCNNandSubsamplingCompGraph:

@Test
public void testGradientBNWithCNNandSubsamplingCompGraph() {
    //Parameterized test, testing combinations of:
    // (a) activation function
    // (b) Whether to test at random initialization, or after some learning (i.e., 'characteristic mode of operation')
    // (c) Loss function (with specified output activations)
    // (d) l1 and l2 values
    Activation[] activFns = { Activation.SIGMOID, Activation.TANH, Activation.IDENTITY };
    //If true: run some backprop steps first
    boolean[] characteristic = { false, true };
    LossFunctions.LossFunction[] lossFunctions = { LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD, LossFunctions.LossFunction.MSE };
    //i.e., lossFunctions[i] used with outputActivations[i] here
    Activation[] outputActivations = { Activation.SOFTMAX, Activation.TANH };
    double[] l2vals = { 0.0, 0.1, 0.1 };
    //i.e., use l2vals[j] with l1vals[j]
    double[] l1vals = { 0.0, 0.0, 0.2 };
    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int depth = 2;
    int hw = 5;
    int nOut = 3;
    INDArray input = Nd4j.rand(new int[] { minibatch, depth, hw, hw });
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, r.nextInt(nOut), 1.0);
    }
    DataSet ds = new DataSet(input, labels);
    for (Activation afn : activFns) {
        for (boolean doLearningFirst : characteristic) {
            for (int i = 0; i < lossFunctions.length; i++) {
                for (int j = 0; j < l2vals.length; j++) {
                    LossFunctions.LossFunction lf = lossFunctions[i];
                    Activation outputActivation = outputActivations[i];
                    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                                    .regularization(l1vals[j] > 0 || l2vals[j] > 0).l1(l1vals[j]).l2(l2vals[j])
                                    .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
                                    .updater(Updater.NONE)
                                    .weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(-2, 2))
                                    .seed(12345L)
                                    .graphBuilder()
                                    .addInputs("in")
                                    .addLayer("0", new ConvolutionLayer.Builder(2, 2).stride(1, 1).nOut(3).activation(afn).build(), "in")
                                    .addLayer("1", new BatchNormalization.Builder().build(), "0")
                                    .addLayer("2", new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
                                                    .kernelSize(2, 2).stride(1, 1).build(), "1")
                                    .addLayer("3", new BatchNormalization(), "2")
                                    .addLayer("4", new ActivationLayer.Builder().activation(afn).build(), "3")
                                    .addLayer("5", new OutputLayer.Builder(lf).activation(outputActivation).nOut(nOut).build(), "4")
                                    .setOutputs("5")
                                    .setInputTypes(InputType.convolutional(hw, hw, depth))
                                    .pretrain(false).backprop(true).build();
                    ComputationGraph net = new ComputationGraph(conf);
                    net.init();
                    String name = new Object() { }.getClass().getEnclosingMethod().getName();
                    if (doLearningFirst) {
                        //Run a number of iterations of learning
                        net.setInput(0, ds.getFeatures());
                        net.setLabels(ds.getLabels());
                        net.computeGradientAndScore();
                        double scoreBefore = net.score();
                        for (int k = 0; k < 5; k++) net.fit(ds);
                        net.computeGradientAndScore();
                        double scoreAfter = net.score();
                        //Can't test in 'characteristic mode of operation' if not learning
                        String msg = name + " - score did not (sufficiently) decrease during learning - activationFn=" + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation + ", doLearningFirst= " + doLearningFirst + " (before=" + scoreBefore + ", scoreAfter=" + scoreAfter + ")";
                        assertTrue(msg, scoreAfter < 0.9 * scoreBefore);
                    }
                    if (PRINT_RESULTS) {
                        System.out.println(name + " - activationFn=" + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation + ", doLearningFirst=" + doLearningFirst + ", l1=" + l1vals[j] + ", l2=" + l2vals[j]);
                        for (int k = 0; k < net.getNumLayers(); k++) System.out.println("Layer " + k + " # params: " + net.getLayer(k).numParams());
                    }
                    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] { input }, new INDArray[] { labels });
                    assertTrue(gradOK);
                }
            }
        }
    }
}
Also used: DataSet (org.nd4j.linalg.dataset.DataSet), Activation (org.nd4j.linalg.activations.Activation), Random (java.util.Random), ComputationGraph (org.deeplearning4j.nn.graph.ComputationGraph), UniformDistribution (org.deeplearning4j.nn.conf.distribution.UniformDistribution), LossFunctions (org.nd4j.linalg.lossfunctions.LossFunctions), INDArray (org.nd4j.linalg.api.ndarray.INDArray), ComputationGraphConfiguration (org.deeplearning4j.nn.conf.ComputationGraphConfiguration), Test (org.junit.Test)
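
All of these gradient checks rely on the same idea: GradientCheckUtil compares each analytic gradient against a central-difference estimate, (f(theta + eps) - f(theta - eps)) / (2 * eps), computed one parameter at a time. A minimal plain-Java sketch of that estimate for a scalar function (illustrative, not the DL4J implementation):

// f(x) = x^2 at x = 3; the exact derivative is 2x = 6.
double eps = 1e-4;
double x = 3.0;
double numericGrad = (Math.pow(x + eps, 2) - Math.pow(x - eps, 2)) / (2 * eps);
double analyticGrad = 2 * x;
double relError = Math.abs(numericGrad - analyticGrad)
                / Math.max(Math.abs(numericGrad), Math.abs(analyticGrad));
// The tests assert that relError stays below DEFAULT_MAX_REL_ERROR for every parameter.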

Example 65 with ComputationGraph

Use of org.deeplearning4j.nn.graph.ComputationGraph in project deeplearning4j by deeplearning4j.

The class ComputationGraphConfigurationTest, method testInvalidConfigurations:

@Test
public void testInvalidConfigurations() {
    //Test no inputs for a layer:
    try {
        new NeuralNetConfiguration.Builder().graphBuilder()
                        .addInputs("input1")
                        .addLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "input1")
                        .addLayer("out", new OutputLayer.Builder().nIn(2).nOut(2).build())
                        .setOutputs("out").build();
        fail("No exception thrown for invalid configuration");
    } catch (IllegalStateException e) {
        //OK - exception is good
        //e.printStackTrace();
    }
    //Test no network inputs
    try {
        new NeuralNetConfiguration.Builder().graphBuilder()
                        .addLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "input1")
                        .addLayer("out", new OutputLayer.Builder().nIn(2).nOut(2).build(), "dense1")
                        .setOutputs("out").build();
        fail("No exception thrown for invalid configuration");
    } catch (IllegalStateException e) {
        //OK - exception is good
        //e.printStackTrace();
    }
    //Test no network outputs
    try {
        new NeuralNetConfiguration.Builder().graphBuilder()
                        .addInputs("input1")
                        .addLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "input1")
                        .addLayer("out", new OutputLayer.Builder().nIn(2).nOut(2).build(), "dense1")
                        .build();
        fail("No exception thrown for invalid configuration");
    } catch (IllegalStateException e) {
        //OK - exception is good
        //e.printStackTrace();
    }
    //Test: invalid input
    try {
        new NeuralNetConfiguration.Builder().graphBuilder()
                        .addInputs("input1")
                        .addLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "input1")
                        .addLayer("out", new OutputLayer.Builder().nIn(2).nOut(2).build(), "thisDoesntExist")
                        .setOutputs("out").build();
        fail("No exception thrown for invalid configuration");
    } catch (IllegalStateException e) {
        //OK - exception is good
        //e.printStackTrace();
    }
    //Test: graph with cycles
    try {
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
                        .addInputs("input1")
                        .addLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "input1", "dense3")
                        .addLayer("dense2", new DenseLayer.Builder().nIn(2).nOut(2).build(), "dense1")
                        .addLayer("dense3", new DenseLayer.Builder().nIn(2).nOut(2).build(), "dense2")
                        .addLayer("out", new OutputLayer.Builder().nIn(2).nOut(2).build(), "dense1")
                        .setOutputs("out").build();
        //Cycle detection happens in ComputationGraph.init()
        ComputationGraph graph = new ComputationGraph(conf);
        graph.init();
        fail("No exception thrown for invalid configuration");
    } catch (IllegalStateException e) {
        //OK - exception is good
        //e.printStackTrace();
    }
}
Also used: OutputLayer (org.deeplearning4j.nn.conf.layers.OutputLayer), DenseLayer (org.deeplearning4j.nn.conf.layers.DenseLayer), ComputationGraph (org.deeplearning4j.nn.graph.ComputationGraph), Test (org.junit.Test)
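
For contrast with the failure cases above, a well-formed graph declares its inputs and outputs, gives every layer at least one existing input, and contains no cycles. A minimal valid sketch using the same builder API (illustrative):

ComputationGraphConfiguration valid = new NeuralNetConfiguration.Builder().graphBuilder()
                .addInputs("input1")                          // network input declared
                .addLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "input1")
                .addLayer("out", new OutputLayer.Builder().nIn(2).nOut(2).build(), "dense1")
                .setOutputs("out")                            // network output declared
                .build();
new ComputationGraph(valid).init();   // acyclic graph: init() completes without exception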

Aggregations

ComputationGraph (org.deeplearning4j.nn.graph.ComputationGraph): 109
Test (org.junit.Test): 73
ComputationGraphConfiguration (org.deeplearning4j.nn.conf.ComputationGraphConfiguration): 63
INDArray (org.nd4j.linalg.api.ndarray.INDArray): 62
NeuralNetConfiguration (org.deeplearning4j.nn.conf.NeuralNetConfiguration): 36
DataSet (org.nd4j.linalg.dataset.DataSet): 25
NormalDistribution (org.deeplearning4j.nn.conf.distribution.NormalDistribution): 22
OutputLayer (org.deeplearning4j.nn.conf.layers.OutputLayer): 21
DenseLayer (org.deeplearning4j.nn.conf.layers.DenseLayer): 19
MultiLayerNetwork (org.deeplearning4j.nn.multilayer.MultiLayerNetwork): 19
ScoreIterationListener (org.deeplearning4j.optimize.listeners.ScoreIterationListener): 17
DataSetIterator (org.nd4j.linalg.dataset.api.iterator.DataSetIterator): 17
IrisDataSetIterator (org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator): 14
Layer (org.deeplearning4j.nn.api.Layer): 14
Random (java.util.Random): 11
InMemoryModelSaver (org.deeplearning4j.earlystopping.saver.InMemoryModelSaver): 10
MaxEpochsTerminationCondition (org.deeplearning4j.earlystopping.termination.MaxEpochsTerminationCondition): 10
TrainingMaster (org.deeplearning4j.spark.api.TrainingMaster): 10
MaxTimeIterationTerminationCondition (org.deeplearning4j.earlystopping.termination.MaxTimeIterationTerminationCondition): 9
GridExecutioner (org.nd4j.linalg.api.ops.executioner.GridExecutioner): 9