Example 76 with NormalDistribution

Use of org.deeplearning4j.nn.conf.distribution.NormalDistribution in the deeplearning4j project.

From the class MultiLayerTestRNN, method testRnnTimeStepGravesLSTM.

@Test
public void testRnnTimeStepGravesLSTM() {
    Nd4j.getRandom().setSeed(12345);
    int timeSeriesLength = 12;
    //4 layer network: 2 GravesLSTM + DenseLayer + RnnOutputLayer. Hence also tests preprocessors.
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(5).nOut(7)
                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 0.5)).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 0.5)).build())
            .layer(2, new DenseLayer.Builder().nIn(8).nOut(9).activation(Activation.TANH)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 0.5)).build())
            .layer(3, new RnnOutputLayer.Builder(LossFunction.MCXENT).weightInit(WeightInit.DISTRIBUTION)
                    .nIn(9).nOut(4).activation(Activation.SOFTMAX)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 0.5)).build())
            .inputPreProcessor(2, new RnnToFeedForwardPreProcessor())
            .inputPreProcessor(3, new FeedForwardToRnnPreProcessor())
            .build();
    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();
    INDArray input = Nd4j.rand(new int[] { 3, 5, timeSeriesLength });
    List<INDArray> allOutputActivations = mln.feedForward(input, true);
    INDArray fullOutL0 = allOutputActivations.get(1);
    INDArray fullOutL1 = allOutputActivations.get(2);
    INDArray fullOutL3 = allOutputActivations.get(4);
    int[] inputLengths = { 1, 2, 3, 4, 6, 12 };
    //Should get the same result regardless of step size; should be identical to standard forward pass
    for (int i = 0; i < inputLengths.length; i++) {
        int inLength = inputLengths[i];
        //each of length inLength
        int nSteps = timeSeriesLength / inLength;
        mln.rnnClearPreviousState();
        //Reset; should be set by rnnTimeStep method
        mln.setInputMiniBatchSize(1);
        for (int j = 0; j < nSteps; j++) {
            int startTimeRange = j * inLength;
            int endTimeRange = startTimeRange + inLength;
            INDArray inputSubset;
            if (inLength == 1) {
                //Workaround to nd4j bug
                int[] sizes = new int[] { input.size(0), input.size(1), 1 };
                inputSubset = Nd4j.create(sizes);
                inputSubset.tensorAlongDimension(0, 1, 0).assign(input.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(startTimeRange)));
            } else {
                inputSubset = input.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(startTimeRange, endTimeRange));
            }
            if (inLength > 1)
                assertTrue(inputSubset.size(2) == inLength);
            INDArray out = mln.rnnTimeStep(inputSubset);
            INDArray expOutSubset;
            if (inLength == 1) {
                int[] sizes = new int[] { fullOutL3.size(0), fullOutL3.size(1), 1 };
                expOutSubset = Nd4j.create(sizes);
                expOutSubset.tensorAlongDimension(0, 1, 0).assign(fullOutL3.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(startTimeRange)));
            } else {
                expOutSubset = fullOutL3.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(startTimeRange, endTimeRange));
            }
            assertEquals(expOutSubset, out);
            Map<String, INDArray> currL0State = mln.rnnGetPreviousState(0);
            Map<String, INDArray> currL1State = mln.rnnGetPreviousState(1);
            INDArray lastActL0 = currL0State.get(GravesLSTM.STATE_KEY_PREV_ACTIVATION);
            INDArray lastActL1 = currL1State.get(GravesLSTM.STATE_KEY_PREV_ACTIVATION);
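            //The layer's stored previous activation should match the last time step of this chunk in the full forward-pass activations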
            INDArray expLastActL0 = fullOutL0.tensorAlongDimension(endTimeRange - 1, 1, 0);
            INDArray expLastActL1 = fullOutL1.tensorAlongDimension(endTimeRange - 1, 1, 0);
            assertEquals(expLastActL0, lastActL0);
            assertEquals(expLastActL1, lastActL1);
        }
    }
}
Also used : RnnOutputLayer(org.deeplearning4j.nn.conf.layers.RnnOutputLayer) RnnToFeedForwardPreProcessor(org.deeplearning4j.nn.conf.preprocessor.RnnToFeedForwardPreProcessor) MultiLayerConfiguration(org.deeplearning4j.nn.conf.MultiLayerConfiguration) NeuralNetConfiguration(org.deeplearning4j.nn.conf.NeuralNetConfiguration) GravesLSTM(org.deeplearning4j.nn.layers.recurrent.GravesLSTM) DenseLayer(org.deeplearning4j.nn.conf.layers.DenseLayer) INDArray(org.nd4j.linalg.api.ndarray.INDArray) NormalDistribution(org.deeplearning4j.nn.conf.distribution.NormalDistribution) FeedForwardToRnnPreProcessor(org.deeplearning4j.nn.conf.preprocessor.FeedForwardToRnnPreProcessor) Test(org.junit.Test)
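
For reference, the streaming-inference pattern this test exercises can be reduced to a few calls. A minimal sketch (assuming an initialized RNN network mln like the one above, a 3D input of shape [miniBatchSize, nIn, timeSeriesLength], and a hypothetical chunk size; not taken verbatim from the test):

mln.rnnClearPreviousState();                        //Start from a clean hidden state
for (int t = 0; t < timeSeriesLength; t += chunk) {
    INDArray in = input.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(t, t + chunk));
    INDArray out = mln.rnnTimeStep(in);             //Output for this chunk; hidden state is carried forward internally
}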

Example 77 with NormalDistribution

Use of org.deeplearning4j.nn.conf.distribution.NormalDistribution in the deeplearning4j project.

From the class MultiLayerTestRNN, method testTruncatedBPTTWithMasking.

@Test
public void testTruncatedBPTTWithMasking() {
    //Extremely simple test of the 'does it throw an exception' variety
    int timeSeriesLength = 100;
    int tbpttLength = 10;
    int miniBatchSize = 7;
    int nIn = 5;
    int nOut = 4;
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 0.5)).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 0.5)).build())
            .layer(2, new RnnOutputLayer.Builder(LossFunction.MCXENT).weightInit(WeightInit.DISTRIBUTION)
                    .nIn(8).nOut(nOut).activation(Activation.SOFTMAX)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 0.5)).build())
            .pretrain(false).backprop(true)
            .backpropType(BackpropType.TruncatedBPTT)
            .tBPTTBackwardLength(tbpttLength).tBPTTForwardLength(tbpttLength)
            .build();
    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();
    INDArray features = Nd4j.rand(new int[] { miniBatchSize, nIn, timeSeriesLength });
    INDArray labels = Nd4j.rand(new int[] { miniBatchSize, nOut, timeSeriesLength });
    INDArray maskArrayInput = Nd4j.ones(miniBatchSize, timeSeriesLength);
    INDArray maskArrayOutput = Nd4j.ones(miniBatchSize, timeSeriesLength);
    DataSet ds = new DataSet(features, labels, maskArrayInput, maskArrayOutput);
    mln.fit(ds);
}
Also used : DataSet(org.nd4j.linalg.dataset.DataSet) MultiLayerConfiguration(org.deeplearning4j.nn.conf.MultiLayerConfiguration) GravesLSTM(org.deeplearning4j.nn.layers.recurrent.GravesLSTM) INDArray(org.nd4j.linalg.api.ndarray.INDArray) NormalDistribution(org.deeplearning4j.nn.conf.distribution.NormalDistribution) Test(org.junit.Test)
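
The test above uses all-ones masks, so nothing is actually masked; for genuinely variable-length series the mask would be zero past each sequence's end. A hedged sketch of building such a mask (the per-example lengths here are made up for illustration):

//Hypothetical variable-length masking: example i is valid for lengths[i] time steps, zero-padded afterwards
int[] lengths = { 100, 60, 75, 100, 30, 90, 45 };
INDArray mask = Nd4j.zeros(miniBatchSize, timeSeriesLength);
for (int i = 0; i < miniBatchSize; i++) {
    mask.get(NDArrayIndex.point(i), NDArrayIndex.interval(0, lengths[i])).assign(1.0);
}
DataSet dsMasked = new DataSet(features, labels, mask, mask.dup());   //Same mask applied to features and labels here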

Example 78 with NormalDistribution

Use of org.deeplearning4j.nn.conf.distribution.NormalDistribution in the deeplearning4j project.

From the class MultiLayerTestRNN, method testTruncatedBPTTVsBPTT.

@Test
public void testTruncatedBPTTVsBPTT() {
    //Under some (limited) circumstances, we expect BPTT and truncated BPTT to be identical
    //Specifically TBPTT over entire data vector
    int timeSeriesLength = 12;
    int miniBatchSize = 7;
    int nIn = 5;
    int nOut = 4;
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 0.5)).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 0.5)).build())
            .layer(2, new RnnOutputLayer.Builder(LossFunction.MCXENT).weightInit(WeightInit.DISTRIBUTION)
                    .nIn(8).nOut(nOut).activation(Activation.SOFTMAX)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 0.5)).build())
            .backprop(true).build();
    assertEquals(BackpropType.Standard, conf.getBackpropType());
    MultiLayerConfiguration confTBPTT = new NeuralNetConfiguration.Builder().seed(12345).list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 0.5)).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 0.5)).build())
            .layer(2, new RnnOutputLayer.Builder(LossFunction.MCXENT).weightInit(WeightInit.DISTRIBUTION)
                    .nIn(8).nOut(nOut).activation(Activation.SOFTMAX)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 0.5)).build())
            .backprop(true).backpropType(BackpropType.TruncatedBPTT)
            .tBPTTBackwardLength(timeSeriesLength).tBPTTForwardLength(timeSeriesLength)
            .build();
    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();
    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork mlnTBPTT = new MultiLayerNetwork(confTBPTT);
    mlnTBPTT.init();
    assertTrue(mlnTBPTT.getLayerWiseConfigurations().getBackpropType() == BackpropType.TruncatedBPTT);
    assertTrue(mlnTBPTT.getLayerWiseConfigurations().getTbpttFwdLength() == timeSeriesLength);
    assertTrue(mlnTBPTT.getLayerWiseConfigurations().getTbpttBackLength() == timeSeriesLength);
    INDArray inputData = Nd4j.rand(new int[] { miniBatchSize, nIn, timeSeriesLength });
    INDArray labels = Nd4j.rand(new int[] { miniBatchSize, nOut, timeSeriesLength });
    mln.setInput(inputData);
    mln.setLabels(labels);
    mlnTBPTT.setInput(inputData);
    mlnTBPTT.setLabels(labels);
    mln.computeGradientAndScore();
    mlnTBPTT.computeGradientAndScore();
    Pair<Gradient, Double> mlnPair = mln.gradientAndScore();
    Pair<Gradient, Double> tbpttPair = mlnTBPTT.gradientAndScore();
    assertEquals(mlnPair.getFirst().gradientForVariable(), tbpttPair.getFirst().gradientForVariable());
    assertEquals(mlnPair.getSecond(), tbpttPair.getSecond());
    //Check states: expect stateMap to be empty but tBpttStateMap to not be
    Map<String, INDArray> l0StateMLN = mln.rnnGetPreviousState(0);
    Map<String, INDArray> l0StateTBPTT = mlnTBPTT.rnnGetPreviousState(0);
    Map<String, INDArray> l1StateMLN = mln.rnnGetPreviousState(1);
    Map<String, INDArray> l1StateTBPTT = mlnTBPTT.rnnGetPreviousState(1);
    Map<String, INDArray> l0TBPTTStateMLN = ((BaseRecurrentLayer<?>) mln.getLayer(0)).rnnGetTBPTTState();
    Map<String, INDArray> l0TBPTTStateTBPTT = ((BaseRecurrentLayer<?>) mlnTBPTT.getLayer(0)).rnnGetTBPTTState();
    Map<String, INDArray> l1TBPTTStateMLN = ((BaseRecurrentLayer<?>) mln.getLayer(1)).rnnGetTBPTTState();
    Map<String, INDArray> l1TBPTTStateTBPTT = ((BaseRecurrentLayer<?>) mlnTBPTT.getLayer(1)).rnnGetTBPTTState();
    assertTrue(l0StateMLN.isEmpty());
    assertTrue(l0StateTBPTT.isEmpty());
    assertTrue(l1StateMLN.isEmpty());
    assertTrue(l1StateTBPTT.isEmpty());
    assertTrue(l0TBPTTStateMLN.isEmpty());
    assertTrue(l0TBPTTStateTBPTT.size() == 2);
    assertTrue(l1TBPTTStateMLN.isEmpty());
    assertTrue(l1TBPTTStateTBPTT.size() == 2);
    INDArray tbpttActL0 = l0TBPTTStateTBPTT.get(GravesLSTM.STATE_KEY_PREV_ACTIVATION);
    INDArray tbpttActL1 = l1TBPTTStateTBPTT.get(GravesLSTM.STATE_KEY_PREV_ACTIVATION);
    List<INDArray> activations = mln.feedForward(inputData, true);
    INDArray l0Act = activations.get(1);
    INDArray l1Act = activations.get(2);
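    //The stored TBPTT state should equal the last time step of the full forward-pass activations for each LSTM layer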
    INDArray expL0Act = l0Act.tensorAlongDimension(timeSeriesLength - 1, 1, 0);
    INDArray expL1Act = l1Act.tensorAlongDimension(timeSeriesLength - 1, 1, 0);
    assertEquals(tbpttActL0, expL0Act);
    assertEquals(tbpttActL1, expL1Act);
}
Also used : RnnOutputLayer(org.deeplearning4j.nn.conf.layers.RnnOutputLayer) Gradient(org.deeplearning4j.nn.gradient.Gradient) NeuralNetConfiguration(org.deeplearning4j.nn.conf.NeuralNetConfiguration) MultiLayerConfiguration(org.deeplearning4j.nn.conf.MultiLayerConfiguration) GravesLSTM(org.deeplearning4j.nn.layers.recurrent.GravesLSTM) INDArray(org.nd4j.linalg.api.ndarray.INDArray) NormalDistribution(org.deeplearning4j.nn.conf.distribution.NormalDistribution) BaseRecurrentLayer(org.deeplearning4j.nn.layers.recurrent.BaseRecurrentLayer) Test(org.junit.Test)
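
The assertTrue checks on the configuration above can equivalently be written with assertEquals; a small sketch (same objects as the test, plus the observation that identical seeds and identical layer configurations give identical initial parameters):

assertEquals(BackpropType.Standard, mln.getLayerWiseConfigurations().getBackpropType());
assertEquals(BackpropType.TruncatedBPTT, mlnTBPTT.getLayerWiseConfigurations().getBackpropType());
assertEquals(timeSeriesLength, mlnTBPTT.getLayerWiseConfigurations().getTbpttFwdLength());
assertEquals(timeSeriesLength, mlnTBPTT.getLayerWiseConfigurations().getTbpttBackLength());
assertEquals(mln.params(), mlnTBPTT.params());    //Same seed + same configuration => identical initial parameters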

Example 79 with NormalDistribution

Use of org.deeplearning4j.nn.conf.distribution.NormalDistribution in the deeplearning4j project.

From the class TestMasking, method testPerOutputMaskingMLN.

@Test
public void testPerOutputMaskingMLN() {
    //Idea: for per-output masking, the contents of the masked label entries should make zero difference to either
    // the score or the gradients
    int nIn = 6;
    int layerSize = 4;
    INDArray mask1 = Nd4j.create(new double[] { 1, 0, 0, 1, 0 });
    INDArray mask3 = Nd4j.create(new double[][] { { 1, 1, 1, 1, 1 }, { 0, 1, 0, 1, 0 }, { 1, 0, 0, 1, 1 } });
    INDArray[] labelMasks = new INDArray[] { mask1, mask3 };
    ILossFunction[] lossFunctions = new ILossFunction[] {
            new LossBinaryXENT(),
            //new LossCosineProximity(),  //Doesn't support per-output masking, as it doesn't make sense for cosine proximity
            new LossHinge(), new LossKLD(), new LossKLD(), new LossL1(), new LossL2(),
            new LossMAE(), new LossMAE(), new LossMAPE(), new LossMAPE(),
            //new LossMCXENT(),           //Per-output masking on MCXENT+Softmax: not yet supported
            new LossMCXENT(), new LossMSE(), new LossMSE(), new LossMSLE(), new LossMSLE(),
            new LossNegativeLogLikelihood(), new LossPoisson(), new LossSquaredHinge() };
    Activation[] act = new Activation[] {
            Activation.SIGMOID, //XENT
            Activation.TANH,    //Hinge
            Activation.SIGMOID, //KLD
            Activation.SOFTMAX, //KLD + softmax
            Activation.TANH,    //L1
            Activation.TANH,    //L2
            Activation.TANH,    //MAE
            Activation.SOFTMAX, //MAE + softmax
            Activation.TANH,    //MAPE
            Activation.SOFTMAX, //MAPE + softmax
            Activation.SIGMOID, //MCXENT + sigmoid
            Activation.TANH,    //MSE
            Activation.SOFTMAX, //MSE + softmax
            Activation.SIGMOID, //MSLE - needs positive labels/activations (due to log)
            Activation.SOFTMAX, //MSLE + softmax
            Activation.SIGMOID, //NLL
            Activation.SIGMOID, //Poisson
            Activation.TANH     //Squared hinge
    };
    for (INDArray labelMask : labelMasks) {
        int minibatch = labelMask.size(0);
        int nOut = labelMask.size(1);
        for (int i = 0; i < lossFunctions.length; i++) {
            ILossFunction lf = lossFunctions[i];
            Activation a = act[i];
            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(Updater.NONE)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).seed(12345).list()
                    .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH).build())
                    .layer(1, new OutputLayer.Builder().nIn(layerSize).nOut(nOut).lossFunction(lf).activation(a).build())
                    .build();
            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();
            net.setLayerMaskArrays(null, labelMask);
            INDArray[] fl = LossFunctionGradientCheck.getFeaturesAndLabels(lf, minibatch, nIn, nOut, 12345);
            INDArray features = fl[0];
            INDArray labels = fl[1];
            net.setInput(features);
            net.setLabels(labels);
            net.computeGradientAndScore();
            double score1 = net.score();
            INDArray grad1 = net.gradient().gradient();
            //Now: change the label values for the masked steps. The masked (zero) entries should make no difference to the score or gradients
            INDArray maskZeroLocations = Nd4j.getExecutioner().execAndReturn(new Not(labelMask.dup()));
            INDArray rand = Nd4j.rand(maskZeroLocations.shape()).muli(0.5);
            //Only the masked values are changed
            INDArray newLabels = labels.add(rand.muli(maskZeroLocations));
            net.setLabels(newLabels);
            net.computeGradientAndScore();
            assertNotEquals(labels, newLabels);
            double score2 = net.score();
            INDArray grad2 = net.gradient().gradient();
            assertEquals(score1, score2, 1e-6);
            assertEquals(grad1, grad2);
            //Do the same for CompGraph
            ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder().updater(Updater.NONE)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).seed(12345)
                    .graphBuilder().addInputs("in")
                    .addLayer("0", new DenseLayer.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH).build(), "in")
                    .addLayer("1", new OutputLayer.Builder().nIn(layerSize).nOut(nOut).lossFunction(lf).activation(a).build(), "0")
                    .setOutputs("1").build();
            ComputationGraph graph = new ComputationGraph(conf2);
            graph.init();
            graph.setLayerMaskArrays(null, new INDArray[] { labelMask });
            graph.setInputs(features);
            graph.setLabels(labels);
            graph.computeGradientAndScore();
            double gScore1 = graph.score();
            INDArray gGrad1 = graph.gradient().gradient();
            graph.setLabels(newLabels);
            graph.computeGradientAndScore();
            double gScore2 = graph.score();
            INDArray gGrad2 = graph.gradient().gradient();
            assertEquals(gScore1, gScore2, 1e-6);
            assertEquals(gGrad1, gGrad2);
        }
    }
}
Also used : OutputLayer(org.deeplearning4j.nn.conf.layers.OutputLayer) RnnOutputLayer(org.deeplearning4j.nn.conf.layers.RnnOutputLayer) Activation(org.nd4j.linalg.activations.Activation) ComputationGraph(org.deeplearning4j.nn.graph.ComputationGraph) ILossFunction(org.nd4j.linalg.lossfunctions.ILossFunction) Not(org.nd4j.linalg.api.ops.impl.transforms.Not) INDArray(org.nd4j.linalg.api.ndarray.INDArray) DenseLayer(org.deeplearning4j.nn.conf.layers.DenseLayer) NormalDistribution(org.deeplearning4j.nn.conf.distribution.NormalDistribution) Test(org.junit.Test)
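
The core trick in this test is perturbing only the masked label entries; a standalone sketch of that step (shapes here are illustrative, not from the test):

//Per-output label mask: 1 = scored, 0 = ignored. Entries where the mask is 0 may hold any value.
INDArray labelMask = Nd4j.create(new double[][] { { 1, 0, 1 }, { 0, 1, 1 } });
INDArray ignored = Nd4j.getExecutioner().execAndReturn(new Not(labelMask.dup()));  //1 exactly where the mask is 0
INDArray labels = Nd4j.rand(2, 3);
INDArray perturbed = labels.add(Nd4j.rand(2, 3).muli(0.5).muli(ignored));          //Only ignored entries change
//Scoring labels and perturbed with the mask applied should give identical scores and gradients.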

Example 80 with NormalDistribution

Use of org.deeplearning4j.nn.conf.distribution.NormalDistribution in the deeplearning4j project.

From the class TestSetGetParameters, method testSetParameters.

@Test
public void testSetParameters() {
    //Set up a MLN, then do set(get) on parameters. Results should be identical compared to before doing this.
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
            .layer(0, new DenseLayer.Builder().nIn(9).nOut(10).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 1)).build())
            .layer(1, new RBM.Builder().nIn(10).nOut(11).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 1)).build())
            .layer(2, new AutoEncoder.Builder().corruptionLevel(0.5).nIn(11).nOut(12)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).build())
            .layer(3, new OutputLayer.Builder(LossFunction.MSE).nIn(12).nOut(12)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    INDArray initParams = net.params().dup();
    Map<String, INDArray> initParams2 = net.paramTable();
    net.setParams(net.params());
    INDArray initParamsAfter = net.params();
    Map<String, INDArray> initParams2After = net.paramTable();
    for (String s : initParams2.keySet()) {
        assertTrue("Params differ: " + s, initParams2.get(s).equals(initParams2After.get(s)));
    }
    assertEquals(initParams, initParamsAfter);
    //Now, try the other way: get(set(random))
    INDArray randomParams = Nd4j.rand(initParams.shape());
    net.setParams(randomParams.dup());
    assertEquals(net.params(), randomParams);
}
Also used : MultiLayerConfiguration(org.deeplearning4j.nn.conf.MultiLayerConfiguration) INDArray(org.nd4j.linalg.api.ndarray.INDArray) NormalDistribution(org.deeplearning4j.nn.conf.distribution.NormalDistribution) Test(org.junit.Test)
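
For reference, the pattern shared by all of these examples is WeightInit.DISTRIBUTION combined with a NormalDistribution supplied via dist(...). A minimal sketch using the same 0.x-era builder API as above (layer sizes are arbitrary):

MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .seed(12345)
        .weightInit(WeightInit.DISTRIBUTION)
        .dist(new NormalDistribution(0, 1))     //Weights drawn from a normal distribution with mean 0, std 1
        .list()
        .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).activation(Activation.TANH).build())
        .layer(1, new OutputLayer.Builder(LossFunction.MSE).nIn(3).nOut(2).build())
        .build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();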

Aggregations

NormalDistribution (org.deeplearning4j.nn.conf.distribution.NormalDistribution): 90 usages
Test (org.junit.Test): 87
INDArray (org.nd4j.linalg.api.ndarray.INDArray): 76
MultiLayerConfiguration (org.deeplearning4j.nn.conf.MultiLayerConfiguration): 49
MultiLayerNetwork (org.deeplearning4j.nn.multilayer.MultiLayerNetwork): 43
NeuralNetConfiguration (org.deeplearning4j.nn.conf.NeuralNetConfiguration): 41
Random (java.util.Random): 28
ComputationGraphConfiguration (org.deeplearning4j.nn.conf.ComputationGraphConfiguration): 28
ComputationGraph (org.deeplearning4j.nn.graph.ComputationGraph): 22
GravesLSTM (org.deeplearning4j.nn.layers.recurrent.GravesLSTM): 13
DataSet (org.nd4j.linalg.dataset.DataSet): 13
RnnOutputLayer (org.deeplearning4j.nn.conf.layers.RnnOutputLayer): 12
IrisDataSetIterator (org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator): 9
RnnToFeedForwardPreProcessor (org.deeplearning4j.nn.conf.preprocessor.RnnToFeedForwardPreProcessor): 6
Activation (org.nd4j.linalg.activations.Activation): 5
DataSetIterator (org.nd4j.linalg.dataset.api.iterator.DataSetIterator): 5
DataNormalization (org.nd4j.linalg.dataset.api.preprocessor.DataNormalization): 5
NormalizerMinMaxScaler (org.nd4j.linalg.dataset.api.preprocessor.NormalizerMinMaxScaler): 5
LossFunction (org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction): 5
DenseLayer (org.deeplearning4j.nn.conf.layers.DenseLayer): 4