
Example 1 with MultiLayerConfiguration

Use of org.deeplearning4j.nn.conf.MultiLayerConfiguration in the deeplearning4j project.

From the class DropoutLayerTest, method testDropoutLayerWithConvMnist.

@Test
public void testDropoutLayerWithConvMnist() throws Exception {
    DataSetIterator iter = new MnistDataSetIterator(2, 2);
    DataSet next = iter.next();
    // Run without separate activation layer
    MultiLayerConfiguration confIntegrated = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .iterations(1).seed(123).list()
            .layer(0, new ConvolutionLayer.Builder(4, 4).stride(2, 2).nIn(1).nOut(20)
                    .activation(Activation.RELU).weightInit(WeightInit.XAVIER).build())
            .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                    .dropOut(0.25).nOut(10).build())
            .backprop(true).pretrain(false)
            .setInputType(InputType.convolutionalFlat(28, 28, 1)).build();
    MultiLayerNetwork netIntegrated = new MultiLayerNetwork(confIntegrated);
    netIntegrated.init();
    netIntegrated.fit(next);
    // Run with separate activation layer
    MultiLayerConfiguration confSeparate = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .iterations(1).seed(123).list()
            .layer(0, new ConvolutionLayer.Builder(4, 4).stride(2, 2).nIn(1).nOut(20)
                    .activation(Activation.RELU).weightInit(WeightInit.XAVIER).build())
            .layer(1, new DropoutLayer.Builder(0.25).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nOut(10).build())
            .backprop(true).pretrain(false)
            .setInputType(InputType.convolutionalFlat(28, 28, 1)).build();
    MultiLayerNetwork netSeparate = new MultiLayerNetwork(confSeparate);
    netSeparate.init();
    netSeparate.fit(next);
    // check parameters
    assertEquals(netIntegrated.getLayer(0).getParam("W"), netSeparate.getLayer(0).getParam("W"));
    assertEquals(netIntegrated.getLayer(0).getParam("b"), netSeparate.getLayer(0).getParam("b"));
    assertEquals(netIntegrated.getLayer(1).getParam("W"), netSeparate.getLayer(2).getParam("W"));
    assertEquals(netIntegrated.getLayer(1).getParam("b"), netSeparate.getLayer(2).getParam("b"));
    // check activations
    netIntegrated.setInput(next.getFeatureMatrix());
    netSeparate.setInput(next.getFeatureMatrix());
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTrainIntegrated = netIntegrated.feedForward(true);
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTrainSeparate = netSeparate.feedForward(true);
    assertEquals(actTrainIntegrated.get(1), actTrainSeparate.get(1));
    assertEquals(actTrainIntegrated.get(2), actTrainSeparate.get(3));
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTestIntegrated = netIntegrated.feedForward(false);
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTestSeparate = netSeparate.feedForward(false);
    assertEquals(actTestIntegrated.get(1), actTestSeparate.get(1));
    assertEquals(actTestIntegrated.get(2), actTestSeparate.get(3));
}
Also used : OutputLayer(org.deeplearning4j.nn.conf.layers.OutputLayer) MnistDataSetIterator(org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator) DataSet(org.nd4j.linalg.dataset.DataSet) DropoutLayer(org.deeplearning4j.nn.conf.layers.DropoutLayer) NeuralNetConfiguration(org.deeplearning4j.nn.conf.NeuralNetConfiguration) ConvolutionLayer(org.deeplearning4j.nn.conf.layers.ConvolutionLayer) MultiLayerConfiguration(org.deeplearning4j.nn.conf.MultiLayerConfiguration) INDArray(org.nd4j.linalg.api.ndarray.INDArray) MultiLayerNetwork(org.deeplearning4j.nn.multilayer.MultiLayerNetwork) DataSetIterator(org.nd4j.linalg.dataset.api.iterator.DataSetIterator) Test(org.junit.Test)
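
The pattern this test exercises, in miniature: dropout can be attached inline to the layer it feeds (.dropOut(p) drops that layer's input activations) or expressed as an explicit DropoutLayer earlier in the layer list. A minimal sketch of the two equivalent spellings, assuming illustrative DenseLayer sizes that are not taken from the test:

//Inline: dropout attached to the output layer (illustrative sizes)
MultiLayerConfiguration inline = new NeuralNetConfiguration.Builder().seed(123).list()
        .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build())
        .layer(1, new OutputLayer.Builder().nIn(3).nOut(2).dropOut(0.25).build())
        .build();

//Separate: the same dropout as its own layer in the list
MultiLayerConfiguration separate = new NeuralNetConfiguration.Builder().seed(123).list()
        .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build())
        .layer(1, new DropoutLayer.Builder(0.25).build())
        .layer(2, new OutputLayer.Builder().nIn(3).nOut(2).build())
        .build();

With the same RNG seed, the two should yield identical parameters and (allowing for the shifted layer indices) identical activations, which is exactly what the assertions above verify.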

Example 2 with MultiLayerConfiguration

Use of org.deeplearning4j.nn.conf.MultiLayerConfiguration in the deeplearning4j project.

From the class OutputLayerTest, method testRnnOutputLayerIncEdgeCases.

@Test
public void testRnnOutputLayerIncEdgeCases() {
    //Basic test + test edge cases: timeSeriesLength==1, miniBatchSize==1, both
    int[] tsLength = { 5, 1, 5, 1 };
    int[] miniBatch = { 7, 7, 1, 1 };
    int nIn = 3;
    int nOut = 6;
    int layerSize = 4;
    FeedForwardToRnnPreProcessor proc = new FeedForwardToRnnPreProcessor();
    for (int t = 0; t < tsLength.length; t++) {
        Nd4j.getRandom().setSeed(12345);
        int timeSeriesLength = tsLength[t];
        int miniBatchSize = miniBatch[t];
        Random r = new Random(12345L);
        INDArray input = Nd4j.zeros(miniBatchSize, nIn, timeSeriesLength);
        for (int i = 0; i < miniBatchSize; i++) {
            for (int j = 0; j < nIn; j++) {
                for (int k = 0; k < timeSeriesLength; k++) {
                    input.putScalar(new int[] { i, j, k }, r.nextDouble() - 0.5);
                }
            }
        }
        INDArray labels3d = Nd4j.zeros(miniBatchSize, nOut, timeSeriesLength);
        for (int i = 0; i < miniBatchSize; i++) {
            for (int j = 0; j < timeSeriesLength; j++) {
                int idx = r.nextInt(nOut);
                labels3d.putScalar(new int[] { i, idx, j }, 1.0f);
            }
        }
        INDArray labels2d = proc.backprop(labels3d, miniBatchSize);
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L).list()
                .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize)
                        .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                        .activation(Activation.TANH).updater(Updater.NONE).build())
                .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
                        .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                        .updater(Updater.NONE).build())
                .inputPreProcessor(1, new RnnToFeedForwardPreProcessor())
                .pretrain(false).backprop(true).build();
        MultiLayerNetwork mln = new MultiLayerNetwork(conf);
        mln.init();
        INDArray out2d = mln.feedForward(input).get(2);
        INDArray out3d = proc.preProcess(out2d, miniBatchSize);
        MultiLayerConfiguration confRnn = new NeuralNetConfiguration.Builder().seed(12345L).list()
                .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize)
                        .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                        .activation(Activation.TANH).updater(Updater.NONE).build())
                .layer(1, new org.deeplearning4j.nn.conf.layers.RnnOutputLayer.Builder(LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
                        .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                        .updater(Updater.NONE).build())
                .pretrain(false).backprop(true).build();
        MultiLayerNetwork mlnRnn = new MultiLayerNetwork(confRnn);
        mlnRnn.init();
        INDArray outRnn = mlnRnn.feedForward(input).get(2);
        mln.setLabels(labels2d);
        mlnRnn.setLabels(labels3d);
        mln.computeGradientAndScore();
        mlnRnn.computeGradientAndScore();
        //score is average over all examples.
        //However: OutputLayer version has miniBatch*timeSeriesLength "examples" (after reshaping)
        //RnnOutputLayer has miniBatch examples
        //Hence: expect difference in scores by factor of timeSeriesLength
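        //Worked example: with miniBatchSize = 7 and timeSeriesLength = 5 (the first case above),
        //the OutputLayer averages the loss over 7 * 5 = 35 reshaped rows, while RnnOutputLayer
        //averages over 7 sequences, so mln.score() * timeSeriesLength should match mlnRnn.score()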
        double score = mln.score() * timeSeriesLength;
        double scoreRNN = mlnRnn.score();
        assertFalse(Double.isNaN(score));
        assertFalse(Double.isNaN(scoreRNN));
        double relError = Math.abs(score - scoreRNN) / (Math.abs(score) + Math.abs(scoreRNN));
        System.out.println(relError);
        assertTrue(relError < 1e-6);
        //Check labels and inputs for output layer:
        OutputLayer ol = (OutputLayer) mln.getOutputLayer();
        assertArrayEquals(ol.getInput().shape(), new int[] { miniBatchSize * timeSeriesLength, layerSize });
        assertArrayEquals(ol.getLabels().shape(), new int[] { miniBatchSize * timeSeriesLength, nOut });
        RnnOutputLayer rnnol = (RnnOutputLayer) mlnRnn.getOutputLayer();
        //assertArrayEquals(rnnol.getInput().shape(),new int[]{miniBatchSize,layerSize,timeSeriesLength});
        //Input may be set by BaseLayer methods. Thus input may end up as reshaped 2d version instead of original 3d version.
        //Not ideal, but everything else works.
        assertArrayEquals(rnnol.getLabels().shape(), new int[] { miniBatchSize, nOut, timeSeriesLength });
        //Check shapes of output for both:
        assertArrayEquals(out2d.shape(), new int[] { miniBatchSize * timeSeriesLength, nOut });
        INDArray out = mln.output(input);
        assertArrayEquals(out.shape(), new int[] { miniBatchSize * timeSeriesLength, nOut });
        INDArray act = mln.activate();
        assertArrayEquals(act.shape(), new int[] { miniBatchSize * timeSeriesLength, nOut });
        INDArray preout = mln.preOutput(input);
        assertArrayEquals(preout.shape(), new int[] { miniBatchSize * timeSeriesLength, nOut });
        INDArray outFFRnn = mlnRnn.feedForward(input).get(2);
        assertArrayEquals(outFFRnn.shape(), new int[] { miniBatchSize, nOut, timeSeriesLength });
        INDArray outRnn2 = mlnRnn.output(input);
        assertArrayEquals(outRnn2.shape(), new int[] { miniBatchSize, nOut, timeSeriesLength });
        INDArray actRnn = mlnRnn.activate();
        assertArrayEquals(actRnn.shape(), new int[] { miniBatchSize, nOut, timeSeriesLength });
        INDArray preoutRnn = mlnRnn.preOutput(input);
        assertArrayEquals(preoutRnn.shape(), new int[] { miniBatchSize, nOut, timeSeriesLength });
    }
}
Also used : RnnOutputLayer(org.deeplearning4j.nn.layers.recurrent.RnnOutputLayer) NeuralNetConfiguration(org.deeplearning4j.nn.conf.NeuralNetConfiguration) RnnToFeedForwardPreProcessor(org.deeplearning4j.nn.conf.preprocessor.RnnToFeedForwardPreProcessor) MultiLayerConfiguration(org.deeplearning4j.nn.conf.MultiLayerConfiguration) Random(java.util.Random) INDArray(org.nd4j.linalg.api.ndarray.INDArray) GravesLSTM(org.deeplearning4j.nn.conf.layers.GravesLSTM) NormalDistribution(org.deeplearning4j.nn.conf.distribution.NormalDistribution) FeedForwardToRnnPreProcessor(org.deeplearning4j.nn.conf.preprocessor.FeedForwardToRnnPreProcessor) MultiLayerNetwork(org.deeplearning4j.nn.multilayer.MultiLayerNetwork) Test(org.junit.Test)
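
For reference, the reshaping the two preprocessors perform can be exercised in isolation. A minimal sketch with hypothetical shapes (2 examples, 3 features, 4 time steps) that are not taken from the test:

FeedForwardToRnnPreProcessor proc = new FeedForwardToRnnPreProcessor();
int miniBatch = 2;
int size = 3;
int tsLength = 4;
//Feed-forward layout: one row per (example, time step) pair
INDArray ff = Nd4j.rand(miniBatch * tsLength, size);
//preProcess reshapes [miniBatch * tsLength, size] -> [miniBatch, size, tsLength]
INDArray rnn = proc.preProcess(ff, miniBatch);
//backprop applies the inverse reshape, back to [miniBatch * tsLength, size]
INDArray ff2 = proc.backprop(rnn, miniBatch);

This is the same mechanism the test uses to build labels in 3d and convert them to 2d via proc.backprop(labels3d, miniBatchSize) for the plain OutputLayer variant.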

Example 3 with MultiLayerConfiguration

Use of org.deeplearning4j.nn.conf.MultiLayerConfiguration in the deeplearning4j project.

From the class GlobalPoolingMaskingTests, method testMaskingCnnDim2_SingleExample.

@Test
public void testMaskingCnnDim2_SingleExample() {
    //Test masking, where mask is along dimension 2
    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 6;
    int width = 3;
    PoolingType[] poolingTypes = new PoolingType[] { PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM };
    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .weightInit(WeightInit.XAVIER).convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut)
                        .kernelSize(2, width).stride(1, width).activation(Activation.TANH).build())
                .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder()
                        .poolingType(pt).build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                .pretrain(false).backprop(true).build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        INDArray inToBeMasked = Nd4j.rand(new int[] { minibatch, depthIn, height, width });
        //Shape for mask: [minibatch, height] (the mask here runs along dimension 2)
        INDArray maskArray = Nd4j.create(new double[] { 1, 1, 1, 1, 1, 0 });
        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 2));
        net.setLayerMaskArrays(maskArray, null);
        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();
        int numSteps = height - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(), NDArrayIndex.interval(0, numSteps), NDArrayIndex.all());
        assertArrayEquals(new int[] { 1, depthIn, numSteps, width }, subset.shape());
        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);
        assertEquals(outSubset, outMaskedSubset);
        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] { 0, 1 });
        net.setLabels(labels);
        net.computeGradientAndScore();
    }
}
Also used : MultiLayerConfiguration(org.deeplearning4j.nn.conf.MultiLayerConfiguration) INDArray(org.nd4j.linalg.api.ndarray.INDArray) BroadcastMulOp(org.nd4j.linalg.api.ops.impl.broadcast.BroadcastMulOp) MultiLayerNetwork(org.deeplearning4j.nn.multilayer.MultiLayerNetwork) Test(org.junit.Test)
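
The BroadcastMulOp call above is what applies the mask to the input by hand. A minimal sketch of the broadcast semantics, reusing the shapes from this test ([1, 2, 6, 3] input, mask of length 6 along dimension 2):

INDArray in = Nd4j.rand(new int[] { 1, 2, 6, 3 });
//Row vector of shape [1, 6]; the trailing 0 masks the last height index
INDArray mask = Nd4j.create(new double[] { 1, 1, 1, 1, 1, 0 });
//Dimensions {0, 2} of 'in' are lined up with the mask's shape [1, 6], so each
//height index with mask value 0 is zeroed across all depths and widths.
//Passing 'in' as both x and z makes the multiply happen in place.
Nd4j.getExecutioner().exec(new BroadcastMulOp(in, mask, in, 0, 2));

Zeroing the input alone is not enough for pooling, which is why the test also calls setLayerMaskArrays(maskArray, null): the global pooling layer then excludes the masked positions rather than pooling over zeros.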

Example 4 with MultiLayerConfiguration

Use of org.deeplearning4j.nn.conf.MultiLayerConfiguration in the deeplearning4j project.

From the class GlobalPoolingMaskingTests, method testMaskingCnnDim3_SingleExample.

@Test
public void testMaskingCnnDim3_SingleExample() {
    //Test masking, where mask is along dimension 3
    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 3;
    int width = 6;
    PoolingType[] poolingTypes = new PoolingType[] { PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM };
    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .weightInit(WeightInit.XAVIER).convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut)
                        .kernelSize(height, 2).stride(height, 1).activation(Activation.TANH).build())
                .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder()
                        .poolingType(pt).build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                .pretrain(false).backprop(true).build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        INDArray inToBeMasked = Nd4j.rand(new int[] { minibatch, depthIn, height, width });
        //Shape for mask: [minibatch, width]
        INDArray maskArray = Nd4j.create(new double[] { 1, 1, 1, 1, 1, 0 });
        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 3));
        net.setLayerMaskArrays(maskArray, null);
        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();
        int numSteps = width - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, numSteps));
        assertArrayEquals(new int[] { 1, depthIn, height, numSteps }, subset.shape());
        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);
        assertEquals(outSubset, outMaskedSubset);
        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] { 0, 1 });
        net.setLabels(labels);
        net.computeGradientAndScore();
    }
}
Also used : MultiLayerConfiguration(org.deeplearning4j.nn.conf.MultiLayerConfiguration) INDArray(org.nd4j.linalg.api.ndarray.INDArray) BroadcastMulOp(org.nd4j.linalg.api.ops.impl.broadcast.BroadcastMulOp) MultiLayerNetwork(org.deeplearning4j.nn.multilayer.MultiLayerNetwork) Test(org.junit.Test)

Example 5 with MultiLayerConfiguration

Use of org.deeplearning4j.nn.conf.MultiLayerConfiguration in the deeplearning4j project.

From the class GravesBidirectionalLSTMTest, method testConvergence.

@Test
@Ignore
public void testConvergence() {
    Nd4j.getRandom().setSeed(12345);
    final int state1Len = 100;
    final int state2Len = 30;
    //segment by signal mean
    //Data: has shape [miniBatchSize,nIn,timeSeriesLength];
    final INDArray sig1 = Nd4j.randn(new int[] { 1, 2, state1Len }).mul(0.1);
    final INDArray sig2 = Nd4j.randn(new int[] { 1, 2, state2Len }).mul(0.1).add(Nd4j.ones(new int[] { 1, 2, state2Len }).mul(1.0));
    INDArray sig = Nd4j.concat(2, sig1, sig2);
    INDArray labels = Nd4j.zeros(new int[] { 1, 2, state1Len + state2Len });
    for (int t = 0; t < state1Len; t++) {
        labels.putScalar(new int[] { 0, 0, t }, 1.0);
    }
    for (int t = state1Len; t < state1Len + state2Len; t++) {
        labels.putScalar(new int[] { 0, 1, t }, 1.0);
    }
    for (int i = 0; i < 3; i++) {
        sig = Nd4j.concat(2, sig, sig);
        labels = Nd4j.concat(2, labels, labels);
    }
    final DataSet ds = new DataSet(sig, labels);
    final MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .iterations(5).learningRate(0.1).rmsDecay(0.95)
            .regularization(true).l2(0.001).updater(Updater.ADAGRAD).seed(12345).list()
            .pretrain(false)
            .layer(0, new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder()
                    .activation(Activation.TANH).nIn(2).nOut(2)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(-0.05, 0.05)).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder()
                    .activation(Activation.TANH).nIn(2).nOut(2)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(-0.05, 0.05)).build())
            .layer(2, new org.deeplearning4j.nn.conf.layers.RnnOutputLayer.Builder()
                    .lossFunction(LossFunctions.LossFunction.MCXENT).nIn(2).nOut(2)
                    .activation(Activation.TANH).build())
            .backprop(true).build();
    final MultiLayerNetwork net = new MultiLayerNetwork(conf);
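    //Note: 'score' is a field of the enclosing test class (not shown in this excerpt); a
    //method-local variable could not be reassigned from inside the anonymous listener below.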
    final IterationListener scoreSaver = new IterationListener() {

        @Override
        public boolean invoked() {
            return false;
        }

        @Override
        public void invoke() {
        }

        @Override
        public void iterationDone(Model model, int iteration) {
            score = model.score();
        }
    };
    net.setListeners(scoreSaver, new ScoreIterationListener(1));
    double oldScore = Double.POSITIVE_INFINITY;
    net.init();
    for (int iEpoch = 0; iEpoch < 3; iEpoch++) {
        net.fit(ds);
        System.out.printf("score is %f%n", score);
        assertFalse(Double.isNaN(score));
        assertTrue(score < 0.9 * oldScore);
        oldScore = score;
        final INDArray output = net.output(ds.getFeatureMatrix());
        Evaluation evaluation = new Evaluation();
        evaluation.evalTimeSeries(ds.getLabels(), output);
        System.out.println(evaluation.stats());
    }
}
Also used : Evaluation(org.deeplearning4j.eval.Evaluation) DataSet(org.nd4j.linalg.dataset.DataSet) UniformDistribution(org.deeplearning4j.nn.conf.distribution.UniformDistribution) NeuralNetConfiguration(org.deeplearning4j.nn.conf.NeuralNetConfiguration) MultiLayerConfiguration(org.deeplearning4j.nn.conf.MultiLayerConfiguration) INDArray(org.nd4j.linalg.api.ndarray.INDArray) IterationListener(org.deeplearning4j.optimize.api.IterationListener) ScoreIterationListener(org.deeplearning4j.optimize.listeners.ScoreIterationListener) Model(org.deeplearning4j.nn.api.Model) MultiLayerNetwork(org.deeplearning4j.nn.multilayer.MultiLayerNetwork) Ignore(org.junit.Ignore) Test(org.junit.Test)

Aggregations

MultiLayerConfiguration (org.deeplearning4j.nn.conf.MultiLayerConfiguration): 245 uses
Test (org.junit.Test): 225 uses
MultiLayerNetwork (org.deeplearning4j.nn.multilayer.MultiLayerNetwork): 194 uses
INDArray (org.nd4j.linalg.api.ndarray.INDArray): 132 uses
NeuralNetConfiguration (org.deeplearning4j.nn.conf.NeuralNetConfiguration): 123 uses
DataSet (org.nd4j.linalg.dataset.DataSet): 64 uses
DataSetIterator (org.nd4j.linalg.dataset.api.iterator.DataSetIterator): 59 uses
DenseLayer (org.deeplearning4j.nn.conf.layers.DenseLayer): 46 uses
IrisDataSetIterator (org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator): 45 uses
OutputLayer (org.deeplearning4j.nn.conf.layers.OutputLayer): 45 uses
NormalDistribution (org.deeplearning4j.nn.conf.distribution.NormalDistribution): 42 uses
ScoreIterationListener (org.deeplearning4j.optimize.listeners.ScoreIterationListener): 32 uses
MnistDataSetIterator (org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator): 29 uses
ConvolutionLayer (org.deeplearning4j.nn.conf.layers.ConvolutionLayer): 27 uses
Random (java.util.Random): 26 uses
DL4JException (org.deeplearning4j.exception.DL4JException): 20 uses
BaseSparkTest (org.deeplearning4j.spark.BaseSparkTest): 18 uses
InMemoryModelSaver (org.deeplearning4j.earlystopping.saver.InMemoryModelSaver): 17 uses
MaxEpochsTerminationCondition (org.deeplearning4j.earlystopping.termination.MaxEpochsTerminationCondition): 17 uses
SparkDl4jMultiLayer (org.deeplearning4j.spark.impl.multilayer.SparkDl4jMultiLayer): 17 uses