Use of org.deeplearning4j.nn.multilayer.MultiLayerNetwork in project deeplearning4j.
Class LayerConfigValidationTest, method testLRPolicyMissingDecayRate.
@Test(expected = IllegalStateException.class)
public void testLRPolicyMissingDecayRate() {
    double lr = 2;
    double power = 3;
    int iterations = 1;
    // The Inverse policy requires lrPolicyDecayRate; it is omitted here, so init() should throw
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .iterations(iterations).learningRate(lr)
            .learningRateDecayPolicy(LearningRatePolicy.Inverse)
            .lrPolicyPower(power)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
}
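For contrast, a minimal sketch of a configuration that should pass this validation, assuming the same pre-1.0 DL4J builder API as the test above: supplying lrPolicyDecayRate alongside lrPolicyPower satisfies the Inverse policy, so init() completes without throwing. The decay-rate value is illustrative.

MultiLayerConfiguration valid = new NeuralNetConfiguration.Builder()
        .iterations(1).learningRate(2)
        .learningRateDecayPolicy(LearningRatePolicy.Inverse)
        .lrPolicyDecayRate(0.5)   // illustrative value; required by the Inverse policy
        .lrPolicyPower(3)
        .list()
        .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
        .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build())
        .build();
new MultiLayerNetwork(valid).init(); // no exception expected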
Use of org.deeplearning4j.nn.multilayer.MultiLayerNetwork in project deeplearning4j.
Class LayerConfigValidationTest, method testLRPolicyMissingSchedule.
@Test(expected = IllegalStateException.class)
public void testLRPolicyMissingSchedule() {
    double lr = 2;
    double lrDecayRate = 5;
    int iterations = 1;
    // The Schedule policy requires a learning rate schedule map; none is supplied, so init() should throw
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .iterations(iterations).learningRate(lr)
            .learningRateDecayPolicy(LearningRatePolicy.Schedule)
            .lrPolicyDecayRate(lrDecayRate)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
}
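A sketch of the missing piece, under the same pre-1.0 API assumptions: the Schedule policy expects an iteration-to-rate map passed via learningRateSchedule(...). With the map supplied (imports of java.util.Map and java.util.HashMap assumed, values illustrative), init() should validate successfully.

Map<Integer, Double> schedule = new HashMap<>();
schedule.put(0, 2.0);     // iteration -> learning rate; illustrative values
schedule.put(100, 0.5);
MultiLayerConfiguration valid = new NeuralNetConfiguration.Builder()
        .iterations(1).learningRate(2)
        .learningRateDecayPolicy(LearningRatePolicy.Schedule)
        .learningRateSchedule(schedule)
        .list()
        .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
        .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build())
        .build();
new MultiLayerNetwork(valid).init(); // no exception expected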
Use of org.deeplearning4j.nn.multilayer.MultiLayerNetwork in project deeplearning4j.
Class LayerConfigValidationTest, method testAdaDeltaValidation.
@Test
public void testAdaDeltaValidation() {
    // Only warnings are logged for this configuration; no exception is thrown
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .rho(0.5)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).rho(0.01).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
}
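As a counterpart, a sketch of the presumably intended usage, assuming the same pre-1.0 API: selecting Updater.ADADELTA explicitly so that rho is consumed by a matching updater, with an optional per-layer override. The rho values are illustrative.

MultiLayerConfiguration adaDelta = new NeuralNetConfiguration.Builder()
        .updater(Updater.ADADELTA)
        .rho(0.95)   // illustrative global rho; applies to layers without a local value
        .list()
        .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
        .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).rho(0.5).build()) // per-layer override
        .build();
new MultiLayerNetwork(adaDelta).init();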
Use of org.deeplearning4j.nn.multilayer.MultiLayerNetwork in project deeplearning4j.
Class LayerConfigValidationTest, method testPredefinedConfigValues.
@Test
public void testPredefinedConfigValues() {
    double expectedMomentum = 0.9;
    double expectedAdamMeanDecay = 0.9;
    double expectedAdamVarDecay = 0.999;
    double expectedRmsDecay = 0.95;
    Distribution expectedDist = new NormalDistribution(0, 1);
    double expectedL1 = 0.0;
    double expectedL2 = 0.0;
    // Nesterovs updater: layer 0 overrides l2, layer 1 overrides momentum
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .learningRate(0.3)
            .updater(Updater.NESTEROVS)
            .regularization(true)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).l2(0.5).build())
            .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).momentum(0.4).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    Layer layerConf = net.getLayer(0).conf().getLayer();
    assertEquals(expectedMomentum, layerConf.getMomentum(), 1e-3);
    assertEquals(expectedL1, layerConf.getL1(), 1e-3);
    assertEquals(0.5, layerConf.getL2(), 1e-3);
    Layer layerConf1 = net.getLayer(1).conf().getLayer();
    assertEquals(0.4, layerConf1.getMomentum(), 1e-3);
    // Adam updater
    conf = new NeuralNetConfiguration.Builder()
            .learningRate(0.3)
            .updater(Updater.ADAM)
            .regularization(true)
            .weightInit(WeightInit.DISTRIBUTION)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).l2(0.5).l1(0.3).build())
            .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .build();
    net = new MultiLayerNetwork(conf);
    net.init();
    layerConf = net.getLayer(0).conf().getLayer();
    assertEquals(0.3, layerConf.getL1(), 1e-3);
    assertEquals(0.5, layerConf.getL2(), 1e-3);
    layerConf1 = net.getLayer(1).conf().getLayer();
    assertEquals(expectedAdamMeanDecay, layerConf1.getAdamMeanDecay(), 1e-3);
    assertEquals(expectedAdamVarDecay, layerConf1.getAdamVarDecay(), 1e-3);
    assertEquals(expectedDist, layerConf1.getDist());
    // Local l1 and l2 should still be set whether regularization is true or false
    assertEquals(expectedL1, layerConf1.getL1(), 1e-3);
    assertEquals(expectedL2, layerConf1.getL2(), 1e-3);
    // RMSProp updater
    conf = new NeuralNetConfiguration.Builder()
            .learningRate(0.3)
            .updater(Updater.RMSPROP)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).rmsDecay(0.4).build())
            .build();
    net = new MultiLayerNetwork(conf);
    net.init();
    layerConf = net.getLayer(0).conf().getLayer();
    assertEquals(expectedRmsDecay, layerConf.getRmsDecay(), 1e-3);
    assertEquals(expectedL1, layerConf.getL1(), 1e-3);
    assertEquals(expectedL2, layerConf.getL2(), 1e-3);
    layerConf1 = net.getLayer(1).conf().getLayer();
    assertEquals(0.4, layerConf1.getRmsDecay(), 1e-3);
}
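The defaults asserted above (momentum 0.9, Adam decays 0.9/0.999, rmsDecay 0.95) apply whenever the corresponding builder method is not called. A brief sketch, under the same pre-1.0 API assumptions, of setting the global value explicitly rather than relying on the predefined default; the per-layer value still takes precedence for its own layer:

MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .learningRate(0.3)
        .updater(Updater.NESTEROVS)
        .momentum(0.8)   // illustrative: replaces the 0.9 default for layers without a local override
        .list()
        .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())                 // momentum 0.8
        .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).momentum(0.4).build())   // momentum 0.4
        .build();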
Use of org.deeplearning4j.nn.multilayer.MultiLayerNetwork in project deeplearning4j.
Class LayerConfigTest, method testDropoutLayerwiseOverride.
@Test
public void testDropoutLayerwiseOverride() {
    // Global dropOut applies to every layer that sets no value of its own
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .dropOut(1.0)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    assertEquals(1.0, conf.getConf(0).getLayer().getDropOut(), 0.0);
    assertEquals(1.0, conf.getConf(1).getLayer().getDropOut(), 0.0);
    // A layer-level dropOut overrides the global value for that layer only
    conf = new NeuralNetConfiguration.Builder()
            .dropOut(1.0)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).dropOut(2.0).build())
            .build();
    net = new MultiLayerNetwork(conf);
    net.init();
    assertEquals(1.0, conf.getConf(0).getLayer().getDropOut(), 0.0);
    assertEquals(2.0, conf.getConf(1).getLayer().getDropOut(), 0.0);
}
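The values 1.0 and 2.0 above only exercise configuration storage; they are not meaningful dropout settings. A sketch of the same override pattern with more typical values (illustrative only; the exact interpretation of the dropOut value depends on the DL4J version):

MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .dropOut(0.5)    // illustrative global value
        .list()
        .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())                  // inherits 0.5
        .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).dropOut(0.25).build())    // layer-level override
        .build();
new MultiLayerNetwork(conf).init();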