Use of org.deeplearning4j.nn.conf.distribution.Distribution in the project deeplearning4j (by deeplearning4j).
From the class LayerConfigValidationTest, method testPredefinedConfigValues:
/**
 * Verifies that updater hyperparameters fall back to their predefined defaults
 * when not set on a layer, while per-layer overrides (momentum, rmsDecay, l1, l2)
 * take effect only on the layer they are set on.
 */
@Test
public void testPredefinedConfigValues() {
    // Expected built-in defaults for each updater / regularization setting.
    final double defaultMomentum = 0.9;
    final double defaultAdamMeanDecay = 0.9;
    final double defaultAdamVarDecay = 0.999;
    final double defaultRmsDecay = 0.95;
    final Distribution defaultDist = new NormalDistribution(0, 1);
    final double defaultL1 = 0.0;
    final double defaultL2 = 0.0;

    // --- Nesterovs updater: layer 0 overrides l2, layer 1 overrides momentum ---
    MultiLayerConfiguration nesterovConf = new NeuralNetConfiguration.Builder()
                    .learningRate(0.3)
                    .updater(Updater.NESTEROVS)
                    .regularization(true)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).l2(0.5).build())
                    .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).momentum(0.4).build())
                    .build();
    MultiLayerNetwork nesterovNet = new MultiLayerNetwork(nesterovConf);
    nesterovNet.init();

    Layer nesterovLayer0 = nesterovNet.getLayer(0).conf().getLayer();
    // Momentum not set on layer 0 -> default applies; l2 override sticks.
    assertEquals(defaultMomentum, nesterovLayer0.getMomentum(), 1e-3);
    assertEquals(defaultL1, nesterovLayer0.getL1(), 1e-3);
    assertEquals(0.5, nesterovLayer0.getL2(), 1e-3);
    Layer nesterovLayer1 = nesterovNet.getLayer(1).conf().getLayer();
    assertEquals(0.4, nesterovLayer1.getMomentum(), 1e-3);

    // --- Adam updater: layer 0 overrides l1/l2, layer 1 keeps all defaults ---
    MultiLayerConfiguration adamConf = new NeuralNetConfiguration.Builder()
                    .learningRate(0.3)
                    .updater(Updater.ADAM)
                    .regularization(true)
                    .weightInit(WeightInit.DISTRIBUTION)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).l2(0.5).l1(0.3).build())
                    .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build())
                    .build();
    MultiLayerNetwork adamNet = new MultiLayerNetwork(adamConf);
    adamNet.init();

    Layer adamLayer0 = adamNet.getLayer(0).conf().getLayer();
    assertEquals(0.3, adamLayer0.getL1(), 1e-3);
    assertEquals(0.5, adamLayer0.getL2(), 1e-3);
    Layer adamLayer1 = adamNet.getLayer(1).conf().getLayer();
    assertEquals(defaultAdamMeanDecay, adamLayer1.getAdamMeanDecay(), 1e-3);
    assertEquals(defaultAdamVarDecay, adamLayer1.getAdamVarDecay(), 1e-3);
    assertEquals(defaultDist, adamLayer1.getDist());
    // l1 & l2 local should still be set whether regularization true or false
    assertEquals(defaultL1, adamLayer1.getL1(), 1e-3);
    assertEquals(defaultL2, adamLayer1.getL2(), 1e-3);

    // --- RMSProp updater: layer 0 keeps default rmsDecay, layer 1 overrides it ---
    MultiLayerConfiguration rmsConf = new NeuralNetConfiguration.Builder()
                    .learningRate(0.3)
                    .updater(Updater.RMSPROP)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
                    .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).rmsDecay(0.4).build())
                    .build();
    MultiLayerNetwork rmsNet = new MultiLayerNetwork(rmsConf);
    rmsNet.init();

    Layer rmsLayer0 = rmsNet.getLayer(0).conf().getLayer();
    assertEquals(defaultRmsDecay, rmsLayer0.getRmsDecay(), 1e-3);
    assertEquals(defaultL1, rmsLayer0.getL1(), 1e-3);
    assertEquals(defaultL2, rmsLayer0.getL2(), 1e-3);
    Layer rmsLayer1 = rmsNet.getLayer(1).conf().getLayer();
    assertEquals(0.4, rmsLayer1.getRmsDecay(), 1e-3);
}
Aggregations