Use of org.deeplearning4j.nn.multilayer.MultiLayerNetwork in project deeplearning4j by deeplearning4j.
From the class LayerConfigTest, method testLearningRatePolicyNone:
@Test
public void testLearningRatePolicyNone() {
    double lr = 2;
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .learningRate(lr)
                    .learningRateDecayPolicy(LearningRatePolicy.None)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
                    .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build())
                    .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    assertEquals(LearningRatePolicy.None, conf.getConf(0).getLearningRatePolicy());
    assertEquals(LearningRatePolicy.None, conf.getConf(1).getLearningRatePolicy());
}
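With LearningRatePolicy.None, no decay schedule is applied, so the effective learning rate should simply stay at the configured base value for every iteration. A minimal sketch of that constant schedule (a hypothetical plain-Java helper, not a DL4J API):

    // LearningRatePolicy.None: the rate never depends on the iteration count.
    static double lrAtIteration(double baseLr, int iteration) {
        return baseLr; // no decay applied
    }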
Use of org.deeplearning4j.nn.multilayer.MultiLayerNetwork in project deeplearning4j by deeplearning4j.
From the class LayerConfigTest, method testLayerName:
@Test
public void testLayerName() {
    String name1 = "genisys";
    String name2 = "bill";
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).name(name1).build())
                    .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).name(name2).build())
                    .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    assertEquals(name1, conf.getConf(0).getLayer().getLayerName());
    assertEquals(name2, conf.getConf(1).getLayer().getLayerName());
}
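Naming layers makes them addressable later. A short usage sketch, assuming MultiLayerNetwork#getLayer(String) is available in the version under test:

    // Look up the layers by the names assigned in the builder above.
    org.deeplearning4j.nn.api.Layer first = net.getLayer("genisys");
    org.deeplearning4j.nn.api.Layer second = net.getLayer("bill");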
Use of org.deeplearning4j.nn.multilayer.MultiLayerNetwork in project deeplearning4j by deeplearning4j.
From the class LayerConfigTest, method testLearningRatePolicySigmoid:
@Test
public void testLearningRatePolicySigmoid() {
    double lr = 2;
    double lrDecayRate = 5;
    double steps = 4;
    int iterations = 1;
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .iterations(iterations)
                    .learningRate(lr)
                    .learningRateDecayPolicy(LearningRatePolicy.Sigmoid)
                    .lrPolicyDecayRate(lrDecayRate)
                    .lrPolicySteps(steps)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
                    .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build())
                    .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    assertEquals(LearningRatePolicy.Sigmoid, conf.getConf(0).getLearningRatePolicy());
    assertEquals(LearningRatePolicy.Sigmoid, conf.getConf(1).getLearningRatePolicy());
    assertEquals(lrDecayRate, conf.getConf(0).getLrPolicyDecayRate(), 0.0);
    assertEquals(lrDecayRate, conf.getConf(1).getLrPolicyDecayRate(), 0.0);
    assertEquals(steps, conf.getConf(0).getLrPolicySteps(), 0.0);
    assertEquals(steps, conf.getConf(1).getLrPolicySteps(), 0.0);
}
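The Sigmoid policy follows the Caffe-style schedule lr / (1 + exp(-decayRate * (iteration - steps))). A worked sketch of that formula, assuming this is the exact form DL4J applies internally:

    // Caffe-style sigmoid decay (assumed form of LearningRatePolicy.Sigmoid).
    static double sigmoidDecay(double lr, double decayRate, double steps, int iteration) {
        return lr / (1.0 + Math.exp(-decayRate * (iteration - steps)));
    }

With the values above (lr = 2, decayRate = 5, steps = 4), the rate is near 0 for early iterations and rises toward 2 once the iteration count passes steps.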
Use of org.deeplearning4j.nn.multilayer.MultiLayerNetwork in project deeplearning4j by deeplearning4j.
From the class LayerConfigTest, method testUpdaterRhoRmsDecayLayerwiseOverride:
@Test
public void testUpdaterRhoRmsDecayLayerwiseOverride() {
    // Global ADADELTA with rho = 0.5; layer 1 overrides rho, layer 0 inherits it.
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .updater(Updater.ADADELTA).rho(0.5)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
                    .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).rho(0.01).build())
                    .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    assertEquals("ADADELTA", conf.getConf(0).getLayer().getUpdater().toString());
    assertEquals("ADADELTA", conf.getConf(1).getLayer().getUpdater().toString());
    assertEquals(0.5, conf.getConf(0).getLayer().getRho(), 0.0);
    assertEquals(0.01, conf.getConf(1).getLayer().getRho(), 0.0);

    // Global RMSPROP; layer 0 overrides rmsDecay, layer 1 overrides the updater itself.
    conf = new NeuralNetConfiguration.Builder()
                    .updater(Updater.RMSPROP).rmsDecay(2.0)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).rmsDecay(1.0).build())
                    .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).updater(Updater.ADADELTA).rho(0.5).build())
                    .build();
    net = new MultiLayerNetwork(conf);
    net.init();

    assertEquals("RMSPROP", conf.getConf(0).getLayer().getUpdater().toString());
    assertEquals("ADADELTA", conf.getConf(1).getLayer().getUpdater().toString());
    assertEquals(0.5, conf.getConf(1).getLayer().getRho(), 0.0);
    assertEquals(1.0, conf.getConf(0).getLayer().getRmsDecay(), 0.0);
}
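The pattern under test is layer-wise override resolution: a value set on an individual layer wins, and an unset per-layer value falls back to the global builder default. A minimal sketch of that rule (a hypothetical helper, not DL4J code):

    // Prefer the per-layer value when set; otherwise use the global default.
    static double resolve(Double layerValue, double globalDefault) {
        return layerValue != null ? layerValue : globalDefault;
    }

For the first configuration above, resolve(null, 0.5) gives layer 0 the global rho of 0.5, while resolve(0.01, 0.5) gives layer 1 its own 0.01.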
Use of org.deeplearning4j.nn.multilayer.MultiLayerNetwork in project deeplearning4j by deeplearning4j.
From the class LayerConfigTest, method testLearningRatePolicyPoly:
@Test
public void testLearningRatePolicyPoly() {
    double lr = 2;
    double lrDecayRate = 5;
    double power = 3;
    int iterations = 1;
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .iterations(iterations)
                    .learningRate(lr)
                    .learningRateDecayPolicy(LearningRatePolicy.Poly)
                    .lrPolicyDecayRate(lrDecayRate)
                    .lrPolicyPower(power)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
                    .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build())
                    .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    assertEquals(LearningRatePolicy.Poly, conf.getConf(0).getLearningRatePolicy());
    assertEquals(LearningRatePolicy.Poly, conf.getConf(1).getLearningRatePolicy());
    assertEquals(lrDecayRate, conf.getConf(0).getLrPolicyDecayRate(), 0.0);
    assertEquals(lrDecayRate, conf.getConf(1).getLrPolicyDecayRate(), 0.0);
    assertEquals(power, conf.getConf(0).getLrPolicyPower(), 0.0);
    assertEquals(power, conf.getConf(1).getLrPolicyPower(), 0.0);
}
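The Poly policy follows the Caffe-style schedule lr * (1 - iteration / maxIterations)^power. A sketch assuming that form (maxIterations here stands in for the total iteration budget):

    // Caffe-style polynomial decay (assumed form of LearningRatePolicy.Poly).
    static double polyDecay(double lr, double power, int iteration, int maxIterations) {
        return lr * Math.pow(1.0 - (double) iteration / maxIterations, power);
    }

With lr = 2 and power = 3, the rate starts at 2.0 and decays smoothly to 0 as the iteration count approaches maxIterations.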