Use of org.deeplearning4j.optimize.api.ConvexOptimizer in project deeplearning4j by deeplearning4j.
From the class TestOptimizers, method testRosenbrockFnMultipleStepsHelper:
private static void testRosenbrockFnMultipleStepsHelper(OptimizationAlgorithm oa, int nOptIter, int maxNumLineSearchIter) {
    double[] scores = new double[nOptIter + 1];
    for (int i = 0; i <= nOptIter; i++) {
        NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                        .maxNumLineSearchIterations(maxNumLineSearchIter)
                        .iterations(i)
                        .stepFunction(new org.deeplearning4j.nn.conf.stepfunctions.NegativeDefaultStepFunction())
                        .learningRate(1e-1)
                        .layer(new RBM.Builder().nIn(1).nOut(1).updater(Updater.SGD).build())
                        .build();
        //Normally done by ParamInitializers, but obviously that isn't done here
        conf.addVariable("W");
        Model m = new RosenbrockFunctionModel(100, conf);
        if (i == 0) {
            m.computeGradientAndScore();
            //Before optimization
            scores[0] = m.score();
        } else {
            ConvexOptimizer opt = getOptimizer(oa, conf, m);
            opt.optimize();
            m.computeGradientAndScore();
            scores[i] = m.score();
            assertTrue("NaN or infinite score: " + scores[i],
                            !Double.isNaN(scores[i]) && !Double.isInfinite(scores[i]));
        }
    }
    if (PRINT_OPT_RESULTS) {
        System.out.println("Rosenbrock: Multiple optimization iterations ( " + nOptIter
                        + " opt. iter.) score vs iteration, maxNumLineSearchIter= " + maxNumLineSearchIter + ": " + oa);
        System.out.println(Arrays.toString(scores));
    }
    for (int i = 1; i < scores.length; i++) {
        if (i == 1) {
            //Require at least one step of improvement
            assertTrue(scores[i] < scores[i - 1]);
        } else {
            assertTrue(scores[i] <= scores[i - 1]);
        }
    }
}
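The getOptimizer(oa, conf, m) helper called above is not part of this excerpt. A minimal sketch of what such a factory could look like, assuming the solver classes in org.deeplearning4j.optimize.solvers and the (conf, stepFunction, listeners, model) constructor used by StochasticGradientDescent in the second example below; treat the exact constructor signatures as assumptions:

private static ConvexOptimizer getOptimizer(OptimizationAlgorithm oa, NeuralNetConfiguration conf, Model m) {
    // Map the requested optimization algorithm to the corresponding solver implementation.
    switch (oa) {
        case STOCHASTIC_GRADIENT_DESCENT:
            return new StochasticGradientDescent(conf, new NegativeDefaultStepFunction(), null, m);
        case LINE_GRADIENT_DESCENT:
            return new LineGradientDescent(conf, new NegativeDefaultStepFunction(), null, m);
        case CONJUGATE_GRADIENT:
            return new ConjugateGradient(conf, new NegativeDefaultStepFunction(), null, m);
        case LBFGS:
            return new LBFGS(conf, new NegativeDefaultStepFunction(), null, m);
        default:
            throw new UnsupportedOperationException("Unsupported algorithm: " + oa);
    }
}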
Use of org.deeplearning4j.optimize.api.ConvexOptimizer in project deeplearning4j by deeplearning4j.
From the class TestDecayPolicies, method testLearningRateScoreDecay:
@Test
public void testLearningRateScoreDecay() {
    double lr = 0.01;
    double lrScoreDecay = 0.10;
    int[] nIns = {4, 2};
    int[] nOuts = {2, 3};
    int oldScore = 1;
    int newScore = 1;
    int iteration = 3;
    INDArray gradientW = Nd4j.ones(nIns[0], nOuts[0]);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .learningRate(lr)
                    .learningRateDecayPolicy(LearningRatePolicy.Score)
                    .lrPolicyDecayRate(lrScoreDecay)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(nIns[0]).nOut(nOuts[0])
                                    .updater(org.deeplearning4j.nn.conf.Updater.SGD).build())
                    .layer(1, new OutputLayer.Builder().nIn(nIns[1]).nOut(nOuts[1])
                                    .updater(org.deeplearning4j.nn.conf.Updater.SGD).build())
                    .backprop(true).pretrain(false).build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    ConvexOptimizer opt = new StochasticGradientDescent(net.getDefaultConfiguration(),
                    new NegativeDefaultStepFunction(), null, net);
    opt.checkTerminalConditions(gradientW, oldScore, newScore, iteration);
    assertEquals(lrScoreDecay, net.getLayer(0).conf().getLrPolicyDecayRate(), 1e-4);
    assertEquals(lr * (lrScoreDecay + Nd4j.EPS_THRESHOLD), net.getLayer(0).conf().getLearningRateByParam("W"), 1e-4);
}
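The final assertEquals expects the layer's learning rate for "W" to drop to roughly lr * lrScoreDecay (0.01 * 0.10 = 0.001) once checkTerminalConditions sees no score improvement; the Nd4j.EPS_THRESHOLD term presumably mirrors a small epsilon the optimizer folds into the decay. A rough sketch of the rule as this test implies it, assuming the Score policy multiplies the current rate by lrPolicyDecayRate when the score fails to improve (applyScoreDecay is a hypothetical helper, not a DL4J API):

// Hypothetical illustration of the score-based decay implied by the test's expected value.
static double applyScoreDecay(double currentLr, double lrPolicyDecayRate, double oldScore, double newScore) {
    // If the score did not improve, shrink the learning rate by the decay factor.
    return (newScore >= oldScore) ? currentLr * lrPolicyDecayRate : currentLr;
}

// For the values above: applyScoreDecay(0.01, 0.10, 1, 1) == 0.001,
// which matches lr * lrScoreDecay in the final assertion (up to the epsilon term).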