Use of org.nd4j.linalg.api.rng.DefaultRandom in project deeplearning4j by deeplearning4j, from the class TestOptimizers, method testSphereFnOptHelper.
public void testSphereFnOptHelper(OptimizationAlgorithm oa, int numLineSearchIter, int nDimensions) {
    if (PRINT_OPT_RESULTS)
        System.out.println("---------\n Alg= " + oa + ", nIter= " + numLineSearchIter + ", nDimensions= " + nDimensions);

    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .maxNumLineSearchIterations(numLineSearchIter).iterations(100).learningRate(1e-2)
                    .layer(new RBM.Builder().nIn(1).nOut(1).updater(Updater.SGD).build()).build();
    //Normally done by ParamInitializers, but obviously that isn't done here
    conf.addVariable("W");

    Random rng = new DefaultRandom(12345L);
    org.nd4j.linalg.api.rng.distribution.Distribution dist =
                    new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(rng, -10, 10);
    Model m = new SphereFunctionModel(nDimensions, dist, conf);

    m.computeGradientAndScore();
    double scoreBefore = m.score();
    assertTrue(!Double.isNaN(scoreBefore) && !Double.isInfinite(scoreBefore));
    if (PRINT_OPT_RESULTS) {
        System.out.println("Before:");
        System.out.println(scoreBefore);
        System.out.println(m.params());
    }

    ConvexOptimizer opt = getOptimizer(oa, conf, m);
    opt.setupSearchState(m.gradientAndScore());
    opt.optimize();

    m.computeGradientAndScore();
    double scoreAfter = m.score();
    assertTrue(!Double.isNaN(scoreAfter) && !Double.isInfinite(scoreAfter));
    if (PRINT_OPT_RESULTS) {
        System.out.println("After:");
        System.out.println(scoreAfter);
        System.out.println(m.params());
    }

    //Expected behaviour after optimization:
    //(a) score is better (lower) after optimization.
    //(b) Parameters are closer to minimum after optimization (TODO)
    assertTrue("Score did not improve after optimization (b= " + scoreBefore + " ,a= " + scoreAfter + ")",
                    scoreAfter < scoreBefore);
}
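Isolated from the test above, a minimal sketch of the DefaultRandom pattern it relies on: a fixed seed feeds a UniformDistribution over [-10, 10], which is then used to draw the model's initial parameters. The class name SeededUniformInitSketch and the sample(new int[] {1, 5}) call are illustrative assumptions, not part of the test.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.rng.DefaultRandom;
import org.nd4j.linalg.api.rng.Random;
import org.nd4j.linalg.api.rng.distribution.Distribution;
import org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution;

public class SeededUniformInitSketch {

    public static void main(String[] args) {
        // Fixed seed so the drawn values are reproducible across runs,
        // mirroring new DefaultRandom(12345L) in testSphereFnOptHelper.
        Random rng = new DefaultRandom(12345L);

        // Uniform distribution over [-10, 10], backed by the seeded RNG.
        Distribution dist = new UniformDistribution(rng, -10, 10);

        // Draw a small row vector of "initial parameters"; the shape-based
        // sample(int[]) call is assumed to be available on nd4j's Distribution.
        INDArray initialParams = dist.sample(new int[] {1, 5});
        System.out.println(initialParams);
    }
}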
Use of org.nd4j.linalg.api.rng.DefaultRandom in project deeplearning4j by deeplearning4j, from the class TestOptimizers, method testSphereFnMultipleStepsHelper.
private static void testSphereFnMultipleStepsHelper(OptimizationAlgorithm oa, int nOptIter, int maxNumLineSearchIter) {
    double[] scores = new double[nOptIter + 1];

    for (int i = 0; i <= nOptIter; i++) {
        Random rng = new DefaultRandom(12345L);
        org.nd4j.linalg.api.rng.distribution.Distribution dist =
                        new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(rng, -10, 10);
        NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                        .maxNumLineSearchIterations(maxNumLineSearchIter).iterations(i).learningRate(0.1)
                        .layer(new DenseLayer.Builder().nIn(1).nOut(1).updater(Updater.SGD).build()).build();
        //Normally done by ParamInitializers, but obviously that isn't done here
        conf.addVariable("W");

        Model m = new SphereFunctionModel(100, dist, conf);
        if (i == 0) {
            m.computeGradientAndScore();
            scores[0] = m.score(); //Before optimization
        } else {
            ConvexOptimizer opt = getOptimizer(oa, conf, m);
            opt.optimize();
            m.computeGradientAndScore();
            scores[i] = m.score();
            assertTrue(!Double.isNaN(scores[i]) && !Double.isInfinite(scores[i]));
        }
    }

    if (PRINT_OPT_RESULTS) {
        System.out.println("Multiple optimization iterations (" + nOptIter + " opt. iter.) score vs iteration, maxNumLineSearchIter=" + maxNumLineSearchIter + ": " + oa);
        System.out.println(Arrays.toString(scores));
    }

    for (int i = 1; i < scores.length; i++) {
        assertTrue(scores[i] <= scores[i - 1]);
    }
    //Very easy function, expect score ~= 0 with any reasonable number of steps/numLineSearchIter
    assertTrue(scores[scores.length - 1] < 1.0);
}
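This helper constructs a freshly seeded DefaultRandom(12345L) on every loop pass, so each iteration count rebuilds the model from the same initial parameters and the entries of scores[] are directly comparable. A minimal sketch of that reproducibility property, assuming nd4j's shape-based sample(int[]) and content-comparing INDArray.equals; the class name SeedReproducibilitySketch is illustrative.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.rng.DefaultRandom;
import org.nd4j.linalg.api.rng.distribution.Distribution;
import org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution;

public class SeedReproducibilitySketch {

    public static void main(String[] args) {
        // Two distributions built from independently constructed RNGs with the same
        // seed should draw identical samples, which is what lets the loop above start
        // each optimization run from the same point in parameter space.
        Distribution d1 = new UniformDistribution(new DefaultRandom(12345L), -10, 10);
        Distribution d2 = new UniformDistribution(new DefaultRandom(12345L), -10, 10);

        INDArray a = d1.sample(new int[] {1, 100});
        INDArray b = d2.sample(new int[] {1, 100});

        // Expected to print true if the seeding behaves as assumed.
        System.out.println("Identical initial parameters: " + a.equals(b));
    }
}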