Usage of org.nd4j.linalg.api.rng.Random in the nd4j project (deeplearning4j): class RandomTests, method testAndersonDarling.
/**
* Uses a test of Gaussianity for testing the values out of GaussianDistribution
* See https://en.wikipedia.org/wiki/Anderson%E2%80%93Darling_test
*
* @throws Exception
*/
@Test
public void testAndersonDarling() throws Exception {
Random random1 = Nd4j.getRandomFactory().getNewRandomInstance(119);
INDArray z1 = Nd4j.create(1000);
GaussianDistribution op1 = new GaussianDistribution(z1, 0.0, 1.0);
Nd4j.getExecutioner().exec(op1, random1);
int n = z1.length();
// using this just for the cdf
Distribution nd = new NormalDistribution(random1, 0.0, 1.0);
Nd4j.sort(z1, true);
System.out.println("Data for Anderson-Darling: " + z1);
for (int i = 0; i < n; i++) {
Double res = nd.cumulativeProbability(z1.getDouble(i));
assertTrue(res >= 0.0);
assertTrue(res <= 1.0);
// avoid overflow when taking log later.
if (res == 0)
res = 0.0000001;
if (res == 1)
res = 0.9999999;
z1.putScalar(i, res);
}
double A = 0.0;
for (int i = 0; i < n; i++) {
A -= (2 * i + 1) * (Math.log(z1.getDouble(i)) + Math.log(1 - z1.getDouble(n - i - 1)));
}
A = A / n - n;
A *= (1 + 4.0 / n - 25.0 / (n * n));
assertTrue("Critical (max) value for 1000 points and confidence α = 0.0001 is 1.8692, received: " + A, A < 1.8692);
}
Usage of org.nd4j.linalg.api.rng.Random in the nd4j project (deeplearning4j): class RandomTests, method testStepOver2.
@Test
public void testStepOver2() throws Exception {
    Random random = Nd4j.getRandomFactory().getNewRandomInstance(119);
    // Guard clause: this test only applies to the native RNG implementation.
    if (!(random instanceof NativeRandom)) {
        log.warn("Not a NativeRandom object received, skipping test");
        return;
    }
    NativeRandom nativeRng = (NativeRandom) random;
    assertTrue(nativeRng.getBufferSize() > 1000000L);
    assertEquals(0, nativeRng.getPosition());
    // One draw advances the position within the current generation.
    nativeRng.nextLong();
    assertEquals(1, nativeRng.getPosition());
    assertEquals(1, nativeRng.getGeneration());
    // Consuming a full buffer's worth (plus one) of values wraps the position
    // and bumps the generation counter.
    for (long draw = 0; draw <= nativeRng.getBufferSize(); draw++) {
        nativeRng.nextLong();
    }
    assertEquals(2, nativeRng.getPosition());
    assertEquals(2, nativeRng.getGeneration());
    // Re-seeding must not disturb position or generation.
    nativeRng.reSeed(8792);
    assertEquals(2, nativeRng.getGeneration());
    assertEquals(2, nativeRng.getPosition());
}
Usage of org.nd4j.linalg.api.rng.Random in the nd4j project (deeplearning4j): class RandomTests, method testStepOver4.
@Test
public void testStepOver4() throws Exception {
    // Two RNGs created with identical seed and buffer size must stay in
    // lockstep: every batch of uniform draws should be element-wise equal.
    Random rngA = Nd4j.getRandomFactory().getNewRandomInstance(119, 100000);
    Random rngB = Nd4j.getRandomFactory().getNewRandomInstance(119, 100000);
    for (int round = 0; round < 1000; round++) {
        INDArray batchA = Nd4j.rand(1, 10000, rngA);
        INDArray batchB = Nd4j.rand(1, 10000, rngB);
        assertEquals(batchA, batchB);
    }
}
Usage of org.nd4j.linalg.api.rng.Random in the deeplearning4j project: class TestOptimizers, method testSphereFnMultipleStepsHelper.
/**
 * Runs the given optimization algorithm for increasing iteration budgets on the
 * sphere function and asserts that the score is monotonically non-increasing and
 * ends up near zero.
 *
 * @param oa                   optimization algorithm under test
 * @param nOptIter             maximum number of optimization iterations to run
 * @param maxNumLineSearchIter line-search iteration cap passed into the config
 */
private static void testSphereFnMultipleStepsHelper(OptimizationAlgorithm oa, int nOptIter, int maxNumLineSearchIter) {
// scores[0] holds the pre-optimization score; scores[i] the score after i iterations
double[] scores = new double[nOptIter + 1];
for (int i = 0; i <= nOptIter; i++) {
// fixed seed each pass so every run starts from the same initial parameters
Random rng = new DefaultRandom(12345L);
org.nd4j.linalg.api.rng.distribution.Distribution dist = new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(rng, -10, 10);
NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().maxNumLineSearchIterations(maxNumLineSearchIter).iterations(i).learningRate(0.1).layer(new DenseLayer.Builder().nIn(1).nOut(1).updater(Updater.SGD).build()).build();
//Normally done by ParamInitializers, but obviously that isn't done here
conf.addVariable("W");
// 100-dimensional sphere function model with uniformly-initialized parameters
Model m = new SphereFunctionModel(100, dist, conf);
if (i == 0) {
m.computeGradientAndScore();
//Before optimization
scores[0] = m.score();
} else {
ConvexOptimizer opt = getOptimizer(oa, conf, m);
opt.optimize();
m.computeGradientAndScore();
scores[i] = m.score();
// optimizer must never blow up to NaN/Inf
assertTrue(!Double.isNaN(scores[i]) && !Double.isInfinite(scores[i]));
}
}
if (PRINT_OPT_RESULTS) {
System.out.println("Multiple optimization iterations (" + nOptIter + " opt. iter.) score vs iteration, maxNumLineSearchIter=" + maxNumLineSearchIter + ": " + oa);
System.out.println(Arrays.toString(scores));
}
// more iterations must never make the score worse (deterministic start point)
for (int i = 1; i < scores.length; i++) {
assertTrue(scores[i] <= scores[i - 1]);
}
//Very easy function, expect score ~= 0 with any reasonable number of steps/numLineSearchIter
assertTrue(scores[scores.length - 1] < 1.0);
}
Usage of org.nd4j.linalg.api.rng.Random in the deeplearning4j project: class DM, method inferSequence.
/**
 * This method does training on previously unseen paragraph, and returns inferred vector.
 *
 * @param sequence        previously unseen sequence (paragraph) to infer a vector for;
 *                        {@code null} is returned if it is empty
 * @param nr              seed for the word2vec-style linear-congruential next-random state
 * @param learningRate    initial learning rate, annealed toward {@code minLearningRate}
 * @param minLearningRate floor for the annealed learning rate
 * @param iterations      number of training passes over the sequence
 * @return inferred paragraph vector of shape [1, layerSize], or {@code null} for an empty sequence
 */
@Override
public INDArray inferSequence(Sequence<T> sequence, long nr, double learningRate, double minLearningRate, int iterations) {
AtomicLong nextRandom = new AtomicLong(nr);
if (sequence.isEmpty())
return null;
// RNG seeded from configured seed mixed with the sequence's hashCode so each
// sequence gets a distinct but reproducible initialization.
// NOTE(review): hashCode() may be negative, making the product negative — presumably
// the factory tolerates negative seeds; confirm.
Random random = Nd4j.getRandomFactory().getNewRandomInstance(configuration.getSeed() * sequence.hashCode(), lookupTable.layerSize() + 1);
// initial vector: uniform in [-0.5, 0.5) scaled down by layer size
INDArray ret = Nd4j.rand(new int[] { 1, lookupTable.layerSize() }, random).subi(0.5).divi(lookupTable.layerSize());
for (int iter = 0; iter < iterations; iter++) {
for (int i = 0; i < sequence.size(); i++) {
// word2vec-style LCG update of the shared random state
nextRandom.set(Math.abs(nextRandom.get() * 25214903917L + 11));
// NOTE(review): "(int) nextRandom.get() % window" casts BEFORE the modulo;
// truncating the (non-negative) long to int can produce a negative value,
// so the window offset passed to dm() may be negative. If a value in
// [0, window) was intended, "(int) (nextRandom.get() % window)" would be
// needed — confirm against dm()'s contract before changing.
dm(i, sequence, (int) nextRandom.get() % window, nextRandom, learningRate, null, true, ret);
}
// anneal learning rate toward minLearningRate as remaining iterations shrink
learningRate = ((learningRate - minLearningRate) / (iterations - iter)) + minLearningRate;
}
finish();
return ret;
}
Aggregations