Example usage of org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector in the Apache Ignite project: class MLPTest, method testXOR.
/**
 * Test that MLP with parameters that should produce function close to 'XOR' is close to 'XOR' on 'XOR' domain.
 */
@Test
public void testXOR() {
    // Topology: 2 inputs -> 2 sigmoid hidden neurons (with bias) -> 1 sigmoid output (with bias).
    MLPArchitecture arch = new MLPArchitecture(2)
        .withAddedLayer(2, true, Activators.SIGMOID)
        .withAddedLayer(1, true, Activators.SIGMOID);

    MultilayerPerceptron xorNet = new MultilayerPerceptron(arch, new MLPConstInitializer(1, 2));

    // Hand-picked weights/biases whose sigmoid composition approximates XOR.
    double[][] hiddenWeights = { { 20.0, 20.0 }, { -20.0, -20.0 } };
    double[] hiddenBiases = { -10.0, 30.0 };
    double[][] outWeights = { { 20.0, 20.0 } };
    double[] outBiases = { -30.0 };

    xorNet.setWeights(1, new DenseLocalOnHeapMatrix(hiddenWeights));
    xorNet.setBiases(1, new DenseLocalOnHeapVector(hiddenBiases));
    xorNet.setWeights(2, new DenseLocalOnHeapMatrix(outWeights));
    xorNet.setBiases(2, new DenseLocalOnHeapVector(outBiases));

    // All four points of the XOR truth table, one sample per column.
    double[][] domain = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } };
    Matrix input = new DenseLocalOnHeapMatrix(domain).transpose();

    Matrix predict = xorNet.apply(input);
    Vector truth = new DenseLocalOnHeapVector(new double[] { 0.0, 1.0, 1.0, 0.0 });

    TestUtils.checkIsInEpsilonNeighbourhood(predict.getRow(0), truth, 1E-4);
}
Example usage of org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector in the Apache Ignite project: class MnistDistributed, method testMNISTDistributed.
/**
 * Trains an MLP classifier on the MNIST dataset with the distributed group-update trainer
 * and prints the resulting classification accuracy on a held-out test part.
 *
 * @throws IOException In case of errors while loading the MNIST dataset.
 */
public void testMNISTDistributed() throws IOException {
    int samplesCnt = 60_000;
    int hiddenNeuronsCnt = 100;
    // get1() is the training stream, get2() the test stream (both streams are single-use).
    IgniteBiTuple<Stream<DenseLocalOnHeapVector>, Stream<DenseLocalOnHeapVector>> trainingAndTest = loadMnist(samplesCnt);
    // Load training mnist part into a cache.
    Stream<DenseLocalOnHeapVector> trainingMnist = trainingAndTest.get1();
    List<DenseLocalOnHeapVector> trainingMnistLst = trainingMnist.collect(Collectors.toList());
    IgniteCache<Integer, LabeledVector<Vector, Vector>> labeledVectorsCache = LabeledVectorsCache.createNew(ignite);
    loadIntoCache(trainingMnistLst, labeledVectorsCache);
    // 35 global steps, synchronizing local updates across nodes every 2 steps.
    MLPGroupUpdateTrainer<RPropParameterUpdate> trainer = MLPGroupUpdateTrainer.getDefault(ignite).withMaxGlobalSteps(35).withSyncPeriod(2);
    // FEATURES_CNT inputs -> sigmoid hidden layer -> 10 outputs (one per digit class).
    MLPArchitecture arch = new MLPArchitecture(FEATURES_CNT).withAddedLayer(hiddenNeuronsCnt, true, Activators.SIGMOID).withAddedLayer(10, false, Activators.SIGMOID);
    // NOTE(review): 9 appears to be a trainer-group parameter and 2000 a batch size — confirm against MLPGroupUpdateTrainerCacheInput javadoc.
    MultilayerPerceptron mdl = trainer.train(new MLPGroupUpdateTrainerCacheInput(arch, 9, labeledVectorsCache, 2000));
    // Evaluate on 10 000 test samples; vec2Num collapses a one-hot column to its class index.
    IgniteBiTuple<Matrix, Matrix> testDs = createDataset(trainingAndTest.get2(), 10_000, FEATURES_CNT);
    Vector truth = testDs.get2().foldColumns(VectorUtils::vec2Num);
    Vector predicted = mdl.apply(testDs.get1()).foldColumns(VectorUtils::vec2Num);
    Tracer.showAscii(truth);
    Tracer.showAscii(predicted);
    // Accuracy = fraction of positions where predicted class equals true class.
    X.println("Accuracy: " + VectorUtils.zipWith(predicted, truth, (x, y) -> x.equals(y) ? 1.0 : 0.0).sum() / truth.size() * 100 + "%.");
}
Example usage of org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector in the Apache Ignite project: class MnistLocal, method tstMNISTLocal.
/**
 * Run nn classifier on MNIST using bi-indexed cache as a storage for dataset.
 * To run this test rename this method so it starts from 'test'.
 *
 * @throws IOException In case of loading MNIST dataset errors.
 */
@Test
public void tstMNISTLocal() throws IOException {
    int samplesCnt = 60_000;
    int featCnt = 28 * 28;
    int hiddenNeuronsCnt = 100;
    // get1() is the training stream, get2() the test stream (both streams are single-use).
    IgniteBiTuple<Stream<DenseLocalOnHeapVector>, Stream<DenseLocalOnHeapVector>> trainingAndTest = loadMnist(samplesCnt);
    Stream<DenseLocalOnHeapVector> trainingMnistStream = trainingAndTest.get1();
    Stream<DenseLocalOnHeapVector> testMnistStream = trainingAndTest.get2();
    IgniteBiTuple<Matrix, Matrix> ds = createDataset(trainingMnistStream, samplesCnt, featCnt);
    IgniteBiTuple<Matrix, Matrix> testDs = createDataset(testMnistStream, 10000, featCnt);
    // featCnt inputs -> sigmoid hidden layer -> 10 outputs (one per digit class).
    MLPArchitecture conf = new MLPArchitecture(featCnt).withAddedLayer(hiddenNeuronsCnt, true, Activators.SIGMOID).withAddedLayer(10, false, Activators.SIGMOID);
    SimpleMLPLocalBatchTrainerInput input = new SimpleMLPLocalBatchTrainerInput(conf, new Random(), ds.get1(), ds.get2(), 2000);
    // Start the timer BEFORE training so the printed duration actually measures train();
    // previously the timer was started after training completed and always reported ~0 ms.
    X.println("Training started");
    long before = System.currentTimeMillis();
    MultilayerPerceptron mdl = new MLPLocalBatchTrainer<>(LossFunctions.MSE, () -> new RPropUpdateCalculator(0.1, 1.2, 0.5), 1E-7, 200).train(input);
    X.println("Training finished in " + (System.currentTimeMillis() - before));
    // vec2Num collapses a one-hot column to its class index for comparison.
    Vector predicted = mdl.apply(testDs.get1()).foldColumns(VectorUtils::vec2Num);
    Vector truth = testDs.get2().foldColumns(VectorUtils::vec2Num);
    Tracer.showAscii(truth);
    Tracer.showAscii(predicted);
    // Accuracy = fraction of positions where predicted class equals true class.
    X.println("Accuracy: " + VectorUtils.zipWith(predicted, truth, (x, y) -> x.equals(y) ? 1.0 : 0.0).sum() / truth.size() * 100 + "%.");
}
Example usage of org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector in the Apache Ignite project: class GradientDescentTest, method testOptimizeWithOffset.
/**
 * Test gradient descent optimization on function y = (x - 2)^2 with gradient function 2 * (x - 2).
 */
@Test
public void testOptimizeWithOffset() {
    // Gradient of (x - 2)^2 evaluated at the current point; data arguments are ignored.
    GradientDescent optimizer = new GradientDescent(
        (inputs, groundTruth, pnt) -> pnt.minus(new DenseLocalOnHeapVector(new double[] { 2.0 })).times(2.0),
        new SimpleUpdater(0.01));

    Matrix dummyData = new DenseLocalOnHeapMatrix(new double[1][1]);
    Vector startPnt = new DenseLocalOnHeapVector(new double[] { 2.0 });

    Vector res = optimizer.optimize(dummyData, startPnt);

    // The minimum of (x - 2)^2 is at x = 2.
    TestUtils.assertEquals(2, res.get(0), PRECISION);
}
Example usage of org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector in the Apache Ignite project: class LinearRegressionModelTest, method testPredict.
/**
 * Verify that model prediction equals intercept + dot(weights, observation)
 * for weights (2, 3) and intercept 1.
 */
@Test
public void testPredict() {
    Vector weights = new DenseLocalOnHeapVector(new double[] { 2.0, 3.0 });
    LinearRegressionModel mdl = new LinearRegressionModel(weights, 1.0);

    // Representative observations, including negative coordinates.
    double[][] observations = {
        { 1.0, 1.0 },
        { 2.0, 1.0 },
        { 1.0, 2.0 },
        { -2.0, 1.0 },
        { 1.0, -2.0 }
    };

    for (double[] obs : observations) {
        // Expected prediction: intercept + w1 * x1 + w2 * x2.
        double exp = 1.0 + 2.0 * obs[0] + 3.0 * obs[1];

        TestUtils.assertEquals(exp, mdl.apply(new DenseLocalOnHeapVector(obs)), PRECISION);
    }
}
Aggregations