Example 1 with MLPArchitecture

Use of org.apache.ignite.ml.nn.architecture.MLPArchitecture in project ignite by apache.

The class MLPGroupTrainerTest, method doTestXOR.

/**
 * Test training of 'xor' by {@link MLPGroupUpdateTrainer}.
 */
private <U extends Serializable> void doTestXOR(UpdatesStrategy<? super MultilayerPerceptron, U> stgy) {
    int samplesCnt = 1000;
    Matrix xorInputs = new DenseLocalOnHeapMatrix(new double[][] { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }, StorageConstants.ROW_STORAGE_MODE).transpose();
    Matrix xorOutputs = new DenseLocalOnHeapMatrix(new double[][] { { 0.0 }, { 1.0 }, { 1.0 }, { 0.0 } }, StorageConstants.ROW_STORAGE_MODE).transpose();
    MLPArchitecture conf = new MLPArchitecture(2).withAddedLayer(10, true, Activators.RELU).withAddedLayer(1, false, Activators.SIGMOID);
    IgniteCache<Integer, LabeledVector<Vector, Vector>> cache = LabeledVectorsCache.createNew(ignite);
    String cacheName = cache.getName();
    Random rnd = new Random(12345L);
    try (IgniteDataStreamer<Integer, LabeledVector<Vector, Vector>> streamer = ignite.dataStreamer(cacheName)) {
        streamer.perNodeBufferSize(10000);
        for (int i = 0; i < samplesCnt; i++) {
            int col = Math.abs(rnd.nextInt()) % 4;
            streamer.addData(i, new LabeledVector<>(xorInputs.getCol(col), xorOutputs.getCol(col)));
        }
    }
    int totalCnt = 30;
    int failCnt = 0;
    double maxFailRatio = 0.3;
    MLPGroupUpdateTrainer<U> trainer = MLPGroupUpdateTrainer.getDefault(ignite).withSyncPeriod(3).withTolerance(0.001).withMaxGlobalSteps(100).withUpdateStrategy(stgy);
    for (int i = 0; i < totalCnt; i++) {
        MLPGroupUpdateTrainerCacheInput trainerInput = new MLPGroupUpdateTrainerCacheInput(conf, new RandomInitializer(new Random(123L + i)), 6, cache, 10, new Random(123L + i));
        MultilayerPerceptron mlp = trainer.train(trainerInput);
        Matrix predict = mlp.apply(xorInputs);
        Tracer.showAscii(predict);
        X.println(xorOutputs.getRow(0).minus(predict.getRow(0)).kNorm(2) + "");
        failCnt += TestUtils.checkIsInEpsilonNeighbourhoodBoolean(xorOutputs.getRow(0), predict.getRow(0), 5E-1) ? 0 : 1;
    }
    double failRatio = (double) failCnt / totalCnt;
    System.out.println("Fail percentage: " + (failRatio * 100) + "%.");
    assertTrue(failRatio < maxFailRatio);
}
Also used: MLPArchitecture (org.apache.ignite.ml.nn.architecture.MLPArchitecture), LabeledVector (org.apache.ignite.ml.structures.LabeledVector), Matrix (org.apache.ignite.ml.math.Matrix), DenseLocalOnHeapMatrix (org.apache.ignite.ml.math.impls.matrix.DenseLocalOnHeapMatrix), Random (java.util.Random), RandomInitializer (org.apache.ignite.ml.nn.initializers.RandomInitializer)
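
A note on the data layout in this example: both XOR matrices are built one sample per row and then transposed, so each column holds one sample, and getCol(col) pairs an input column with its label column. The sketch below reproduces the same sampling scheme with plain Java arrays (no Ignite dependencies; the class and variable names are illustrative only):

import java.util.Random;

public class XorSamplingSketch {
    public static void main(String[] args) {
        // Four XOR samples, indexed 0..3; inputs[i] is paired with labels[i].
        double[][] inputs = { { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 } };
        double[] labels = { 0, 1, 1, 0 };
        Random rnd = new Random(12345L);
        for (int i = 0; i < 5; i++) {
            // rnd.nextInt(4) is the idiomatic equivalent of Math.abs(rnd.nextInt()) % 4 above.
            int col = rnd.nextInt(4);
            System.out.printf("sample %d: (%.0f, %.0f) -> %.0f%n",
                col, inputs[col][0], inputs[col][1], labels[col]);
        }
    }
}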

Example 2 with MLPArchitecture

Use of org.apache.ignite.ml.nn.architecture.MLPArchitecture in project ignite by apache.

The class MLPTest, method paramsCountTest.

/**
 * Tests that the parameters count is computed correctly.
 */
@Test
public void paramsCountTest() {
    int inputSize = 10;
    int layerWithBiasNeuronsCnt = 13;
    int layerWithoutBiasNeuronsCnt = 17;
    MLPArchitecture conf = new MLPArchitecture(inputSize).withAddedLayer(layerWithBiasNeuronsCnt, true, Activators.SIGMOID).withAddedLayer(layerWithoutBiasNeuronsCnt, false, Activators.SIGMOID);
    Assert.assertEquals(layerWithBiasNeuronsCnt * inputSize + layerWithBiasNeuronsCnt + (layerWithoutBiasNeuronsCnt * layerWithBiasNeuronsCnt), conf.parametersCount());
}
Also used: MLPArchitecture (org.apache.ignite.ml.nn.architecture.MLPArchitecture), Test (org.junit.Test)
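
The expected value in this assertion is easy to verify by hand: the first layer contributes 13 × 10 = 130 weights plus 13 biases, and the second, bias-free layer contributes 17 × 13 = 221 weights, for 364 parameters in total. As a one-line plain-Java check (independent of the Ignite API):

int expected = 13 * 10 + 13 + 17 * 13; // 130 weights + 13 biases + 221 weights = 364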

Example 3 with MLPArchitecture

Use of org.apache.ignite.ml.nn.architecture.MLPArchitecture in project ignite by apache.

The class MLPTest, method testDifferentiation.

/**
 * Test differentiation.
 */
@Test
public void testDifferentiation() {
    int inputSize = 2;
    int firstLayerNeuronsCnt = 1;
    double w10 = 0.1;
    double w11 = 0.2;
    MLPArchitecture conf = new MLPArchitecture(inputSize).withAddedLayer(firstLayerNeuronsCnt, false, Activators.SIGMOID);
    MultilayerPerceptron mlp = new MultilayerPerceptron(conf);
    mlp.setWeight(1, 0, 0, w10);
    mlp.setWeight(1, 1, 0, w11);
    double x0 = 1.0;
    double x1 = 3.0;
    Matrix inputs = new DenseLocalOnHeapMatrix(new double[][] { { x0, x1 } }).transpose();
    double ytt = 1.0;
    Matrix truth = new DenseLocalOnHeapMatrix(new double[][] { { ytt } }).transpose();
    Vector grad = mlp.differentiateByParameters(LossFunctions.MSE, inputs, truth);
    // Let yt be the ground truth value and let z = sigma(w10 * x0 + w11 * x1).
    // d/dw1i [(yt - z)^2] = 2 * (yt - z) * (-1) * z * (1 - z) * xi
    //                     = -2 * (yt - z) * z * (1 - z) * xi.
    IgniteTriFunction<Double, Vector, Vector, Vector> partialDer = (yt, w, x) -> {
        Double z = Activators.SIGMOID.apply(w.dot(x));
        return x.copy().map(xi -> -2 * (yt - z) * z * (1 - z) * xi);
    };
    Vector weightsVec = mlp.weights(1).getRow(0);
    Tracer.showAscii(weightsVec);
    Vector trueGrad = partialDer.apply(ytt, weightsVec, inputs.getCol(0));
    Tracer.showAscii(trueGrad);
    Tracer.showAscii(grad);
    Assert.assertEquals(mlp.architecture().parametersCount(), grad.size());
    Assert.assertEquals(trueGrad, grad);
}
Also used: TestUtils (org.apache.ignite.ml.TestUtils), Vector (org.apache.ignite.ml.math.Vector), Matrix (org.apache.ignite.ml.math.Matrix), DenseLocalOnHeapMatrix (org.apache.ignite.ml.math.impls.matrix.DenseLocalOnHeapMatrix), DenseLocalOnHeapVector (org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector), IgniteTriFunction (org.apache.ignite.ml.math.functions.IgniteTriFunction), LossFunctions (org.apache.ignite.ml.optimization.LossFunctions), Tracer (org.apache.ignite.ml.math.Tracer), MLPArchitecture (org.apache.ignite.ml.nn.architecture.MLPArchitecture), Test (org.junit.Test), Assert (org.junit.Assert)
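
The derivative in the comment above can be checked numerically without any Ignite code. The sketch below plugs the test's constants into the same formula; sigma is the standard logistic sigmoid, written out by hand (the class name is illustrative only):

public class GradientCheckSketch {
    public static void main(String[] args) {
        double w10 = 0.1, w11 = 0.2, x0 = 1.0, x1 = 3.0, yt = 1.0;
        // z = sigma(w10 * x0 + w11 * x1) = sigma(0.7), roughly 0.6682.
        double z = 1.0 / (1.0 + Math.exp(-(w10 * x0 + w11 * x1)));
        // dL/dw1i = -2 * (yt - z) * z * (1 - z) * xi, as derived in the comment above.
        double dW10 = -2 * (yt - z) * z * (1 - z) * x0;
        double dW11 = -2 * (yt - z) * z * (1 - z) * x1;
        // Prints roughly (-0.1471, -0.4414).
        System.out.printf("grad = (%.6f, %.6f)%n", dW10, dW11);
    }
}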

Example 4 with MLPArchitecture

Use of org.apache.ignite.ml.nn.architecture.MLPArchitecture in project ignite by apache.

The class MLPTest, method testSimpleMLPPrediction.

/**
 * Tests that an MLP with 2 layers, 1 neuron in each layer and a weight equal to 1 is equivalent to the sigmoid function.
 */
@Test
public void testSimpleMLPPrediction() {
    MLPArchitecture conf = new MLPArchitecture(1).withAddedLayer(1, false, Activators.SIGMOID);
    MultilayerPerceptron mlp = new MultilayerPerceptron(conf, new MLPConstInitializer(1));
    int input = 2;
    Matrix predict = mlp.apply(new DenseLocalOnHeapMatrix(new double[][] { { input } }));
    Assert.assertEquals(predict, new DenseLocalOnHeapMatrix(new double[][] { { Activators.SIGMOID.apply(input) } }));
}
Also used: Matrix (org.apache.ignite.ml.math.Matrix), DenseLocalOnHeapMatrix (org.apache.ignite.ml.math.impls.matrix.DenseLocalOnHeapMatrix), MLPArchitecture (org.apache.ignite.ml.nn.architecture.MLPArchitecture), Test (org.junit.Test)
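
The expected output is easy to verify by hand: with a single constant weight of 1 and no bias, the network computes sigma(1 · 2) = sigma(2), assuming Activators.SIGMOID is the standard logistic function. A one-line plain-Java check:

double expected = 1.0 / (1.0 + Math.exp(-2.0)); // sigma(2), roughly 0.8808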

Example 5 with MLPArchitecture

Use of org.apache.ignite.ml.nn.architecture.MLPArchitecture in project ignite by apache.

The class MLPGroupTrainerExample, method main.

/**
 * Executes example.
 *
 * @param args Command line arguments, none required.
 */
public static void main(String[] args) throws InterruptedException {
    // IMPL NOTE based on MLPGroupTrainerTest#testXOR
    System.out.println(">>> Distributed multilayer perceptron example started.");
    // Start ignite grid.
    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        System.out.println(">>> Ignite grid started.");
        // Create an IgniteThread; the cache-backed training below must run inside
        // an IgniteThread because the example creates an Ignite cache internally.
        IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), MLPGroupTrainerExample.class.getSimpleName(), () -> {
            int samplesCnt = 10000;
            Matrix xorInputs = new DenseLocalOnHeapMatrix(new double[][] { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }, StorageConstants.ROW_STORAGE_MODE).transpose();
            Matrix xorOutputs = new DenseLocalOnHeapMatrix(new double[][] { { 0.0 }, { 1.0 }, { 1.0 }, { 0.0 } }, StorageConstants.ROW_STORAGE_MODE).transpose();
            MLPArchitecture conf = new MLPArchitecture(2).withAddedLayer(10, true, Activators.RELU).withAddedLayer(1, false, Activators.SIGMOID);
            IgniteCache<Integer, LabeledVector<Vector, Vector>> cache = LabeledVectorsCache.createNew(ignite);
            String cacheName = cache.getName();
            Random rnd = new Random(12345L);
            try (IgniteDataStreamer<Integer, LabeledVector<Vector, Vector>> streamer = ignite.dataStreamer(cacheName)) {
                streamer.perNodeBufferSize(100);
                for (int i = 0; i < samplesCnt; i++) {
                    int col = Math.abs(rnd.nextInt()) % 4;
                    streamer.addData(i, new LabeledVector<>(xorInputs.getCol(col), xorOutputs.getCol(col)));
                }
            }
            int totalCnt = 100;
            int failCnt = 0;
            MLPGroupUpdateTrainer<RPropParameterUpdate> trainer = MLPGroupUpdateTrainer.getDefault(ignite).withSyncPeriod(3).withTolerance(0.001).withMaxGlobalSteps(20);
            for (int i = 0; i < totalCnt; i++) {
                MLPGroupUpdateTrainerCacheInput trainerInput = new MLPGroupUpdateTrainerCacheInput(conf, new RandomInitializer(rnd), 6, cache, 10);
                MultilayerPerceptron mlp = trainer.train(trainerInput);
                Matrix predict = mlp.apply(xorInputs);
                System.out.println(">>> Prediction data at step " + i + " of total " + totalCnt + ":");
                Tracer.showAscii(predict);
                System.out.println("Difference estimate: " + xorOutputs.getRow(0).minus(predict.getRow(0)).kNorm(2));
                failCnt += closeEnough(xorOutputs.getRow(0), predict.getRow(0)) ? 0 : 1;
            }
            double failRatio = (double) failCnt / totalCnt;
            System.out.println("\n>>> Fail percentage: " + (failRatio * 100) + "%.");
            System.out.println("\n>>> Distributed multilayer perceptron example completed.");
        });
        igniteThread.start();
        igniteThread.join();
    }
}
Also used: MLPArchitecture (org.apache.ignite.ml.nn.architecture.MLPArchitecture), LabeledVector (org.apache.ignite.ml.structures.LabeledVector), Matrix (org.apache.ignite.ml.math.Matrix), DenseLocalOnHeapMatrix (org.apache.ignite.ml.math.impls.matrix.DenseLocalOnHeapMatrix), MultilayerPerceptron (org.apache.ignite.ml.nn.MultilayerPerceptron), MLPGroupUpdateTrainerCacheInput (org.apache.ignite.ml.nn.MLPGroupUpdateTrainerCacheInput), RandomInitializer (org.apache.ignite.ml.nn.initializers.RandomInitializer), RPropParameterUpdate (org.apache.ignite.ml.optimization.updatecalculators.RPropParameterUpdate), Random (java.util.Random), Ignite (org.apache.ignite.Ignite), IgniteThread (org.apache.ignite.thread.IgniteThread)
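
The "difference estimate" printed at each step is the 2-norm (Euclidean length) of the residual between the target row and the prediction row, which is what minus(...).kNorm(2) computes above. A plain-Java rendering of the same quantity, with hypothetical prediction values for illustration:

double[] target = { 0.0, 1.0, 1.0, 0.0 };
double[] predict = { 0.05, 0.93, 0.91, 0.08 }; // hypothetical MLP outputs
double sumSq = 0.0;
for (int i = 0; i < target.length; i++) {
    double d = target[i] - predict[i];
    sumSq += d * d;
}
double diffEstimate = Math.sqrt(sumSq); // the L2 distance printed at each step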

Aggregations

MLPArchitecture (org.apache.ignite.ml.nn.architecture.MLPArchitecture): 12 usages
Matrix (org.apache.ignite.ml.math.Matrix): 10 usages
DenseLocalOnHeapMatrix (org.apache.ignite.ml.math.impls.matrix.DenseLocalOnHeapMatrix): 9 usages
Test (org.junit.Test): 7 usages
Random (java.util.Random): 5 usages
DenseLocalOnHeapVector (org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector): 5 usages
Vector (org.apache.ignite.ml.math.Vector): 4 usages
MultilayerPerceptron (org.apache.ignite.ml.nn.MultilayerPerceptron): 4 usages
LabeledVector (org.apache.ignite.ml.structures.LabeledVector): 3 usages
Stream (java.util.stream.Stream): 2 usages
VectorUtils (org.apache.ignite.ml.math.VectorUtils): 2 usages
MLPGroupUpdateTrainerCacheInput (org.apache.ignite.ml.nn.MLPGroupUpdateTrainerCacheInput): 2 usages
RandomInitializer (org.apache.ignite.ml.nn.initializers.RandomInitializer): 2 usages
RPropParameterUpdate (org.apache.ignite.ml.optimization.updatecalculators.RPropParameterUpdate): 2 usages
Ignite (org.apache.ignite.Ignite): 1 usage
TestUtils (org.apache.ignite.ml.TestUtils): 1 usage
Tracer (org.apache.ignite.ml.math.Tracer): 1 usage
IgniteTriFunction (org.apache.ignite.ml.math.functions.IgniteTriFunction): 1 usage
SimpleMLPLocalBatchTrainerInput (org.apache.ignite.ml.nn.SimpleMLPLocalBatchTrainerInput): 1 usage
LossFunctions (org.apache.ignite.ml.optimization.LossFunctions): 1 usage