Use of org.apache.ignite.ml.nn.initializers.RandomInitializer in project ignite by apache.
The class MLPGroupTrainerTest, method doTestXOR:
/**
* Test training of 'xor' by {@link MLPGroupUpdateTrainer}.
*/
private <U extends Serializable> void doTestXOR(UpdatesStrategy<? super MultilayerPerceptron, U> stgy) {
    int samplesCnt = 1000;

    // XOR truth table; transposed so that each column is one sample.
    Matrix xorInputs = new DenseLocalOnHeapMatrix(
        new double[][] {{0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0}},
        StorageConstants.ROW_STORAGE_MODE).transpose();
    Matrix xorOutputs = new DenseLocalOnHeapMatrix(
        new double[][] {{0.0}, {1.0}, {1.0}, {0.0}},
        StorageConstants.ROW_STORAGE_MODE).transpose();

    // 2 inputs -> 10 hidden ReLU neurons (with bias) -> 1 sigmoid output.
    MLPArchitecture conf = new MLPArchitecture(2)
        .withAddedLayer(10, true, Activators.RELU)
        .withAddedLayer(1, false, Activators.SIGMOID);

    IgniteCache<Integer, LabeledVector<Vector, Vector>> cache = LabeledVectorsCache.createNew(ignite);
    String cacheName = cache.getName();
    Random rnd = new Random(12345L);

    // Stream samplesCnt labeled samples into the cache, each a random column of the XOR table.
    try (IgniteDataStreamer<Integer, LabeledVector<Vector, Vector>> streamer = ignite.dataStreamer(cacheName)) {
        streamer.perNodeBufferSize(10000);

        for (int i = 0; i < samplesCnt; i++) {
            int col = Math.abs(rnd.nextInt()) % 4;
            streamer.addData(i, new LabeledVector<>(xorInputs.getCol(col), xorOutputs.getCol(col)));
        }
    }

    int totalCnt = 30;
    int failCnt = 0;
    double maxFailRatio = 0.3;

    MLPGroupUpdateTrainer<U> trainer = MLPGroupUpdateTrainer.getDefault(ignite)
        .withSyncPeriod(3)
        .withTolerance(0.001)
        .withMaxGlobalSteps(100)
        .withUpdateStrategy(stgy);

    // Training is stochastic; allow up to maxFailRatio of the totalCnt runs to miss the target.
    for (int i = 0; i < totalCnt; i++) {
        MLPGroupUpdateTrainerCacheInput trainerInput = new MLPGroupUpdateTrainerCacheInput(
            conf, new RandomInitializer(new Random(123L + i)), 6, cache, 10, new Random(123L + i));

        MultilayerPerceptron mlp = trainer.train(trainerInput);
        Matrix predict = mlp.apply(xorInputs);

        Tracer.showAscii(predict);
        X.println(xorOutputs.getRow(0).minus(predict.getRow(0)).kNorm(2) + "");

        failCnt += TestUtils.checkIsInEpsilonNeighbourhoodBoolean(xorOutputs.getRow(0), predict.getRow(0), 5E-1) ? 0 : 1;
    }

    double failRatio = (double)failCnt / totalCnt;

    System.out.println("Fail percentage: " + (failRatio * 100) + "%.");

    assertTrue(failRatio < maxFailRatio);
}
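The test is generic in the update strategy, so the XOR run above can be driven with any update calculator. Below is a minimal sketch of a caller using plain gradient descent; the method name testXORGD, the 0.3 learning rate, and the use of the SimpleGDUpdateCalculator / SimpleGDParameterUpdate reducers from the Ignite 2.x ML optimization package are illustrative assumptions, not the verbatim test suite.

/** Hypothetical caller: trains XOR with a simple gradient-descent strategy. */
public void testXORGD() {
    doTestXOR(new UpdatesStrategy<>(
        new SimpleGDUpdateCalculator(0.3),   // step size is an illustrative choice
        SimpleGDParameterUpdate::sumLocal,   // combine updates computed within one node
        SimpleGDParameterUpdate::avg));      // average updates across nodes
}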
Use of org.apache.ignite.ml.nn.initializers.RandomInitializer in project ignite by apache.
The class MLPGroupTrainerExample, method main:
/**
* Executes example.
*
* @param args Command line arguments, none required.
*/
public static void main(String[] args) throws InterruptedException {
    // IMPL NOTE based on MLPGroupTrainerTest#testXOR.
    System.out.println(">>> Distributed multilayer perceptron example started.");

    // Start ignite grid.
    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        System.out.println(">>> Ignite grid started.");

        // Create an IgniteThread: we must work with SparseDistributedMatrix inside an IgniteThread
        // because we create an ignite cache internally.
        IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
            MLPGroupTrainerExample.class.getSimpleName(), () -> {
            int samplesCnt = 10000;

            // XOR truth table; transposed so that each column is one sample.
            Matrix xorInputs = new DenseLocalOnHeapMatrix(
                new double[][] {{0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0}},
                StorageConstants.ROW_STORAGE_MODE).transpose();
            Matrix xorOutputs = new DenseLocalOnHeapMatrix(
                new double[][] {{0.0}, {1.0}, {1.0}, {0.0}},
                StorageConstants.ROW_STORAGE_MODE).transpose();

            // 2 inputs -> 10 hidden ReLU neurons (with bias) -> 1 sigmoid output.
            MLPArchitecture conf = new MLPArchitecture(2)
                .withAddedLayer(10, true, Activators.RELU)
                .withAddedLayer(1, false, Activators.SIGMOID);

            IgniteCache<Integer, LabeledVector<Vector, Vector>> cache = LabeledVectorsCache.createNew(ignite);
            String cacheName = cache.getName();
            Random rnd = new Random(12345L);

            // Stream samplesCnt labeled samples into the cache, each a random column of the XOR table.
            try (IgniteDataStreamer<Integer, LabeledVector<Vector, Vector>> streamer = ignite.dataStreamer(cacheName)) {
                streamer.perNodeBufferSize(100);

                for (int i = 0; i < samplesCnt; i++) {
                    int col = Math.abs(rnd.nextInt()) % 4;
                    streamer.addData(i, new LabeledVector<>(xorInputs.getCol(col), xorOutputs.getCol(col)));
                }
            }

            int totalCnt = 100;
            int failCnt = 0;

            // The default trainer uses the RProp update strategy.
            MLPGroupUpdateTrainer<RPropParameterUpdate> trainer = MLPGroupUpdateTrainer.getDefault(ignite)
                .withSyncPeriod(3)
                .withTolerance(0.001)
                .withMaxGlobalSteps(20);

            for (int i = 0; i < totalCnt; i++) {
                MLPGroupUpdateTrainerCacheInput trainerInput =
                    new MLPGroupUpdateTrainerCacheInput(conf, new RandomInitializer(rnd), 6, cache, 10);

                MultilayerPerceptron mlp = trainer.train(trainerInput);
                Matrix predict = mlp.apply(xorInputs);

                System.out.println(">>> Prediction data at step " + i + " of total " + totalCnt + ":");
                Tracer.showAscii(predict);
                System.out.println("Difference estimate: " + xorOutputs.getRow(0).minus(predict.getRow(0)).kNorm(2));

                // closeEnough is a private helper of this example class (see the sketch below).
                failCnt += closeEnough(xorOutputs.getRow(0), predict.getRow(0)) ? 0 : 1;
            }

            double failRatio = (double)failCnt / totalCnt;

            System.out.println("\n>>> Fail percentage: " + (failRatio * 100) + "%.");
            System.out.println("\n>>> Distributed multilayer perceptron example completed.");
        });

        igniteThread.start();
        igniteThread.join();
    }
}
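The example relies on a small private helper, closeEnough, that the snippet does not show. A plausible sketch, mirroring the 5E-1 tolerance the test above passes to TestUtils.checkIsInEpsilonNeighbourhoodBoolean (the body is an assumption, not the verbatim source):

/** Sketch of the helper referenced above: counts a run as successful when the
 *  predicted row is within Euclidean distance 0.5 of the expected XOR outputs. */
private static boolean closeEnough(Vector expected, Vector predicted) {
    return expected.minus(predicted).kNorm(2) < 5E-1;
}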
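Both snippets hand a RandomInitializer to MLPGroupUpdateTrainerCacheInput to produce the perceptron's starting weights and biases. A minimal standalone sketch of the same initializer, assuming a MultilayerPerceptron(MLPArchitecture, MLPInitializer) constructor in the same Ignite ML module (the 42L seed is arbitrary):

// Build an untrained MLP whose weight and bias matrices are filled by
// RandomInitializer from a seeded Random, independent of any trainer.
MLPArchitecture arch = new MLPArchitecture(2)
    .withAddedLayer(10, true, Activators.RELU)
    .withAddedLayer(1, false, Activators.SIGMOID);

MultilayerPerceptron mlp = new MultilayerPerceptron(arch, new RandomInitializer(new Random(42L)));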