Use of org.apache.ignite.ml.nn.MultilayerPerceptron in project ignite by apache.
The class MLPGroupTrainerExample, method main.
/**
 * Executes example.
 *
 * @param args Command line arguments, none required.
 */
public static void main(String[] args) throws InterruptedException {
    // IMPL NOTE based on MLPGroupTrainerTest#testXOR
    System.out.println(">>> Distributed multilayer perceptron example started.");

    // Start ignite grid.
    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        System.out.println(">>> Ignite grid started.");

        // Create IgniteThread; we must work with SparseDistributedMatrix inside IgniteThread
        // because we create ignite cache internally.
        IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
            MLPGroupTrainerExample.class.getSimpleName(), () -> {
            int samplesCnt = 10000;

            // XOR training data; after the transpose each sample occupies one column.
            Matrix xorInputs = new DenseLocalOnHeapMatrix(
                new double[][] {{0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0}},
                StorageConstants.ROW_STORAGE_MODE).transpose();

            Matrix xorOutputs = new DenseLocalOnHeapMatrix(
                new double[][] {{0.0}, {1.0}, {1.0}, {0.0}},
                StorageConstants.ROW_STORAGE_MODE).transpose();

            // Network architecture: 2 inputs -> 10 hidden ReLU neurons -> 1 sigmoid output.
            MLPArchitecture conf = new MLPArchitecture(2)
                .withAddedLayer(10, true, Activators.RELU)
                .withAddedLayer(1, false, Activators.SIGMOID);

            IgniteCache<Integer, LabeledVector<Vector, Vector>> cache = LabeledVectorsCache.createNew(ignite);
            String cacheName = cache.getName();
            Random rnd = new Random(12345L);

            // Fill the cache with randomly sampled XOR points.
            try (IgniteDataStreamer<Integer, LabeledVector<Vector, Vector>> streamer =
                ignite.dataStreamer(cacheName)) {
                streamer.perNodeBufferSize(100);

                for (int i = 0; i < samplesCnt; i++) {
                    int col = Math.abs(rnd.nextInt()) % 4;
                    streamer.addData(i, new LabeledVector<>(xorInputs.getCol(col), xorOutputs.getCol(col)));
                }
            }

            int totalCnt = 100;
            int failCnt = 0;

            MLPGroupUpdateTrainer<RPropParameterUpdate> trainer = MLPGroupUpdateTrainer.getDefault(ignite)
                .withSyncPeriod(3)
                .withTolerance(0.001)
                .withMaxGlobalSteps(20);

            for (int i = 0; i < totalCnt; i++) {
                MLPGroupUpdateTrainerCacheInput trainerInput = new MLPGroupUpdateTrainerCacheInput(conf,
                    new RandomInitializer(rnd), 6, cache, 10);

                MultilayerPerceptron mlp = trainer.train(trainerInput);

                Matrix predict = mlp.apply(xorInputs);

                System.out.println(">>> Prediction data at step " + i + " of total " + totalCnt + ":");
                Tracer.showAscii(predict);

                System.out.println("Difference estimate: " + xorOutputs.getRow(0).minus(predict.getRow(0)).kNorm(2));

                failCnt += closeEnough(xorOutputs.getRow(0), predict.getRow(0)) ? 0 : 1;
            }

            double failRatio = (double)failCnt / totalCnt;

            System.out.println("\n>>> Fail percentage: " + (failRatio * 100) + "%.");
            System.out.println("\n>>> Distributed multilayer perceptron example completed.");
        });

        igniteThread.start();
        igniteThread.join();
    }
}
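The closeEnough helper used in the training loop above is not part of this listing. A minimal sketch of what it might look like, assuming success is simply a small L2 distance between prediction and truth (the threshold below is an illustrative guess, not taken from the source):

private static boolean closeEnough(Vector v1, Vector v2) {
    // Hypothetical criterion: L2 distance between prediction and truth below an assumed threshold.
    return v1.minus(v2).kNorm(2) < 0.5;
}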
Use of org.apache.ignite.ml.nn.MultilayerPerceptron in project ignite by apache.
The class MLPLocalTrainerExample, method main.
/**
 * Executes example.
 *
 * @param args Command line arguments, none required.
 */
public static void main(String[] args) {
    // IMPL NOTE based on MLPLocalTrainerTest#testXORRProp
    System.out.println(">>> Local multilayer perceptron example started.");

    // XOR training data; after the transpose each sample occupies one column.
    Matrix xorInputs = new DenseLocalOnHeapMatrix(
        new double[][] {{0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0}},
        StorageConstants.ROW_STORAGE_MODE).transpose();

    System.out.println("\n>>> Input data:");
    Tracer.showAscii(xorInputs);

    Matrix xorOutputs = new DenseLocalOnHeapMatrix(
        new double[][] {{0.0}, {1.0}, {1.0}, {0.0}},
        StorageConstants.ROW_STORAGE_MODE).transpose();

    // Network architecture: 2 inputs -> 10 hidden ReLU neurons -> 1 sigmoid output.
    MLPArchitecture conf = new MLPArchitecture(2)
        .withAddedLayer(10, true, Activators.RELU)
        .withAddedLayer(1, false, Activators.SIGMOID);

    SimpleMLPLocalBatchTrainerInput trainerInput = new SimpleMLPLocalBatchTrainerInput(conf,
        new Random(1234L), xorInputs, xorOutputs, 4);

    System.out.println("\n>>> Perform training.");

    // Train with MSE loss and RProp updates, up to 16000 iterations.
    MultilayerPerceptron mlp = new MLPLocalBatchTrainer<>(LossFunctions.MSE,
        RPropUpdateCalculator::new, 0.0001, 16000).train(trainerInput);

    System.out.println("\n>>> Apply model.");
    Matrix predict = mlp.apply(xorInputs);

    System.out.println("\n>>> Predicted data:");
    Tracer.showAscii(predict);

    System.out.println("\n>>> Reference expected data:");
    Tracer.showAscii(xorOutputs);

    System.out.println("\n>>> Difference estimate: " + xorOutputs.getRow(0).minus(predict.getRow(0)).kNorm(2));
    System.out.println("\n>>> Local multilayer perceptron example completed.");
}
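The trained model can also be queried for a single point. A minimal sketch reusing only the matrix API already shown above (the chosen point and expected output are illustrative):

// Build a 2x1 column matrix for the XOR point (1, 0); inputs are column-wise, as above.
Matrix point = new DenseLocalOnHeapMatrix(new double[][] {{1.0}, {0.0}}, StorageConstants.ROW_STORAGE_MODE);

// For a successfully trained network the printed value should be close to 1.0.
Tracer.showAscii(mlp.apply(point));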
Use of org.apache.ignite.ml.nn.MultilayerPerceptron in project ignite by apache.
The class MnistDistributed, method testMNISTDistributed.
/**
 * Run the distributed MLP classifier on the MNIST dataset.
 */
public void testMNISTDistributed() throws IOException {
    int samplesCnt = 60_000;
    int hiddenNeuronsCnt = 100;

    IgniteBiTuple<Stream<DenseLocalOnHeapVector>, Stream<DenseLocalOnHeapVector>> trainingAndTest =
        loadMnist(samplesCnt);

    // Load training mnist part into a cache.
    Stream<DenseLocalOnHeapVector> trainingMnist = trainingAndTest.get1();
    List<DenseLocalOnHeapVector> trainingMnistLst = trainingMnist.collect(Collectors.toList());

    IgniteCache<Integer, LabeledVector<Vector, Vector>> labeledVectorsCache = LabeledVectorsCache.createNew(ignite);
    loadIntoCache(trainingMnistLst, labeledVectorsCache);

    MLPGroupUpdateTrainer<RPropParameterUpdate> trainer = MLPGroupUpdateTrainer.getDefault(ignite)
        .withMaxGlobalSteps(35)
        .withSyncPeriod(2);

    // Architecture: FEATURES_CNT inputs -> 100 sigmoid hidden neurons -> 10 sigmoid outputs, one per digit.
    MLPArchitecture arch = new MLPArchitecture(FEATURES_CNT)
        .withAddedLayer(hiddenNeuronsCnt, true, Activators.SIGMOID)
        .withAddedLayer(10, false, Activators.SIGMOID);

    MultilayerPerceptron mdl = trainer.train(new MLPGroupUpdateTrainerCacheInput(arch, 9, labeledVectorsCache, 2000));

    // Evaluate on the test part: collapse each one-hot output column to a digit label.
    IgniteBiTuple<Matrix, Matrix> testDs = createDataset(trainingAndTest.get2(), 10_000, FEATURES_CNT);

    Vector truth = testDs.get2().foldColumns(VectorUtils::vec2Num);
    Vector predicted = mdl.apply(testDs.get1()).foldColumns(VectorUtils::vec2Num);

    Tracer.showAscii(truth);
    Tracer.showAscii(predicted);

    X.println("Accuracy: " + VectorUtils.zipWith(predicted, truth, (x, y) -> x.equals(y) ? 1.0 : 0.0).sum()
        / truth.size() * 100 + "%.");
}
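VectorUtils.vec2Num collapses a one-hot output column to a digit label before predictions are compared with ground truth. A minimal sketch of such a conversion, assuming the label is the index of the largest component (the real helper may differ):

static double vec2Num(Vector v) {
    // Return the index of the largest component, i.e. the predicted digit.
    int maxIdx = 0;

    for (int i = 1; i < v.size(); i++)
        if (v.get(i) > v.get(maxIdx))
            maxIdx = i;

    return maxIdx;
}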
Use of org.apache.ignite.ml.nn.MultilayerPerceptron in project ignite by apache.
The class MnistLocal, method tstMNISTLocal.
/**
 * Run nn classifier on MNIST using bi-indexed cache as a storage for the dataset.
 * To run this test, rename this method so that it starts with 'test'.
 *
 * @throws IOException In case of errors while loading the MNIST dataset.
 */
@Test
public void tstMNISTLocal() throws IOException {
    int samplesCnt = 60_000;
    int featCnt = 28 * 28;
    int hiddenNeuronsCnt = 100;

    IgniteBiTuple<Stream<DenseLocalOnHeapVector>, Stream<DenseLocalOnHeapVector>> trainingAndTest =
        loadMnist(samplesCnt);

    Stream<DenseLocalOnHeapVector> trainingMnistStream = trainingAndTest.get1();
    Stream<DenseLocalOnHeapVector> testMnistStream = trainingAndTest.get2();

    IgniteBiTuple<Matrix, Matrix> ds = createDataset(trainingMnistStream, samplesCnt, featCnt);
    IgniteBiTuple<Matrix, Matrix> testDs = createDataset(testMnistStream, 10_000, featCnt);

    // Architecture: 784 inputs -> 100 sigmoid hidden neurons -> 10 sigmoid outputs, one per digit.
    MLPArchitecture conf = new MLPArchitecture(featCnt)
        .withAddedLayer(hiddenNeuronsCnt, true, Activators.SIGMOID)
        .withAddedLayer(10, false, Activators.SIGMOID);

    SimpleMLPLocalBatchTrainerInput input = new SimpleMLPLocalBatchTrainerInput(conf, new Random(),
        ds.get1(), ds.get2(), 2000);

    X.println("Training started");
    long before = System.currentTimeMillis();

    // Time the training run: MSE loss, RProp updates, up to 200 iterations.
    MultilayerPerceptron mdl = new MLPLocalBatchTrainer<>(LossFunctions.MSE,
        () -> new RPropUpdateCalculator(0.1, 1.2, 0.5), 1E-7, 200).train(input);

    X.println("Training finished in " + (System.currentTimeMillis() - before));

    Vector predicted = mdl.apply(testDs.get1()).foldColumns(VectorUtils::vec2Num);
    Vector truth = testDs.get2().foldColumns(VectorUtils::vec2Num);

    Tracer.showAscii(truth);
    Tracer.showAscii(predicted);

    X.println("Accuracy: " + VectorUtils.zipWith(predicted, truth, (x, y) -> x.equals(y) ? 1.0 : 0.0).sum()
        / truth.size() * 100 + "%.");
}
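loadMnist and createDataset are helpers defined elsewhere in the test sources. As a rough sketch of what createDataset has to produce, assuming each MNIST vector stores featCnt pixel values followed by the digit label in its last component (this layout is an assumption, not confirmed by the listing):

static IgniteBiTuple<Matrix, Matrix> createDataset(Stream<DenseLocalOnHeapVector> stream, int samplesCnt, int featCnt) {
    // Features are stored column-wise, matching the convention used by mdl.apply above.
    Matrix features = new DenseLocalOnHeapMatrix(featCnt, samplesCnt);

    // One row per digit class (0-9), one-hot encoded per sample column.
    Matrix labels = new DenseLocalOnHeapMatrix(10, samplesCnt);

    List<DenseLocalOnHeapVector> lst = stream.limit(samplesCnt).collect(Collectors.toList());

    for (int i = 0; i < lst.size(); i++) {
        Vector v = lst.get(i);

        for (int j = 0; j < featCnt; j++)
            features.set(j, i, v.get(j));

        // One-hot encode the digit label assumed to sit in the last component.
        labels.set((int)v.get(featCnt), i, 1.0);
    }

    return new IgniteBiTuple<>(features, labels);
}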