use of org.deeplearning4j.nn.graph.ComputationGraph in project deeplearning4j by deeplearning4j.
From the class TestSparkComputationGraph, method testBasic:
@Test
public void testBasic() throws Exception {
    JavaSparkContext sc = this.sc;

    RecordReader rr = new CSVRecordReader(0, ",");
    rr.initialize(new FileSplit(new ClassPathResource("iris.txt").getTempFileFromArchive()));
    MultiDataSetIterator iter = new RecordReaderMultiDataSetIterator.Builder(1)
                    .addReader("iris", rr)
                    .addInput("iris", 0, 3)
                    .addOutputOneHot("iris", 4, 3)
                    .build();

    List<MultiDataSet> list = new ArrayList<>(150);
    while (iter.hasNext()) list.add(iter.next());

    ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .learningRate(0.1)
                    .graphBuilder()
                    .addInputs("in")
                    .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in")
                    .addLayer("out", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .nIn(2).nOut(3).build(), "dense")
                    .setOutputs("out")
                    .pretrain(false).backprop(true)
                    .build();

    ComputationGraph cg = new ComputationGraph(config);
    cg.init();

    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);

    SparkComputationGraph scg = new SparkComputationGraph(sc, cg, tm);
    scg.setListeners(Collections.singleton((IterationListener) new ScoreIterationListener(1)));

    JavaRDD<MultiDataSet> rdd = sc.parallelize(list);
    scg.fitMultiDataSet(rdd);

    //Try: fitting using DataSet
    DataSetIterator iris = new IrisDataSetIterator(1, 150);
    List<DataSet> list2 = new ArrayList<>();
    while (iris.hasNext()) list2.add(iris.next());

    JavaRDD<DataSet> rddDS = sc.parallelize(list2);
    scg.fit(rddDS);
}
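The six positional arguments to ParameterAveragingTrainingMaster above are hard to read in isolation; they appear to correspond to (saveUpdater, numWorkers, rddDataSetNumExamples, batchSizePerWorker, averagingFrequency, prefetchNumBatches). A minimal sketch of the equivalent builder form that dl4j-spark also offers, assuming that mapping:

    TrainingMaster tm = new ParameterAveragingTrainingMaster.Builder(numExecutors(), 1)
                    .batchSizePerWorker(10)       //minibatch size used on each worker
                    .averagingFrequency(1)        //average parameters after every minibatch
                    .workerPrefetchNumBatches(0)  //no asynchronous prefetching
                    .saveUpdater(true)            //carry updater state across averaging steps
                    .build();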
use of org.deeplearning4j.nn.graph.ComputationGraph in project deeplearning4j by deeplearning4j.
From the class TestCompareParameterAveragingSparkVsSingleMachine, method testAverageEveryStepGraph:
@Test
public void testAverageEveryStepGraph() {
    //Idea: averaging every step with SGD (SGD updater + optimizer) is mathematically identical to doing
    // the learning on a single machine for synchronous distributed training
    //BUT: This is *ONLY* the case if all workers get an identical number of examples. This won't be the
    // case if we use RDD.randomSplit (which is what occurs if we use .fit(JavaRDD<DataSet>) on a data set
    // that needs splitting), which might give a number of examples that isn't divisible by the number of
    // workers (like 39 examples on 4 executors)
    //This is also ONLY the case when using the SGD updater
    int miniBatchSizePerWorker = 10;
    int nWorkers = 4;

    for (boolean saveUpdater : new boolean[] { true, false }) {
        JavaSparkContext sc = getContext(nWorkers);
        try {
            //Do training locally, for 3 minibatches
            int[] seeds = { 1, 2, 3 };

            ComputationGraph net = new ComputationGraph(getGraphConf(12345, Updater.SGD));
            net.init();
            INDArray initialParams = net.params().dup();

            for (int i = 0; i < seeds.length; i++) {
                DataSet ds = getOneDataSet(miniBatchSizePerWorker * nWorkers, seeds[i]);
                if (!saveUpdater)
                    net.setUpdater(null);
                net.fit(ds);
            }
            INDArray finalParams = net.params().dup();

            //Do training on Spark with one executor, for 3 separate minibatches
            TrainingMaster tm = getTrainingMaster(1, miniBatchSizePerWorker, saveUpdater);
            SparkComputationGraph sparkNet = new SparkComputationGraph(sc, getGraphConf(12345, Updater.SGD), tm);
            sparkNet.setCollectTrainingStats(true);
            INDArray initialSparkParams = sparkNet.getNetwork().params().dup();

            for (int i = 0; i < seeds.length; i++) {
                List<DataSet> list = getOneDataSetAsIndividalExamples(miniBatchSizePerWorker * nWorkers, seeds[i]);
                JavaRDD<DataSet> rdd = sc.parallelize(list);
                sparkNet.fit(rdd);
            }

            System.out.println(sparkNet.getSparkTrainingStats().statsAsString());
            INDArray finalSparkParams = sparkNet.getNetwork().params().dup();

            float[] fp = finalParams.data().asFloat();
            float[] fps = finalSparkParams.data().asFloat();
            System.out.println("Initial (Local) params: " + Arrays.toString(initialParams.data().asFloat()));
            System.out.println("Initial (Spark) params: " + Arrays.toString(initialSparkParams.data().asFloat()));
            System.out.println("Final (Local) params:   " + Arrays.toString(fp));
            System.out.println("Final (Spark) params:   " + Arrays.toString(fps));

            assertEquals(initialParams, initialSparkParams);
            assertNotEquals(initialParams, finalParams);
            assertArrayEquals(fp, fps, 1e-5f);

            double sparkScore = sparkNet.getScore();
            assertTrue(sparkScore > 0.0);
            assertEquals(net.score(), sparkScore, 1e-3);
        } finally {
            sc.stop();
        }
    }
}
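Why the "mathematically identical" claim in the comments holds, as a plain-SGD sketch: if every worker starts from the same parameters theta and takes one SGD step with learning rate eta on its own minibatch loss L_i, then averaging the resulting parameters over N workers gives

    (1/N) * sum_i (theta - eta * grad L_i(theta)) = theta - eta * (1/N) * sum_i grad L_i(theta)

which is exactly one SGD step on the combined minibatch. A stateful updater (momentum, RMSProp, Adam) applies a nonlinear transform to the gradient, so the average of the updates no longer equals the update of the average; that is why these tests pin Updater.SGD.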
use of org.deeplearning4j.nn.graph.ComputationGraph in project deeplearning4j by deeplearning4j.
From the class TestCompareParameterAveragingSparkVsSingleMachine, method testAverageEveryStepGraphCNN:
@Test
public void testAverageEveryStepGraphCNN() {
    //Idea: averaging every step with SGD (SGD updater + optimizer) is mathematically identical to doing
    // the learning on a single machine for synchronous distributed training
    //BUT: This is *ONLY* the case if all workers get an identical number of examples. This won't be the
    // case if we use RDD.randomSplit (which is what occurs if we use .fit(JavaRDD<DataSet>) on a data set
    // that needs splitting), which might give a number of examples that isn't divisible by the number of
    // workers (like 39 examples on 4 executors)
    //This is also ONLY the case when using the SGD updater
    int miniBatchSizePerWorker = 10;
    int nWorkers = 4;

    for (boolean saveUpdater : new boolean[] { true, false }) {
        JavaSparkContext sc = getContext(nWorkers);
        try {
            //Do training locally, for 3 minibatches
            int[] seeds = { 1, 2, 3 };

            ComputationGraph net = new ComputationGraph(getGraphConfCNN(12345, Updater.SGD));
            net.init();
            INDArray initialParams = net.params().dup();

            for (int i = 0; i < seeds.length; i++) {
                DataSet ds = getOneDataSetCNN(miniBatchSizePerWorker * nWorkers, seeds[i]);
                if (!saveUpdater)
                    net.setUpdater(null);
                net.fit(ds);
            }
            INDArray finalParams = net.params().dup();

            //Do training on Spark with one executor, for 3 separate minibatches
            TrainingMaster tm = getTrainingMaster(1, miniBatchSizePerWorker, saveUpdater);
            SparkComputationGraph sparkNet = new SparkComputationGraph(sc, getGraphConfCNN(12345, Updater.SGD), tm);
            sparkNet.setCollectTrainingStats(true);
            INDArray initialSparkParams = sparkNet.getNetwork().params().dup();

            for (int i = 0; i < seeds.length; i++) {
                List<DataSet> list = getOneDataSetAsIndividalExamplesCNN(miniBatchSizePerWorker * nWorkers, seeds[i]);
                JavaRDD<DataSet> rdd = sc.parallelize(list);
                sparkNet.fit(rdd);
            }

            System.out.println(sparkNet.getSparkTrainingStats().statsAsString());
            INDArray finalSparkParams = sparkNet.getNetwork().params().dup();

            System.out.println("Initial (Local) params: " + Arrays.toString(initialParams.data().asFloat()));
            System.out.println("Initial (Spark) params: " + Arrays.toString(initialSparkParams.data().asFloat()));
            System.out.println("Final (Local) params:   " + Arrays.toString(finalParams.data().asFloat()));
            System.out.println("Final (Spark) params:   " + Arrays.toString(finalSparkParams.data().asFloat()));

            assertArrayEquals(initialParams.data().asFloat(), initialSparkParams.data().asFloat(), 1e-8f);
            assertArrayEquals(finalParams.data().asFloat(), finalSparkParams.data().asFloat(), 1e-6f);

            double sparkScore = sparkNet.getScore();
            assertTrue(sparkScore > 0.0);
            assertEquals(net.score(), sparkScore, 1e-3);
        } finally {
            sc.stop();
        }
    }
}
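The getOneDataSetAsIndividalExamples / getOneDataSetAsIndividalExamplesCNN helpers are not reproduced on this page. A plausible sketch of what such a helper does, under the assumption that it builds one seeded DataSet and splits it into single-example DataSets for the TrainingMaster to regroup into per-worker minibatches (the name and shapes here are illustrative, not the project's actual code):

    //Hypothetical reconstruction, for illustration only
    private static List<DataSet> getOneDataSetAsIndividualExamples(int totalExamples, long seed) {
        Nd4j.getRandom().setSeed(seed);
        INDArray features = Nd4j.rand(totalExamples, 4);   //illustrative feature shape
        INDArray labels = Nd4j.rand(totalExamples, 3);     //illustrative label shape
        //DataSet.asList() splits along the example (first) dimension
        return new DataSet(features, labels).asList();
    }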
use of org.deeplearning4j.nn.graph.ComputationGraph in project deeplearning4j by deeplearning4j.
From the class BaseStatsListener, method getSessionID:
private String getSessionID(Model model) {
    if (model instanceof MultiLayerNetwork || model instanceof ComputationGraph)
        return sessionID;
    if (model instanceof Layer) {
        //Keep in mind MultiLayerNetwork implements Layer also...
        Layer l = (Layer) model;
        int layerIdx = l.getIndex();
        return sessionID + "_layer" + layerIdx;
    }
    //Should never happen
    return sessionID;
}
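Note the ordering of the instanceof checks: MultiLayerNetwork also implements Layer (as the comment warns), so the full-network types must be tested first. A whole network therefore reports under the plain sessionID, while a standalone Layer (for example during layerwise pretraining) reports under sessionID + "_layer" + index, keeping its stats separate.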
use of org.deeplearning4j.nn.graph.ComputationGraph in project deeplearning4j by deeplearning4j.
From the class TestRenders, method testHistogramComputationGraph:
@Test
public void testHistogramComputationGraph() throws Exception {
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .graphBuilder()
                    .addInputs("input")
                    .addLayer("cnn1", new ConvolutionLayer.Builder(2, 2)
                                    .stride(2, 2).nIn(1).nOut(3).build(), "input")
                    .addLayer("cnn2", new ConvolutionLayer.Builder(4, 4)
                                    .stride(2, 2).padding(1, 1).nIn(1).nOut(3).build(), "input")
                    .addLayer("max1", new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
                                    .kernelSize(2, 2).build(), "cnn1", "cnn2")
                    .addLayer("output", new OutputLayer.Builder().nIn(7 * 7 * 6).nOut(10).build(), "max1")
                    .setOutputs("output")
                    .inputPreProcessor("cnn1", new FeedForwardToCnnPreProcessor(28, 28, 1))
                    .inputPreProcessor("cnn2", new FeedForwardToCnnPreProcessor(28, 28, 1))
                    .inputPreProcessor("output", new CnnToFeedForwardPreProcessor(7, 7, 6))
                    .pretrain(false).backprop(true)
                    .build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();
    graph.setListeners(new HistogramIterationListener(1), new ScoreIterationListener(1));

    DataSetIterator mnist = new MnistDataSetIterator(32, 640, false, true, false, 12345);
    graph.fit(mnist);
}
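Where the nIn(7 * 7 * 6) on the output layer comes from, as a shape-arithmetic note using the standard convolution output-size formula out = (in + 2*padding - kernel)/stride + 1:

    cnn1: (28 - 2)/2 + 1 = 14          -> 14x14, 3 channels
    cnn2: (28 + 2*1 - 4)/2 + 1 = 14    -> 14x14, 3 channels

Feeding "max1" from both layers merges them along the channel dimension (3 + 3 = 6 channels at 14x14), and the 2x2 max-pooling halves the spatial size to 7x7 (its stride evidently defaults to the kernel size here, as the 7x7 input to the preprocessor implies). That yields 7 * 7 * 6 = 294 activations, which CnnToFeedForwardPreProcessor(7, 7, 6) flattens into the output layer's input.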