Example use of org.deeplearning4j.nn.conf.preprocessor.FeedForwardToCnnPreProcessor in the deeplearning4j project:
class TestRenders, method testHistogramComputationGraph.
/**
 * Builds a small two-branch convolutional ComputationGraph over MNIST and
 * checks that fitting it with a HistogramIterationListener attached runs
 * without throwing.
 */
@Test
public void testHistogramComputationGraph() throws Exception {
    // Two parallel convolution branches over the same input, merged by a
    // max-pooling layer before the output layer.
    ComputationGraphConfiguration configuration = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .graphBuilder()
            .addInputs("input")
            .addLayer("cnn1",
                    new ConvolutionLayer.Builder(2, 2).stride(2, 2).nIn(1).nOut(3).build(),
                    "input")
            .addLayer("cnn2",
                    new ConvolutionLayer.Builder(4, 4).stride(2, 2).padding(1, 1).nIn(1).nOut(3).build(),
                    "input")
            .addLayer("max1",
                    new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX).kernelSize(2, 2).build(),
                    "cnn1", "cnn2")
            .addLayer("output",
                    new OutputLayer.Builder().nIn(7 * 7 * 6).nOut(10).build(),
                    "max1")
            .setOutputs("output")
            // The MNIST iterator emits flattened rows; reshape them to 1x28x28
            // for each convolutional branch.
            .inputPreProcessor("cnn1", new FeedForwardToCnnPreProcessor(28, 28, 1))
            .inputPreProcessor("cnn2", new FeedForwardToCnnPreProcessor(28, 28, 1))
            // Flatten the pooled 6-channel 7x7 activations for the dense output layer.
            .inputPreProcessor("output", new CnnToFeedForwardPreProcessor(7, 7, 6))
            .pretrain(false)
            .backprop(true)
            .build();

    ComputationGraph net = new ComputationGraph(configuration);
    net.init();
    // Histogram listener every iteration is the behavior under test;
    // score listener is attached alongside it.
    net.setListeners(new HistogramIterationListener(1), new ScoreIterationListener(1));

    DataSetIterator mnistIter = new MnistDataSetIterator(32, 640, false, true, false, 12345);
    net.fit(mnistIter);
}
Example use of org.deeplearning4j.nn.conf.preprocessor.FeedForwardToCnnPreProcessor in the deeplearning4j project:
class ComputationGraphConfigurationTest, method testJSONBasic2.
/**
 * Round-trips a multi-branch ComputationGraphConfiguration (two conv branches,
 * a merge via max pooling, and two paths into the output layer) through JSON
 * and asserts that both the JSON text and the deserialized configuration are
 * equal to the originals.
 */
@Test
public void testJSONBasic2() {
    ComputationGraphConfiguration original = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .graphBuilder()
            .addInputs("input")
            .addLayer("cnn1",
                    new ConvolutionLayer.Builder(2, 2).stride(2, 2).nIn(1).nOut(5).build(),
                    "input")
            .addLayer("cnn2",
                    new ConvolutionLayer.Builder(2, 2).stride(2, 2).nIn(1).nOut(5).build(),
                    "input")
            .addLayer("max1",
                    new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX).kernelSize(2, 2).build(),
                    "cnn1", "cnn2")
            .addLayer("dnn1", new DenseLayer.Builder().nOut(7).build(), "max1")
            .addLayer("max2", new SubsamplingLayer.Builder().build(), "max1")
            // "output" receives two inputs: the dense path and the second pooling path.
            .addLayer("output",
                    new OutputLayer.Builder().nIn(7).nOut(10).build(),
                    "dnn1", "max2")
            .setOutputs("output")
            .inputPreProcessor("cnn1", new FeedForwardToCnnPreProcessor(32, 32, 3))
            .inputPreProcessor("cnn2", new FeedForwardToCnnPreProcessor(32, 32, 3))
            .inputPreProcessor("dnn1", new CnnToFeedForwardPreProcessor(8, 8, 5))
            .pretrain(false)
            .backprop(true)
            .build();

    // Serialize, deserialize, and compare both representations.
    String json = original.toJson();
    ComputationGraphConfiguration restored = ComputationGraphConfiguration.fromJson(json);
    assertEquals(json, restored.toJson());
    assertEquals(original, restored);
}
Example use of org.deeplearning4j.nn.conf.preprocessor.FeedForwardToCnnPreProcessor in the deeplearning4j project:
class ConvolutionLayerSetupTest, method complete.
/**
 * Builds a fully specified LeNet-style configuration for 28x28 single-channel
 * input: one convolution layer, one max-pooling layer, and a softmax output,
 * with the CNN preprocessors set explicitly.
 *
 * @return a MultiLayerConfiguration.Builder with all layers and preprocessors configured
 */
public MultiLayerConfiguration.Builder complete() {
    final int numRows = 28;
    final int numColumns = 28;
    int nChannels = 1;
    int outputNum = 10;
    int iterations = 10;
    int seed = 123;

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list()
            // 10x10 kernel, stride 2, 6 output channels.
            .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
                    new int[] { 10, 10 }, new int[] { 2, 2 })
                    .nIn(nChannels).nOut(6).build())
            .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] { 2, 2 })
                    .weightInit(WeightInit.XAVIER)
                    .activation(Activation.RELU)
                    .build())
            // nIn is the flattened pooled volume: 5 * 5 * 1 * 6 (= 150).
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nIn(5 * 5 * 1 * 6)
                    .nOut(outputNum)
                    .weightInit(WeightInit.XAVIER)
                    .activation(Activation.SOFTMAX)
                    .build())
            // Reshape flat input rows into 1x28x28, and flatten the 6x5x5
            // pooled activations back out for the output layer.
            .inputPreProcessor(0, new FeedForwardToCnnPreProcessor(numRows, numColumns, nChannels))
            .inputPreProcessor(2, new CnnToFeedForwardPreProcessor(5, 5, 6))
            .backprop(true)
            .pretrain(false);
    return builder;
}
Example use of org.deeplearning4j.nn.conf.preprocessor.FeedForwardToCnnPreProcessor in the deeplearning4j project:
class TestComputationGraphNetwork, method testCnnFlatInputType1.
/**
 * Verifies the preprocessor-insertion logic driven by setInputTypes:
 * <ul>
 *   <li>convolutional input -> no preprocessor, nIn inferred from channels</li>
 *   <li>convolutionalFlat input -> a FeedForwardToCnnPreProcessor is inserted
 *       before the first CNN layer, with matching height/width/channels</li>
 *   <li>with a leading subsampling layer, the preprocessor is attached to that
 *       layer instead, and the downstream conv layer gets none</li>
 * </ul>
 */
@Test
public void testCnnFlatInputType1() {
    //First: check conv input type. Expect: no preprocessor, nIn set appropriately
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
            .graphBuilder()
            .addInputs("in")
            .setInputTypes(InputType.convolutional(10, 8, 3))
            .addLayer("layer",
                    new ConvolutionLayer.Builder().kernelSize(2, 2).padding(0, 0).stride(1, 1).build(),
                    "in")
            .addLayer("out", new OutputLayer.Builder().nOut(10).build(), "layer")
            .setOutputs("out")
            .pretrain(false)
            .backprop(true)
            .build();

    LayerVertex vertex = (LayerVertex) conf.getVertices().get("layer");
    FeedForwardLayer ffLayer = (FeedForwardLayer) vertex.getLayerConf().getLayer();
    assertEquals(3, ffLayer.getNIn());   // nIn inferred from the 3 input channels
    assertNull(vertex.getPreProcessor());

    //Check the equivalent config, but with flat conv data input instead
    //In this case, the only difference should be the addition of a preprocessor
    conf = new NeuralNetConfiguration.Builder()
            .graphBuilder()
            .addInputs("in")
            .setInputTypes(InputType.convolutionalFlat(10, 8, 3))
            .addLayer("layer",
                    new ConvolutionLayer.Builder().kernelSize(2, 2).padding(0, 0).stride(1, 1).build(),
                    "in")
            .addLayer("out", new OutputLayer.Builder().nOut(10).build(), "layer")
            .setOutputs("out")
            .pretrain(false)
            .backprop(true)
            .build();

    vertex = (LayerVertex) conf.getVertices().get("layer");
    ffLayer = (FeedForwardLayer) vertex.getLayerConf().getLayer();
    assertEquals(3, ffLayer.getNIn());
    assertNotNull(vertex.getPreProcessor());
    InputPreProcessor pp = vertex.getPreProcessor();
    assertTrue(pp instanceof FeedForwardToCnnPreProcessor);
    FeedForwardToCnnPreProcessor ffToCnn = (FeedForwardToCnnPreProcessor) pp;
    assertEquals(10, ffToCnn.getInputHeight());
    assertEquals(8, ffToCnn.getInputWidth());
    assertEquals(3, ffToCnn.getNumChannels());

    //Finally, check configuration with a subsampling layer
    conf = new NeuralNetConfiguration.Builder()
            .graphBuilder()
            .addInputs("in")
            .setInputTypes(InputType.convolutionalFlat(10, 8, 3))
            .addLayer("l0",
                    new SubsamplingLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0).build(),
                    "in")
            .addLayer("layer",
                    new ConvolutionLayer.Builder().kernelSize(2, 2).padding(0, 0).stride(1, 1).build(),
                    "l0")
            .addLayer("out", new OutputLayer.Builder().nOut(10).build(), "layer")
            .setOutputs("out")
            .pretrain(false)
            .backprop(true)
            .build();

    //Check subsampling layer: the preprocessor now belongs to the first layer in the chain
    vertex = (LayerVertex) conf.getVertices().get("l0");
    SubsamplingLayer subsampling = (SubsamplingLayer) vertex.getLayerConf().getLayer();
    assertNotNull(vertex.getPreProcessor());
    pp = vertex.getPreProcessor();
    assertTrue(pp instanceof FeedForwardToCnnPreProcessor);
    ffToCnn = (FeedForwardToCnnPreProcessor) pp;
    assertEquals(10, ffToCnn.getInputHeight());
    assertEquals(8, ffToCnn.getInputWidth());
    assertEquals(3, ffToCnn.getNumChannels());

    //Check dense layer: the conv layer downstream of l0 needs no preprocessor of its own
    vertex = (LayerVertex) conf.getVertices().get("layer");
    ffLayer = (FeedForwardLayer) vertex.getLayerConf().getLayer();
    assertEquals(3, ffLayer.getNIn());
    assertNull(vertex.getPreProcessor());
}
Aggregations