Usage example of org.deeplearning4j.nn.conf.ComputationGraphConfiguration in the project deeplearning4j (by deeplearning4j):
class ComputationGraphTestRNN, method testRnnTimeStep2dInput.
@Test
public void testRnnTimeStep2dInput() {
    // Verifies that rnnTimeStep() on a ComputationGraph produces identical activations
    // whether the input is supplied as one full 3d time series, as per-step 2d inputs,
    // or as per-step 3d inputs of shape [miniBatch, nIn, 1].
    Nd4j.getRandom().setSeed(12345);
    int timeSeriesLength = 6;

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(5).nOut(7)
                                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "0")
                    // Note: the original builder called weightInit(WeightInit.DISTRIBUTION) twice
                    // on this layer; the redundant duplicate call has been removed.
                    .addLayer("2", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(8).nOut(4)
                                    .activation(Activation.SOFTMAX).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "1")
                    .setOutputs("2").build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray input3d = Nd4j.rand(new int[] {3, 5, timeSeriesLength});
    INDArray out3d = graph.rnnTimeStep(input3d)[0];
    // JUnit convention: expected value first, actual second (arguments were swapped before;
    // the assertion still passed/failed correctly, but failure messages were misleading).
    assertArrayEquals(new int[] {3, 4, timeSeriesLength}, out3d.shape());

    graph.rnnClearPreviousState();
    for (int i = 0; i < timeSeriesLength; i++) {
        // Feed the i-th time step as a 2d [miniBatch, nIn] input
        INDArray input2d = input3d.tensorAlongDimension(i, 1, 0);
        INDArray out2d = graph.rnnTimeStep(input2d)[0];
        assertArrayEquals(new int[] {3, 4}, out2d.shape());
        INDArray expOut2d = out3d.tensorAlongDimension(i, 1, 0);
        assertEquals(expOut2d, out2d);
    }

    //Check same but for input of size [3,5,1]. Expect [3,4,1] out
    graph.rnnClearPreviousState();
    for (int i = 0; i < timeSeriesLength; i++) {
        INDArray temp = Nd4j.create(new int[] {3, 5, 1});
        temp.tensorAlongDimension(0, 1, 0).assign(input3d.tensorAlongDimension(i, 1, 0));
        INDArray out3dSlice = graph.rnnTimeStep(temp)[0];
        assertArrayEquals(new int[] {3, 4, 1}, out3dSlice.shape());
        // assertEquals gives a diagnostic failure message, unlike assertTrue(a.equals(b))
        assertEquals(out3d.tensorAlongDimension(i, 1, 0), out3dSlice.tensorAlongDimension(0, 1, 0));
    }
}
Usage example of org.deeplearning4j.nn.conf.ComputationGraphConfiguration in the project deeplearning4j (by deeplearning4j):
class ComputationGraphTestRNN, method checkMaskArrayClearance.
@Test
public void checkMaskArrayClearance() {
    // Mask arrays set internally during fitting must be cleared afterwards, for every
    // fit(...) overload and for both standard BPTT and truncated BPTT.
    for (boolean tbptt : new boolean[] {true, false}) {
        //Simple "does it throw an exception" type test...
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().iterations(1).seed(12345)
                        .graphBuilder().addInputs("in")
                        .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
                                        .activation(Activation.IDENTITY).nIn(1).nOut(1).build(), "in")
                        .setOutputs("out")
                        .backpropType(tbptt ? BackpropType.TruncatedBPTT : BackpropType.Standard)
                        .tBPTTForwardLength(8).tBPTTBackwardLength(8).build();
        ComputationGraph net = new ComputationGraph(conf);
        net.init();

        // Single time series with feature and label masks of all ones
        MultiDataSet data = new MultiDataSet(new INDArray[] {Nd4j.linspace(1, 10, 10).reshape(1, 1, 10)},
                        new INDArray[] {Nd4j.linspace(2, 20, 10).reshape(1, 1, 10)},
                        new INDArray[] {Nd4j.ones(10)}, new INDArray[] {Nd4j.ones(10)});

        // fit(MultiDataSet)
        net.fit(data);
        assertMasksCleared(net);

        // fit(DataSet)
        DataSet ds = new DataSet(data.getFeatures(0), data.getLabels(0), data.getFeaturesMaskArray(0),
                        data.getLabelsMaskArray(0));
        net.fit(ds);
        assertMasksCleared(net);

        // fit(INDArray[], INDArray[], INDArray[], INDArray[])
        net.fit(data.getFeatures(), data.getLabels(), data.getFeaturesMaskArrays(), data.getLabelsMaskArrays());
        assertMasksCleared(net);

        // fit(MultiDataSetIterator)
        MultiDataSetIterator iter = new IteratorMultiDataSetIterator(
                        Collections.singletonList((org.nd4j.linalg.dataset.api.MultiDataSet) data).iterator(), 1);
        net.fit(iter);
        assertMasksCleared(net);

        // fit(DataSetIterator)
        DataSetIterator iter2 = new IteratorDataSetIterator(Collections.singletonList(ds).iterator(), 1);
        net.fit(iter2);
        assertMasksCleared(net);
    }
}

/** Asserts that no input, label, or per-layer mask arrays remain set on the network. */
private static void assertMasksCleared(ComputationGraph net) {
    assertNull(net.getInputMaskArrays());
    assertNull(net.getLabelMaskArrays());
    for (Layer l : net.getLayers()) {
        assertNull(l.getMaskArray());
    }
}
Usage example of org.deeplearning4j.nn.conf.ComputationGraphConfiguration in the project deeplearning4j (by deeplearning4j):
class GradientCheckTestsComputationGraph, method testCnnDepthMerge.
@Test
public void testCnnDepthMerge() {
    // Gradient check for a graph merging two parallel convolution layers along the depth dimension.
    Nd4j.getRandom().setSeed(12345);

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 0.1))
                    .updater(Updater.NONE).learningRate(1.0).graphBuilder()
                    .addInputs("input")
                    .addLayer("l1", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0)
                                    .nIn(2).nOut(2).activation(Activation.TANH).build(), "input")
                    .addLayer("l2", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0)
                                    .nIn(2).nOut(2).activation(Activation.TANH).build(), "input")
                    .addVertex("merge", new MergeVertex(), "l1", "l2")
                    .addLayer("outputLayer", new OutputLayer.Builder()
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX)
                                    .nIn(5 * 5 * (2 + 2)).nOut(3).build(), "merge")
                    .setOutputs("outputLayer")
                    .inputPreProcessor("outputLayer", new CnnToFeedForwardPreProcessor(5, 5, 4))
                    .pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    Random rng = new Random(12345);
    //Order: examples, channels, height, width
    INDArray input = Nd4j.rand(new int[] {5, 2, 6, 6});
    // One-hot labels: one random class per example
    INDArray labels = Nd4j.zeros(5, 3);
    for (int i = 0; i < 5; i++) {
        labels.putScalar(new int[] {i, rng.nextInt(3)}, 1.0);
    }

    if (PRINT_RESULTS) {
        System.out.println("testCnnDepthMerge()");
        for (int j = 0; j < graph.getNumLayers(); j++) {
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }
    }

    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE,
                    new INDArray[] {input}, new INDArray[] {labels});
    assertTrue("testCnnDepthMerge()", gradOK);
}
Usage example of org.deeplearning4j.nn.conf.ComputationGraphConfiguration in the project deeplearning4j (by deeplearning4j):
class GradientCheckTestsComputationGraph, method testLSTMWithLastTimeStepVertex.
@Test
public void testLSTMWithLastTimeStepVertex() {
    // Gradient check for an LSTM followed by a LastTimeStepVertex (3d input -> 2d labels),
    // both without and with an input mask array.
    Nd4j.getRandom().setSeed(12345);

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .updater(Updater.NONE).learningRate(1.0).graphBuilder()
                    .addInputs("input").setOutputs("out")
                    .addLayer("lstm1", new GravesLSTM.Builder().nIn(3).nOut(4)
                                    .activation(Activation.TANH).build(), "input")
                    .addVertex("lastTS", new LastTimeStepVertex("input"), "lstm1")
                    .addLayer("out", new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "lastTS")
                    .pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    Random rng = new Random(12345);
    INDArray input = Nd4j.rand(new int[] {3, 3, 5});
    //Here: labels are 2d (due to LastTimeStepVertex)
    INDArray labels = Nd4j.zeros(3, 3);
    for (int i = 0; i < 3; i++) {
        labels.putScalar(new int[] {i, rng.nextInt(3)}, 1.0);
    }

    if (PRINT_RESULTS) {
        System.out.println("testLSTMWithLastTimeStepVertex()");
        for (int j = 0; j < graph.getNumLayers(); j++) {
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }
    }

    //First: test with no input mask array
    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE,
                    new INDArray[] {input}, new INDArray[] {labels});
    String msg = "testLSTMWithLastTimeStepVertex()";
    assertTrue(msg, gradOK);

    //Second: test with input mask arrays.
    INDArray inMask = Nd4j.zeros(3, 5);
    inMask.putRow(0, Nd4j.create(new double[] {1, 1, 1, 0, 0}));
    inMask.putRow(1, Nd4j.create(new double[] {1, 1, 1, 1, 0}));
    inMask.putRow(2, Nd4j.create(new double[] {1, 1, 1, 1, 1}));
    graph.setLayerMaskArrays(new INDArray[] {inMask}, null);

    gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE,
                    new INDArray[] {input}, new INDArray[] {labels});
    assertTrue(msg, gradOK);
}
Usage example of org.deeplearning4j.nn.conf.ComputationGraphConfiguration in the project deeplearning4j (by deeplearning4j):
class GradientCheckTestsComputationGraph, method testLSTMWithSubset.
@Test
public void testLSTMWithSubset() {
    // Gradient check for an LSTM (nOut=8) whose activations pass through a SubsetVertex
    // keeping units 0..3 (4 units) before the RNN output layer.
    Nd4j.getRandom().setSeed(1234);

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(1234)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .updater(Updater.NONE).learningRate(1.0).graphBuilder()
                    .addInputs("input").setOutputs("out")
                    .addLayer("lstm1", new GravesLSTM.Builder().nIn(3).nOut(8)
                                    .activation(Activation.TANH).build(), "input")
                    .addVertex("subset", new SubsetVertex(0, 3), "lstm1")
                    .addLayer("out", new RnnOutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "subset")
                    .pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    Random rng = new Random(12345);
    INDArray input = Nd4j.rand(new int[] {3, 3, 5});
    // One-hot 3d labels: one random class per example per time step
    INDArray labels = Nd4j.zeros(3, 3, 5);
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 5; j++) {
            labels.putScalar(new int[] {i, rng.nextInt(3), j}, 1.0);
        }
    }

    if (PRINT_RESULTS) {
        System.out.println("testLSTMWithSubset()");
        for (int j = 0; j < graph.getNumLayers(); j++) {
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }
    }

    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE,
                    new INDArray[] {input}, new INDArray[] {labels});
    assertTrue("testLSTMWithSubset()", gradOK);
}
Aggregations