Use of org.nd4j.linalg.api.ndarray.INDArray in project deeplearning4j by deeplearning4j.
From class BatchNormalizationTest, method test2dVs4d:
@Test
public void test2dVs4d() {
    //Idea: batch normalization on a 2d input of shape [m*h*w, nOut] and on the equivalent
    //4d NCHW input of shape [m, nOut, h, w] should produce the same results
    Nd4j.getRandom().setSeed(12345);

    int m = 2;
    int h = 3;
    int w = 3;
    int nOut = 2;

    INDArray in = Nd4j.rand('c', m * h * w, nOut);

    INDArray in4 = in.dup();
    in4 = Shape.newShapeNoCopy(in4, new int[] {m, h, w, nOut}, false);
    assertNotNull(in4);
    in4 = in4.permute(0, 3, 1, 2).dup();

    //Copy the same values into an array with a different underlying layout ('f' order plus permute),
    //so the layer is also exercised on non-standard strides; note h == w here, so the shapes line up
    INDArray arr = Nd4j.rand(1, m * h * w * nOut).reshape('f', h, w, m, nOut).permute(2, 3, 1, 0);
    in4 = arr.assign(in4);

    Layer l1 = getLayer(nOut);
    Layer l2 = getLayer(nOut);

    INDArray out2d = l1.activate(in.dup(), true);
    INDArray out4d = l2.activate(in4.dup(), true);

    INDArray out4dAs2 = out4d.permute(0, 2, 3, 1).dup('c');
    out4dAs2 = Shape.newShapeNoCopy(out4dAs2, new int[] {m * h * w, nOut}, false);

    assertEquals(out2d, out4dAs2);

    //Test backprop: the same equivalence should hold for the epsilons (input gradients)
    INDArray epsilons2d = Nd4j.rand('c', m * h * w, nOut);
    INDArray epsilons4d = epsilons2d.dup();
    epsilons4d = Shape.newShapeNoCopy(epsilons4d, new int[] {m, h, w, nOut}, false);
    assertNotNull(epsilons4d);
    epsilons4d = epsilons4d.permute(0, 3, 1, 2).dup();

    Pair<Gradient, INDArray> b2d = l1.backpropGradient(epsilons2d);
    Pair<Gradient, INDArray> b4d = l2.backpropGradient(epsilons4d);

    INDArray e4dAs2d = b4d.getSecond().permute(0, 2, 3, 1).dup('c');
    e4dAs2d = Shape.newShapeNoCopy(e4dAs2d, new int[] {m * h * w, nOut}, false);

    assertEquals(b2d.getSecond(), e4dAs2d);
}
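The getLayer(nOut) helper is not shown in this excerpt. A minimal sketch of what it could look like, assuming it mirrors the getCNNConfig helper further down this page and builds a standalone org.deeplearning4j.nn.conf.layers.BatchNormalization layer (a hypothetical reconstruction, not the test's actual code):

//Hypothetical reconstruction of the getLayer helper referenced above, not the actual test code
private static Layer getLayer(int nOut) {
    BatchNormalization bn = new BatchNormalization.Builder().nOut(nOut).build();
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().iterations(1).layer(bn).build();
    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    return conf.getLayer().instantiate(conf, null, 0, params, true);
}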
Use of org.nd4j.linalg.api.ndarray.INDArray in project deeplearning4j by deeplearning4j.
From class LocalResponseTest, method testLrnManual:
@Test
public void testLrnManual() {
    int wh = 5;
    int depth = 6;
    int minibatch = 3;

    int n = 4;
    double k = 2.0;
    double alpha = 1e-4;
    double beta = 0.75;

    INDArray in = Nd4j.rand(new int[] {minibatch, depth, wh, wh});
    INDArray outExp = Nd4j.zeros(minibatch, depth, wh, wh);

    //Manually compute the expected LRN output, position by position
    for (int m = 0; m < minibatch; m++) {
        for (int x = 0; x < wh; x++) {
            for (int y = 0; y < wh; y++) {
                for (int i = 0; i < depth; i++) {
                    //Sum of squared activations over the n/2 channels on either side of channel i,
                    //clamped to the valid channel range
                    int jFrom = Math.max(0, i - n / 2);
                    int jTo = Math.min(depth - 1, i + n / 2);
                    double sum = 0.0;
                    for (int j = jFrom; j <= jTo; j++) {
                        double d = in.getDouble(m, j, x, y);
                        sum += d * d;
                    }
                    double out = in.getDouble(m, i, x, y) / Math.pow(k + alpha * sum, beta);
                    outExp.putScalar(m, i, x, y, out);
                }
            }
        }
    }

    LocalResponseNormalization lrn = new LocalResponseNormalization.Builder().build();
    NeuralNetConfiguration nnc = new NeuralNetConfiguration.Builder().layer(lrn).build();
    org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization layer =
                    (org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization) lrn.instantiate(nnc, null, 0, null, false);

    INDArray outAct = layer.activate(in, true);
    assertEquals(outExp, outAct);
}
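The nested loops compute, channel by channel, the standard cross-channel LRN formula; with D = depth, this is exactly what the loop body evaluates:

\[
  b^{i}_{x,y} = \frac{a^{i}_{x,y}}{\left(k + \alpha \sum_{j=\max(0,\, i-n/2)}^{\min(D-1,\, i+n/2)} \left(a^{j}_{x,y}\right)^{2}\right)^{\beta}}
\]

Here k = 2.0, alpha = 1e-4, beta = 0.75 and the window covers n/2 = 2 channels on each side of channel i; the final assertEquals checks this manual computation against the layer's own activate output.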
Use of org.nd4j.linalg.api.ndarray.INDArray in project deeplearning4j by deeplearning4j.
From class ConvolutionLayerTest, method testCNNTooLargeKernel:
@Test(expected = DL4JException.class)
public void testCNNTooLargeKernel() {
    int imageHeight = 20;
    int imageWidth = 23;
    int nChannels = 1;
    int classes = 2;
    int numSamples = 200;

    int kernelHeight = imageHeight;
    int kernelWidth = imageWidth + 1;

    DataSet trainInput;
    //Output size: (img - kernel + 2*padding)/stride + 1 must be >= 1. Therefore, with padding = 0,
    //the kernel must be no larger than the image. Here kernelWidth = imageWidth + 1, so a DL4JException is expected.
    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(123).iterations(1).list()
                    .layer(0, new ConvolutionLayer.Builder(kernelHeight, kernelWidth)
                                    .stride(1, 1)
                                    .nOut(2)
                                    .activation(Activation.RELU)
                                    .weightInit(WeightInit.XAVIER)
                                    .build())
                    .layer(1, new OutputLayer.Builder()
                                    .nOut(classes)
                                    .weightInit(WeightInit.XAVIER)
                                    .activation(Activation.SOFTMAX)
                                    .build())
                    .setInputType(InputType.convolutionalFlat(imageHeight, imageWidth, nChannels))
                    .backprop(true).pretrain(false);

    MultiLayerConfiguration conf = builder.build();
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();

    INDArray emptyFeatures = Nd4j.zeros(numSamples, imageWidth * imageHeight * nChannels);
    INDArray emptyLabels = Nd4j.zeros(numSamples, classes);
    trainInput = new DataSet(emptyFeatures, emptyLabels);
    model.fit(trainInput);
}
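Plugging the test's numbers into the output-size formula from the comment makes the failure concrete: with padding 0 and stride 1, the width dimension yields no valid output position, while the height dimension sits exactly at the limit:

\[
  \frac{23 - 24 + 2 \cdot 0}{1} + 1 = 0 < 1, \qquad \frac{20 - 20 + 2 \cdot 0}{1} + 1 = 1
\]

Hence a 24-wide kernel over a 23-wide image is invalid, and the test expects a DL4JException.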
Use of org.nd4j.linalg.api.ndarray.INDArray in project deeplearning4j by deeplearning4j.
From class ConvolutionLayerTest, method getCNNConfig:
//////////////////////////////////////////////////////////////////////////////////
private static Layer getCNNConfig(int nIn, int nOut, int[] kernelSize, int[] stride, int[] padding) {
    ConvolutionLayer layer = new ConvolutionLayer.Builder(kernelSize, stride, padding)
                    .nIn(nIn).nOut(nOut)
                    .activation(Activation.SIGMOID).build();
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().iterations(1).layer(layer).build();
    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    return conf.getLayer().instantiate(conf, null, 0, params, true);
}
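For illustration, a hypothetical call to this helper (the argument values below are chosen for the example, not taken from the original tests): a 2x2 convolution with unit stride, no padding, one input channel and two output channels, applied to a random 4x4 image:

//Illustrative usage; values are examples, not from the original test class
Layer conv = getCNNConfig(1, 2, new int[] {2, 2}, new int[] {1, 1}, new int[] {0, 0});
INDArray input = Nd4j.rand(new int[] {1, 1, 4, 4});   //minibatch 1, 1 channel, 4x4 image
INDArray out = conv.activate(input, false);           //(4 - 2 + 0)/1 + 1 = 3, so shape [1, 2, 3, 3]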
Use of org.nd4j.linalg.api.ndarray.INDArray in project deeplearning4j by deeplearning4j.
From class ConvolutionLayerTest, method testCNNMLNPretrain:
//////////////////////////////////////////////////////////////////////////////////
@Test
public void testCNNMLNPretrain() throws Exception {
    //Note: CNNs do not do pretraining, so two identically configured networks
    //fit on the same data should end up with the same score
    int numSamples = 10;
    int batchSize = 10;
    DataSetIterator mnistIter = new MnistDataSetIterator(batchSize, numSamples, true);

    MultiLayerNetwork model = getCNNMLNConfig(false, true);
    model.fit(mnistIter);

    mnistIter.reset();
    MultiLayerNetwork model2 = getCNNMLNConfig(false, true);
    model2.fit(mnistIter);

    mnistIter.reset();
    DataSet test = mnistIter.next();

    Evaluation eval = new Evaluation();
    INDArray output = model.output(test.getFeatureMatrix());
    eval.eval(test.getLabels(), output);
    double f1Score = eval.f1();

    Evaluation eval2 = new Evaluation();
    INDArray output2 = model2.output(test.getFeatureMatrix());
    eval2.eval(test.getLabels(), output2);
    double f1Score2 = eval2.f1();

    assertEquals(f1Score, f1Score2, 1e-4);
}
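The getCNNMLNConfig(boolean backprop, boolean pretrain) helper is also not part of this excerpt. A minimal sketch under stated assumptions: a single convolution layer feeding a softmax output layer on flattened 28x28 MNIST input, with the two flags wired through to the configuration. The layer sizes and loss function are illustrative guesses, not the test's actual configuration:

//Hypothetical reconstruction, not the test class's actual helper
private static MultiLayerNetwork getCNNMLNConfig(boolean backprop, boolean pretrain) {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(123).iterations(1).list()
                    .layer(0, new ConvolutionLayer.Builder(new int[] {10, 10})
                                    .nOut(6)
                                    .weightInit(WeightInit.XAVIER)
                                    .activation(Activation.RELU).build())
                    .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                                    .nOut(10)
                                    .weightInit(WeightInit.XAVIER)
                                    .activation(Activation.SOFTMAX).build())
                    .setInputType(InputType.convolutionalFlat(28, 28, 1))   //MNIST: 28x28 images, 1 channel
                    .backprop(backprop).pretrain(pretrain)
                    .build();
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    return model;
}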