use of org.nd4j.linalg.api.ops.impl.transforms.Abs in project nd4j by deeplearning4j.
the class LossMAPE method computeGradient.
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException("Labels array numColumns (size(1) = " + labels.size(1)
                        + ") does not match output layer number of outputs (nOut = " + preOutput.size(1) + ") ");
    }
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    INDArray actSubPredicted = labels.sub(output);
    INDArray dLda = Nd4j.getExecutioner().execAndReturn(new Sign(actSubPredicted));
    INDArray absLabels = Nd4j.getExecutioner().execAndReturn(new Abs(labels.dup()));
    dLda.divi(absLabels).muli(-100.0 / labels.size(1));
    // Weighted loss function
    if (weights != null) {
        dLda.muliRowVector(weights);
    }
    if (mask != null && LossUtil.isPerOutputMasking(dLda, mask)) {
        // For *most* activation functions we don't actually need to mask dL/da in addition to masking dL/dz later,
        // but some, like softmax, require both (because dL/dz_i is a function of dL/da_j, for i != j).
        // We could add a special case for softmax (activationFn instanceof ActivationSoftmax), but that would be
        // error prone, though it would buy us a tiny bit of performance.
        LossUtil.applyMask(dLda, mask);
    }
    // TODO activation functions with params
    INDArray gradient = activationFn.backprop(preOutput, dLda).getFirst();
    if (mask != null) {
        LossUtil.applyMask(gradient, mask);
    }
    return gradient;
}
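For reference, the scaling applied to dLda follows directly from the MAPE definition. A sketch of the derivation, assuming the column-averaged form of MAPE that LossMAPE computes (n = number of output columns):

    L(y, \hat{y}) = \frac{100}{n} \sum_{i=1}^{n} \frac{|y_i - \hat{y}_i|}{|y_i|}
    \qquad\Longrightarrow\qquad
    \frac{\partial L}{\partial \hat{y}_i} = -\frac{100}{n} \cdot \frac{\operatorname{sign}(y_i - \hat{y}_i)}{|y_i|}

which is exactly what the Sign and Abs ops plus the muli(-100.0 / labels.size(1)) factor compute above.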
use of org.nd4j.linalg.api.ops.impl.transforms.Abs in project deeplearning4j by deeplearning4j.
the class RegressionEvaluation method eval.
@Override
public void eval(INDArray labels, INDArray predictions) {
    // References for the calculations in this section:
    // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    // https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient#For_a_sample
    // Doing online calculation of means, sums of squares, etc.
    labelsSumPerColumn.addi(labels.sum(0));
    INDArray error = predictions.sub(labels);
    INDArray absErrorSum = Nd4j.getExecutioner().execAndReturn(new Abs(error.dup())).sum(0);
    INDArray squaredErrorSum = error.mul(error).sum(0);
    sumAbsErrorsPerColumn.addi(absErrorSum);
    sumSquaredErrorsPerColumn.addi(squaredErrorSum);
    sumOfProducts.addi(labels.mul(predictions).sum(0));
    sumSquaredLabels.addi(labels.mul(labels).sum(0));
    sumSquaredPredicted.addi(predictions.mul(predictions).sum(0));
    int nRows = labels.size(0);
    currentMean.muli(exampleCount).addi(labels.sum(0)).divi(exampleCount + nRows);
    currentPredictionMean.muli(exampleCount).addi(predictions.sum(0)).divi(exampleCount + nRows);
    exampleCount += nRows;
}
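The two currentMean / currentPredictionMean lines implement the standard incremental (running) mean over batches; a sketch of the recurrence they compute, where N is the exampleCount seen so far and n is nRows in the current batch:

    \mu_{\text{new}} = \frac{N \, \mu_{\text{old}} + \sum_{\text{batch}} x}{N + n}

so each call folds the batch's per-column sums into the running per-column means without storing any past data.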
use of org.nd4j.linalg.api.ops.impl.transforms.Abs in project nd4j by deeplearning4j.
the class GridExecutionerTest method testGridFlow6.
@Test
public void testGridFlow6() throws Exception {
    CudaGridExecutioner executioner = new CudaGridExecutioner();
    INDArray arrayX = Nd4j.create(new float[] {-1f, -1f, 1f});
    INDArray exp = Nd4j.create(new float[] {1f, 1f, 1f});
    Abs op = new Abs(arrayX);
    executioner.exec(op);
    op = new Abs(arrayX);
    executioner.exec(op);
    assertEquals(0, executioner.getQueueLength());
    assertEquals(exp, arrayX);
}
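Outside of the grid executioner, the same elementwise absolute value is usually obtained through the Transforms helper rather than by constructing the Abs op directly. A minimal sketch, assuming the standard org.nd4j.linalg.ops.transforms.Transforms.abs API (the method name absWithTransforms is hypothetical, not part of the test):

    public void absWithTransforms() {
        INDArray x = Nd4j.create(new float[] {-1f, -1f, 1f});
        // dup = true: compute |x| into a new array, leaving x unchanged
        INDArray copy = Transforms.abs(x, true);
        // dup = false: apply |x| in place, roughly equivalent to exec(new Abs(x))
        Transforms.abs(x, false);
    }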
use of org.nd4j.linalg.api.ops.impl.transforms.Abs in project nd4j by deeplearning4j.
the class MetaOpTests method testLinearMetaOp1.
@Ignore
@Test
public void testLinearMetaOp1() throws Exception {
    CudaGridExecutioner executioner = new CudaGridExecutioner();
    INDArray array = Nd4j.create(new float[] {-11f, -12f, -13f, -14f, -15f, -16f, -17f, -18f, -19f, -20f});
    INDArray exp = Nd4j.create(new float[] {1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f});
    INDArray exp2 = Nd4j.create(new float[] {11f, 12f, 13f, 14f, 15f, 16f, 17f, 18f, 19f, 20f});
    ScalarAdd opA = new ScalarAdd(array, 10f);
    Abs opB = new Abs(array);
    PredicateMetaOp metaOp = new PredicateMetaOp(opA, opB);
    executioner.prepareGrid(metaOp);
    GridDescriptor descriptor = metaOp.getGridDescriptor();
    assertEquals(2, descriptor.getGridDepth());
    assertEquals(2, descriptor.getGridPointers().size());
    assertEquals(Op.Type.SCALAR, descriptor.getGridPointers().get(0).getType());
    assertEquals(Op.Type.TRANSFORM, descriptor.getGridPointers().get(1).getType());
    long time1 = System.nanoTime();
    executioner.exec(metaOp);
    long time2 = System.nanoTime();
    System.out.println("Execution time Meta: " + ((time2 - time1) / 1));
    assertEquals(exp, array);
    time1 = System.nanoTime();
    Nd4j.getExecutioner().exec(opA);
    Nd4j.getExecutioner().exec(opB);
    time2 = System.nanoTime();
    System.out.println("Execution time Linear: " + ((time2 - time1) / 1));
    assertEquals(exp2, array);
}
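The expected arrays follow from applying the two fused ops in order; a quick arithmetic trace (not part of the test):

    metaOp (ScalarAdd 10, then Abs):  -11 + 10 = -1, |-1| = 1  ...  -20 + 10 = -10, |-10| = 10  ->  exp  = {1, ..., 10}
    linear re-run on that result:       1 + 10 = 11, |11| = 11  ...   10 + 10 = 20, |20| = 20   ->  exp2 = {11, ..., 20}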
use of org.nd4j.linalg.api.ops.impl.transforms.Abs in project nd4j by deeplearning4j.
the class MetaOpTests method testLinearMetaOp2.
@Ignore
@Test
public void testLinearMetaOp2() throws Exception {
    CudaGridExecutioner executioner = new CudaGridExecutioner();
    INDArray array = Nd4j.create(new float[] {-11f, -12f, -13f, -14f, -15f, -16f, -17f, -18f, -19f, -20f});
    INDArray exp = Nd4j.create(new float[] {21f, 22f, 23f, 24f, 25f, 26f, 27f, 28f, 29f, 30f});
    INDArray exp2 = Nd4j.create(new float[] {31f, 32f, 33f, 34f, 35f, 36f, 37f, 38f, 39f, 40f});
    Abs opA = new Abs(array);
    ScalarAdd opB = new ScalarAdd(array, 10f);
    PredicateMetaOp metaOp = new PredicateMetaOp(opA, opB);
    executioner.prepareGrid(metaOp);
    GridDescriptor descriptor = metaOp.getGridDescriptor();
    assertEquals(2, descriptor.getGridDepth());
    assertEquals(2, descriptor.getGridPointers().size());
    assertEquals(Op.Type.TRANSFORM, descriptor.getGridPointers().get(0).getType());
    assertEquals(Op.Type.SCALAR, descriptor.getGridPointers().get(1).getType());
    long time1 = System.nanoTime();
    executioner.exec(metaOp);
    long time2 = System.nanoTime();
    System.out.println("Execution time Meta: " + ((time2 - time1) / 1));
    assertEquals(exp, array);
    time1 = System.nanoTime();
    Nd4j.getExecutioner().exec(opA);
    Nd4j.getExecutioner().exec(opB);
    time2 = System.nanoTime();
    System.out.println("Execution time Linear: " + ((time2 - time1) / 1));
    assertEquals(exp2, array);
}
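With the op order reversed (Abs first, then ScalarAdd 10), the same input produces different expectations; a quick trace:

    metaOp (Abs, then ScalarAdd 10):  |-11| = 11, 11 + 10 = 21  ...  |-20| = 20, 20 + 10 = 30  ->  exp  = {21, ..., 30}
    linear re-run on that result:      |21| = 21, 21 + 10 = 31  ...  |30| = 30, 30 + 10 = 40   ->  exp2 = {31, ..., 40}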