use of org.nd4j.linalg.api.ops.CustomOp in project nd4j by deeplearning4j.
the class LossBinaryXENT method computeGradient.
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException("Labels array numColumns (size(1) = " + labels.size(1)
                        + ") does not match output layer number of outputs (nOut = " + preOutput.size(1) + ")");
    }
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    if (clipEps > 0.0) {
        CustomOp op = DynamicCustomOp.builder("clipbyvalue")
                        .addInputs(output)
                        .callInplace(true)
                        .addFloatingPointArguments(clipEps, 1.0 - clipEps)
                        .build();
        Nd4j.getExecutioner().exec(op);
    }
    INDArray numerator = output.sub(labels);
    // output * (1 - output)
    INDArray denominator = Nd4j.getExecutioner().execAndReturn(new TimesOneMinus(output));
    INDArray dLda = numerator.divi(denominator);
    if (mask != null && LossUtil.isPerOutputMasking(dLda, mask)) {
        // For *most* activation functions we don't need to mask dL/da in addition to masking dL/dz later,
        // but some, like softmax, require both (because dL/dz_i is a function of dL/da_j, for i != j).
        // We could add a special case for softmax (activationFn instanceof ActivationSoftmax), but that would
        // be error-prone, though it would buy us a tiny bit of performance.
        LossUtil.applyMask(dLda, mask);
    }
    // TODO: activation functions with weights
    INDArray grad = activationFn.backprop(preOutput, dLda).getFirst();
    // Weighted loss function
    if (weights != null) {
        if (weights.length() != output.size(1)) {
            throw new IllegalStateException("Weights vector (length " + weights.length()
                            + ") does not match output.size(1) = " + output.size(1));
        }
        grad.muliRowVector(weights);
    }
    if (mask != null) {
        LossUtil.applyMask(grad, mask);
    }
    return grad;
}
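The "clipbyvalue" custom op above is what keeps the activation output away from exactly 0 or 1 before the division by output * (1 - output). A minimal standalone sketch of the same builder pattern follows; the class name, the small input array, and the 0.1/0.9 clipping bounds are illustrative assumptions, not taken from the original code.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.CustomOp;
import org.nd4j.linalg.api.ops.DynamicCustomOp;
import org.nd4j.linalg.factory.Nd4j;

public class ClipByValueSketch {
    public static void main(String[] args) {
        // Values straddling the illustrative [0.1, 0.9] clipping range
        INDArray clipped = Nd4j.create(new double[] {0.0, 0.05, 0.5, 0.95, 1.0});
        // Same builder pattern as in LossBinaryXENT.computeGradient: clip in place
        CustomOp op = DynamicCustomOp.builder("clipbyvalue")
                        .addInputs(clipped)
                        .callInplace(true)
                        .addFloatingPointArguments(0.1, 0.9)
                        .build();
        Nd4j.getExecutioner().exec(op);
        System.out.println(clipped); // expected roughly [0.1, 0.1, 0.5, 0.9, 0.9]
    }
}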
use of org.nd4j.linalg.api.ops.CustomOp in project nd4j by deeplearning4j.
the class CustomOpsTests method testNoOp1.
@Test
@Ignore // it's a no-op; we don't care about it anymore
public void testNoOp1() throws Exception {
    val arrayX = Nd4j.create(10, 10);
    val arrayY = Nd4j.create(5, 3);
    arrayX.assign(3.0);
    arrayY.assign(1.0);
    val expX = Nd4j.create(10, 10).assign(3.0);
    val expY = Nd4j.create(5, 3).assign(1.0);
    CustomOp op = DynamicCustomOp.builder("noop")
                    .addInputs(arrayX, arrayY)
                    .addOutputs(arrayX, arrayY)
                    .build();
    Nd4j.getExecutioner().exec(op);
    assertEquals(expX, arrayX);
    assertEquals(expY, arrayY);
}
use of org.nd4j.linalg.api.ops.CustomOp in project nd4j by deeplearning4j.
the class CustomOpsTests method testFloor.
@Test
public void testFloor() throws Exception {
    val arrayX = Nd4j.create(10, 10);
    arrayX.assign(3.0);
    val exp = Nd4j.create(10, 10).assign(3.0);
    CustomOp op = DynamicCustomOp.builder("floor")
                    .addInputs(arrayX)
                    .addOutputs(arrayX)
                    .build();
    Nd4j.getExecutioner().exec(op);
    assertEquals(exp, arrayX);
}
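testFloor above only exercises integer-valued input, where floor is the identity. A hedged variant with fractional values is sketched below; the method name, the 3.7 input, and the expected 3.0 are illustrative assumptions, not part of the original test class.

// Sketch only: assumes the same imports and test class style as CustomOpsTests above
@Test
public void testFloorFractionalSketch() throws Exception {
    val arrayX = Nd4j.create(10, 10).assign(3.7);   // illustrative fractional input
    val exp = Nd4j.create(10, 10).assign(3.0);      // floor(3.7) == 3.0
    CustomOp op = DynamicCustomOp.builder("floor")
                    .addInputs(arrayX)
                    .addOutputs(arrayX)
                    .build();
    Nd4j.getExecutioner().exec(op);
    assertEquals(exp, arrayX);
}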
use of org.nd4j.linalg.api.ops.CustomOp in project nd4j by deeplearning4j.
the class CustomOpsTests method testOutputShapes1.
@Test
public void testOutputShapes1() {
    // some random array with positive numbers
    val array0 = Nd4j.rand('f', 5, 2).addi(1);
    val array1 = array0.dup().addi(5);
    // array1 is always bigger than array0, except at (0,0)
    array1.put(0, 0, 0);
    // expected value of mergemax
    val exp = array1.dup();
    exp.putScalar(0, 0, array0.getDouble(0, 0));
    CustomOp op = DynamicCustomOp.builder("mergemax")
                    .addInputs(array0, array1)
                    .build();
    val shapes = Nd4j.getExecutioner().calculateOutputShape(op);
    assertEquals(1, shapes.size());
    assertArrayEquals(new int[] {5, 2}, shapes.get(0));
}
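testOutputShapes1 only checks the shape inference; it never executes the op. A common follow-up, sketched here rather than taken from the original test, is to allocate an output from the calculated shape and run mergemax against the expected array. The mergeMax/out variable names are illustrative; the rest builds on array0, array1, exp, and shapes from the test above.

// Sketch only: allocate an output from the calculated shape and execute mergemax
val out = Nd4j.create(shapes.get(0));                 // 5 x 2 output buffer
CustomOp mergeMax = DynamicCustomOp.builder("mergemax")
                .addInputs(array0, array1)
                .addOutputs(out)
                .build();
Nd4j.getExecutioner().exec(mergeMax);
assertEquals(exp, out);                               // element-wise max of the two inputs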
use of org.nd4j.linalg.api.ops.CustomOp in project nd4j by deeplearning4j.
the class CustomOpsTests method testInplaceOp1.
@Test
public void testInplaceOp1() throws Exception {
    val arrayX = Nd4j.create(10, 10);
    val arrayY = Nd4j.create(10, 10);
    arrayX.assign(4.0);
    arrayY.assign(2.0);
    val exp = Nd4j.create(10, 10).assign(6.0);
    CustomOp op = DynamicCustomOp.builder("add")
                    .addInputs(arrayX, arrayY)
                    .callInplace(true)
                    .build();
    Nd4j.getExecutioner().exec(op);
    assertEquals(exp, arrayX);
}
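With callInplace(true) and no explicit outputs, the result of the "add" op is written into the first input, which is why the assertion checks arrayX. An out-of-place variant is sketched below; the method name and the arrayZ output buffer are illustrative additions, not part of the original test class.

// Sketch only: the same "add" op executed out of place into an explicit output array
@Test
public void testAddOutOfPlaceSketch() throws Exception {
    val arrayX = Nd4j.create(10, 10).assign(4.0);
    val arrayY = Nd4j.create(10, 10).assign(2.0);
    val arrayZ = Nd4j.create(10, 10);                 // explicit output buffer (illustrative)
    val exp = Nd4j.create(10, 10).assign(6.0);
    CustomOp op = DynamicCustomOp.builder("add")
                    .addInputs(arrayX, arrayY)
                    .addOutputs(arrayZ)
                    .build();
    Nd4j.getExecutioner().exec(op);
    assertEquals(exp, arrayZ);
    // arrayX keeps its original value of 4.0 here, unlike in testInplaceOp1
}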