Use of org.nd4j.linalg.api.ops.impl.accum.Sum in the nd4j project by deeplearning4j.
From the class CudaAccumTests, method testPinnedSum.
/**
 * Verifies that a full Sum reduction produces the correct scalar result
 * on the CUDA backend.
 *
 * @throws Exception if op execution fails
 */
@Test
public void testPinnedSum() throws Exception {
    // Guard: this test is only meaningful on the CUDA (Jcublas) backend
    assertEquals("JcublasLevel1", Nd4j.getBlasWrapper().level1().getClass().getSimpleName());

    float[] data = { 2.01f, 2.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f };
    INDArray input = Nd4j.create(data);

    Sum sumOp = new Sum(input);
    Nd4j.getExecutioner().exec(sumOp, 1);

    Number result = sumOp.getFinalResult();
    System.out.println("Result: " + result);

    // 2 * 2.01 + 13 * 1.01 = 17.15
    assertEquals(17.15f, result.floatValue(), 0.01f);
}
Use of org.nd4j.linalg.api.ops.impl.accum.Sum in the nd4j project by deeplearning4j.
From the class Nd4jTestsC, method testScalarReduction1.
@Test
public void testScalarReduction1() {
    // Each reduction runs over a fresh single-element array holding 1.0,
    // so norm2, norm1 and sum must all evaluate to 1.0.
    Accumulation norm2Op = new Norm2(Nd4j.create(1).assign(1.0));
    double norm2Result = Nd4j.getExecutioner().execAndReturn(norm2Op).getFinalResult().doubleValue();

    Accumulation norm1Op = new Norm1(Nd4j.create(1).assign(1.0));
    double norm1Result = Nd4j.getExecutioner().execAndReturn(norm1Op).getFinalResult().doubleValue();

    Accumulation sumOp = new Sum(Nd4j.create(1).assign(1.0));
    double sumResult = Nd4j.getExecutioner().execAndReturn(sumOp).getFinalResult().doubleValue();

    assertEquals(1.0, norm2Result, 0.001);
    assertEquals(1.0, norm1Result, 0.001);
    assertEquals(1.0, sumResult, 0.001);
}
Use of org.nd4j.linalg.api.ops.impl.accum.Sum in the nd4j project by deeplearning4j.
From the class GridExecutionerTest, method testOpPointerizeReduce1.
/**
 * Checks that pointerizeOp builds a complete and correct GridPointers
 * descriptor for a Sum reduction along dimension 1 of a 10x10 array.
 *
 * @throws Exception if op execution fails
 */
@Test
public void testOpPointerizeReduce1() throws Exception {
    CudaGridExecutioner executioner = new CudaGridExecutioner();

    INDArray source = Nd4j.create(10, 10);
    Sum reduction = new Sum(source);

    // Execute first: this initializes Op.Z for the requested dimension,
    // which pointerizeOp relies on below.
    executioner.exec(reduction, 1);

    GridPointers gridPointers = executioner.pointerizeOp(reduction, 1);

    assertEquals(reduction.opNum(), gridPointers.getOpNum());
    assertEquals(Op.Type.REDUCE, gridPointers.getType());

    CudaContext ctx = (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext();

    // Resolve the device pointers the descriptor is expected to carry.
    Pointer expectedX = AtomicAllocator.getInstance().getPointer(source, ctx);
    Pointer expectedXShape = AtomicAllocator.getInstance().getPointer(source.shapeInfoDataBuffer(), ctx);
    Pointer expectedZ = AtomicAllocator.getInstance().getPointer(reduction.z(), ctx);
    Pointer expectedZShape = AtomicAllocator.getInstance().getPointer(reduction.z().shapeInfoDataBuffer(), ctx);

    DataBuffer dimensionBuffer = Nd4j.getConstantHandler().getConstantBuffer(new int[] { 1 });
    Pointer expectedDimensions = AtomicAllocator.getInstance().getPointer(dimensionBuffer, ctx);

    // X side: input buffer and its shape info.
    assertEquals(expectedX, gridPointers.getX());
    assertEquals(xShapeCheck(expectedXShape), gridPointers.getXShapeInfo());

    // Y side: a single-input reduce has no Y operand.
    assertEquals(null, gridPointers.getY());
    assertEquals(null, gridPointers.getYShapeInfo());

    // Z side: reducing a 10x10 array along dimension 1 yields 10 results.
    assertNotEquals(null, gridPointers.getZ());
    assertEquals(expectedZ, gridPointers.getZ());
    assertEquals(10, reduction.z().length());
    assertEquals(10, gridPointers.getZLength());
    assertEquals(expectedZShape, gridPointers.getZShapeInfo());

    /* // We dont really care about EWS here, since we're testing TAD-based operation
    assertEquals(1, pointers.getXStride());
    assertEquals(-1, pointers.getYStride());
    assertEquals(1, pointers.getZStride());
    */

    // Dimension descriptor and TAD metadata.
    assertEquals(expectedDimensions, gridPointers.getDimensions());
    assertEquals(1, gridPointers.getDimensionsLength());
    assertNotEquals(null, gridPointers.getTadShape());
    assertNotEquals(null, gridPointers.getTadOffsets());
    assertEquals(null, gridPointers.getExtraArgs());
}

// Identity helper kept trivial: documents that the X shape-info pointer is compared as-is.
private static Pointer xShapeCheck(Pointer p) {
    return p;
}
Use of org.nd4j.linalg.api.ops.impl.accum.Sum in the nd4j project by deeplearning4j.
From the class MetaOpTests, method testPredicateReduce2.
/**
 * Predicate meta-op test: a scalar add fused with a scalar-producing reduce.
 * The fused op should add 1.0 to every element of a 5x5 zero array and then
 * sum the result, yielding 25.
 *
 * @throws Exception if op execution fails
 */
@Ignore
@Test
public void testPredicateReduce2() throws Exception {
    CudaGridExecutioner executioner = new CudaGridExecutioner();

    INDArray input = Nd4j.create(5, 5);

    ScalarAdd addOp = new ScalarAdd(input, 1.0f);
    Sum sumOp = new Sum(input);

    PredicateMetaOp fusedOp = new PredicateMetaOp(addOp, sumOp);
    executioner.exec(fusedOp);

    INDArray output = sumOp.z();
    assertNotEquals(null, output);
    assertTrue(output.isScalar());

    // 25 elements, each 0 + 1.0 -> total of 25
    assertEquals(25f, output.getFloat(0), 0.1f);
}
Aggregations