Search in sources:

Example 1 with CudaDoubleDataBuffer

Use of org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer in project nd4j by deeplearning4j.

From the class JCublasNDArrayFactory, method concat.

@Override
public INDArray concat(int dimension, INDArray... toConcat) {
    if (Nd4j.getExecutioner() instanceof GridExecutioner)
        ((GridExecutioner) Nd4j.getExecutioner()).flushQueue();
    if (toConcat.length == 1)
        return toConcat[0];
    int sumAlongDim = 0;
    for (int i = 0; i < toConcat.length; i++) {
        if (toConcat[i].isCompressed())
            Nd4j.getCompressor().decompressi(toConcat[i]);
        sumAlongDim += toConcat[i].size(dimension);
    }
    int[] outputShape = ArrayUtil.copy(toConcat[0].shape());
    outputShape[dimension] = sumAlongDim;
    INDArray ret = Nd4j.createUninitialized(outputShape, Nd4j.order());
    AtomicAllocator allocator = AtomicAllocator.getInstance();
    CudaContext context = allocator.getFlowController().prepareAction(ret, toConcat);
    long[] shapeInfoPointers = new long[toConcat.length];
    long[] dataPointers = new long[toConcat.length];
    long[] tadPointers = new long[toConcat.length];
    long[] offsetsPointers = new long[toConcat.length];
    long[] hostShapeInfoPointers = new long[toConcat.length];
    TADManager tadManager = Nd4j.getExecutioner().getTADManager();
    for (int i = 0; i < toConcat.length; i++) {
        shapeInfoPointers[i] = AddressRetriever.retrieveDeviceAddress(toConcat[i].shapeInfoDataBuffer(), context);
        dataPointers[i] = AtomicAllocator.getInstance().getPointer(toConcat[i], context).address();
        hostShapeInfoPointers[i] = AtomicAllocator.getInstance().getHostPointer(toConcat[i].shapeInfoDataBuffer()).address();
        // sanity-check that all non-concat dimensions match the output shape
        for (int j = 0; j < toConcat[i].rank(); j++) {
            if (j != dimension && toConcat[i].size(j) != outputShape[j])
                throw new IllegalArgumentException("Illegal concatenation at array " + i + " and shape element " + j);
        }
        Pair<DataBuffer, DataBuffer> tadBuffers = tadManager.getTADOnlyShapeInfo(toConcat[i], new int[] { dimension });
        long devTadShapeInfo = AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context).address();
        DataBuffer offsets = tadBuffers.getSecond();
        long devTadOffsets = AtomicAllocator.getInstance().getPointer(offsets, context).address();
        tadPointers[i] = devTadShapeInfo;
        offsetsPointers[i] = devTadOffsets;
    }
    // getting tadOnlyShape for result
    Pair<DataBuffer, DataBuffer> zBuffers = tadManager.getTADOnlyShapeInfo(ret, new int[] { dimension });
    // System.out.println("shapePointers: " + Arrays.toString(shapeInfoPointers));
    Pointer dZ = AtomicAllocator.getInstance().getPointer(ret, context);
    Pointer dZShapeInfo = AddressRetriever.retrieveDevicePointer(ret.shapeInfoDataBuffer(), context);
    CudaDoubleDataBuffer tempData = new CudaDoubleDataBuffer(toConcat.length);
    CudaDoubleDataBuffer tempShapes = new CudaDoubleDataBuffer(toConcat.length);
    CudaDoubleDataBuffer tempTAD = new CudaDoubleDataBuffer(toConcat.length);
    CudaDoubleDataBuffer tempOffsets = new CudaDoubleDataBuffer(toConcat.length);
    AtomicAllocator.getInstance().memcpyBlocking(tempData, new LongPointer(dataPointers), dataPointers.length * 8, 0);
    AtomicAllocator.getInstance().memcpyBlocking(tempShapes, new LongPointer(shapeInfoPointers), shapeInfoPointers.length * 8, 0);
    AtomicAllocator.getInstance().memcpyBlocking(tempTAD, new LongPointer(tadPointers), tadPointers.length * 8, 0);
    AtomicAllocator.getInstance().memcpyBlocking(tempOffsets, new LongPointer(offsetsPointers), offsetsPointers.length * 8, 0);
    Pointer dataPointer = AtomicAllocator.getInstance().getPointer(tempData, context);
    Pointer shapesPointer = AtomicAllocator.getInstance().getPointer(tempShapes, context);
    Pointer tadPointer = AtomicAllocator.getInstance().getPointer(tempTAD, context);
    Pointer offsetPointer = AtomicAllocator.getInstance().getPointer(tempOffsets, context);
    // System.out.println("ShapesPointer after conversion: " + shapesPointer);
    PointerPointer extras = new PointerPointer(
                    AddressRetriever.retrieveHostPointer(ret.shapeInfoDataBuffer()),
                    context.getOldStream(),
                    allocator.getDeviceIdPointer(),
                    context.getBufferAllocation(),
                    context.getBufferReduction(),
                    context.getBufferScalar(),
                    context.getBufferSpecial(),
                    AddressRetriever.retrieveHostPointer(toConcat[0].shapeInfoDataBuffer()),
                    AddressRetriever.retrieveHostPointer(ret.shapeInfoDataBuffer()),
                    new LongPointer(hostShapeInfoPointers),
                    // getting zTADShape
                    AtomicAllocator.getInstance().getPointer(zBuffers.getFirst(), context),
                    // getting zOffset
                    AtomicAllocator.getInstance().getPointer(zBuffers.getSecond(), context));
    if (ret.data().dataType() == DataBuffer.Type.DOUBLE) {
        nativeOps.concatDouble(extras, dimension, toConcat.length, new PointerPointer(new Pointer[] { dataPointer }), new PointerPointer(new Pointer[] { shapesPointer }), (DoublePointer) dZ, (IntPointer) dZShapeInfo, new PointerPointer(new Pointer[] { tadPointer }), new PointerPointer(new Pointer[] { offsetPointer }));
    } else if (ret.data().dataType() == DataBuffer.Type.FLOAT) {
        nativeOps.concatFloat(extras, dimension, toConcat.length, new PointerPointer(new Pointer[] { dataPointer }), new PointerPointer(new Pointer[] { shapesPointer }), (FloatPointer) dZ, (IntPointer) dZShapeInfo, new PointerPointer(new Pointer[] { tadPointer }), new PointerPointer(new Pointer[] { offsetPointer }));
    } else {
        nativeOps.concatHalf(extras, dimension, toConcat.length, new PointerPointer(new Pointer[] { dataPointer }), new PointerPointer(new Pointer[] { shapesPointer }), (ShortPointer) dZ, (IntPointer) dZShapeInfo, new PointerPointer(new Pointer[] { tadPointer }), new PointerPointer(new Pointer[] { offsetPointer }));
    }
    allocator.registerAction(context, ret, toConcat);
    return ret;
// return super.concat(dimension, toConcat);
}
Also used: AtomicAllocator(org.nd4j.jita.allocator.impl.AtomicAllocator), CudaContext(org.nd4j.linalg.jcublas.context.CudaContext), CudaPointer(org.nd4j.jita.allocator.pointers.CudaPointer), AllocationPoint(org.nd4j.jita.allocator.impl.AllocationPoint), GridExecutioner(org.nd4j.linalg.api.ops.executioner.GridExecutioner), INDArray(org.nd4j.linalg.api.ndarray.INDArray), CudaDoubleDataBuffer(org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer), TADManager(org.nd4j.linalg.cache.TADManager), DataBuffer(org.nd4j.linalg.api.buffer.DataBuffer), CudaIntDataBuffer(org.nd4j.linalg.jcublas.buffer.CudaIntDataBuffer), CompressedDataBuffer(org.nd4j.linalg.compression.CompressedDataBuffer)
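The recurring pattern in this and the following examples is worth spelling out: a CudaDoubleDataBuffer of length N is used purely as N x 8 bytes of device-accessible scratch space for a table of 64-bit pointer addresses (one double element is exactly 8 bytes wide), filled via memcpyBlocking from a LongPointer and then handed to the native op as a PointerPointer. The sketch below extracts just that step from the code above; the class and helper name are made up for illustration, and it assumes the nd4j CUDA backend classes shown in this example are on the classpath.

import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.javacpp.Pointer;
import org.nd4j.jita.allocator.impl.AtomicAllocator;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer;
import org.nd4j.linalg.jcublas.context.CudaContext;

public class DevicePointerTable {

    /**
     * Collects the device addresses of the given arrays into a temporary
     * CudaDoubleDataBuffer so they can be passed to a native op as a single
     * device-side pointer table (one 8-byte slot per 64-bit address).
     */
    public static Pointer buildPointerTable(CudaContext context, INDArray... arrays) {
        AtomicAllocator allocator = AtomicAllocator.getInstance();

        long[] addresses = new long[arrays.length];
        for (int i = 0; i < arrays.length; i++) {
            addresses[i] = allocator.getPointer(arrays[i], context).address();
        }

        // one double element per address: length * 8 bytes are copied below
        CudaDoubleDataBuffer table = new CudaDoubleDataBuffer(arrays.length);
        allocator.memcpyBlocking(table, new LongPointer(addresses), addresses.length * 8, 0);

        return allocator.getPointer(table, context);
    }
}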

Example 2 with CudaDoubleDataBuffer

Use of org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer in project nd4j by deeplearning4j.

From the class JCublasNDArrayFactory, method tear.

public INDArray[] tear(INDArray tensor, int... dimensions) {
    if (tensor.isCompressed())
        Nd4j.getCompressor().decompressi(tensor);
    Arrays.sort(dimensions);
    Pair<DataBuffer, DataBuffer> tadBuffers = Nd4j.getExecutioner().getTADManager().getTADOnlyShapeInfo(tensor, dimensions);
    long tadLength = 1;
    int[] shape = new int[dimensions.length];
    for (int i = 0; i < dimensions.length; i++) {
        tadLength *= tensor.shape()[dimensions[i]];
        shape[i] = tensor.shape()[dimensions[i]];
    }
    int numTads = (int) (tensor.lengthLong() / tadLength);
    INDArray[] result = new INDArray[numTads];
    long[] xPointers = new long[numTads];
    CudaContext context = AtomicAllocator.getInstance().getFlowController().prepareAction(null, tensor);
    for (int x = 0; x < numTads; x++) {
        result[x] = Nd4j.createUninitialized(shape);
        context = AtomicAllocator.getInstance().getFlowController().prepareAction(result[x]);
        xPointers[x] = AtomicAllocator.getInstance().getPointer(result[x], context).address();
    }
    CudaDoubleDataBuffer tempX = new CudaDoubleDataBuffer(numTads);
    AtomicAllocator.getInstance().memcpyBlocking(tempX, new LongPointer(xPointers), xPointers.length * 8, 0);
    PointerPointer extraz = new PointerPointer(null, // not used
                    context.getOldStream(), AtomicAllocator.getInstance().getDeviceIdPointer());
    if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) {
        nativeOps.tearDouble(extraz, (DoublePointer) AtomicAllocator.getInstance().getPointer(tensor, context), (IntPointer) AtomicAllocator.getInstance().getPointer(tensor.shapeInfoDataBuffer(), context), new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context)), (IntPointer) AtomicAllocator.getInstance().getPointer(result[0].shapeInfoDataBuffer(), context), (IntPointer) AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context), new LongPointerWrapper(AtomicAllocator.getInstance().getPointer(tadBuffers.getSecond(), context)));
    } else if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
        nativeOps.tearFloat(extraz, (FloatPointer) AtomicAllocator.getInstance().getPointer(tensor, context), (IntPointer) AtomicAllocator.getInstance().getPointer(tensor.shapeInfoDataBuffer(), context), new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context)), (IntPointer) AtomicAllocator.getInstance().getPointer(result[0].shapeInfoDataBuffer(), context), (IntPointer) AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context), new LongPointerWrapper(AtomicAllocator.getInstance().getPointer(tadBuffers.getSecond(), context)));
    } else if (Nd4j.dataType() == DataBuffer.Type.HALF) {
        nativeOps.tearHalf(extraz, (ShortPointer) AtomicAllocator.getInstance().getPointer(tensor, context), (IntPointer) AtomicAllocator.getInstance().getPointer(tensor.shapeInfoDataBuffer(), context), new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context)), (IntPointer) AtomicAllocator.getInstance().getPointer(result[0].shapeInfoDataBuffer(), context), (IntPointer) AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context), new LongPointerWrapper(AtomicAllocator.getInstance().getPointer(tadBuffers.getSecond(), context)));
    }
    AtomicAllocator.getInstance().getFlowController().registerActionAllWrite(context, result);
    AtomicAllocator.getInstance().getFlowController().registerAction(context, null, result);
    return result;
}
Also used: CudaContext(org.nd4j.linalg.jcublas.context.CudaContext), AllocationPoint(org.nd4j.jita.allocator.impl.AllocationPoint), INDArray(org.nd4j.linalg.api.ndarray.INDArray), CudaDoubleDataBuffer(org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer), LongPointerWrapper(org.nd4j.nativeblas.LongPointerWrapper), DataBuffer(org.nd4j.linalg.api.buffer.DataBuffer), CudaIntDataBuffer(org.nd4j.linalg.jcublas.buffer.CudaIntDataBuffer), CompressedDataBuffer(org.nd4j.linalg.compression.CompressedDataBuffer)
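For orientation, tear splits the input into its tensors-along-dimension (TADs) and returns each one as an independent array: a 4 x 6 matrix torn along dimension 1 gives tadLength = 6 and numTads = 24 / 6 = 4, i.e. four arrays of 6 elements. A minimal sketch, assuming Nd4j.tear(...) is the public entry point that delegates to this factory method (the exact entry point may vary between versions):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class TearExample {
    public static void main(String[] args) {
        // Tear a 4x6 matrix along dimension 1: tadLength = 6, numTads = 24 / 6 = 4.
        INDArray tensor = Nd4j.linspace(1, 24, 24).reshape(4, 6);
        INDArray[] rows = Nd4j.tear(tensor, 1);

        System.out.println(rows.length);      // 4 independent arrays
        System.out.println(rows[0].length()); // 6 elements in each
    }
}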

Example 3 with CudaDoubleDataBuffer

Use of org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer in project nd4j by deeplearning4j.

From the class JCublasNDArrayFactory, method average.

@Override
public INDArray average(INDArray target, INDArray[] arrays) {
    if (arrays == null || arrays.length == 0)
        throw new RuntimeException("Input arrays are missing");
    if (arrays.length == 1)
        return target.assign(arrays[0]);
    // we do averaging on GPU only if ALL devices have p2p links
    if (nativeOps.isP2PAvailable() && CudaEnvironment.getInstance().getConfiguration().isCrossDeviceAccessAllowed()) {
        Nd4j.getExecutioner().push();
        long len = target != null ? target.lengthLong() : arrays[0].lengthLong();
        AtomicAllocator allocator = AtomicAllocator.getInstance();
        CudaContext context = allocator.getFlowController().prepareAction(target, arrays);
        PointerPointer extras = new PointerPointer(null, // not used
                        context.getOldStream(), allocator.getDeviceIdPointer(), new CudaPointer(0));
        Pointer z = target == null ? null : AtomicAllocator.getInstance().getPointer(target, context);
        long[] xPointers = new long[arrays.length];
        for (int i = 0; i < arrays.length; i++) {
            if (arrays[i].elementWiseStride() != 1)
                throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");
            if (arrays[i].lengthLong() != len)
                throw new ND4JIllegalStateException("All arrays should have equal length for averaging");
            AllocationPoint point = allocator.getAllocationPoint(arrays[i]);
            xPointers[i] = point.getPointers().getDevicePointer().address();
            point.tickDeviceWrite();
        }
        CudaDoubleDataBuffer tempX = new CudaDoubleDataBuffer(arrays.length);
        allocator.memcpyBlocking(tempX, new LongPointer(xPointers), xPointers.length * 8, 0);
        PointerPointer x = new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context));
        if (arrays[0].data().dataType() == DataBuffer.Type.DOUBLE) {
            nativeOps.averageDouble(extras, x, target == null ? null : (DoublePointer) z, arrays.length, len, true);
        } else if (arrays[0].data().dataType() == DataBuffer.Type.FLOAT) {
            nativeOps.averageFloat(extras, x, target == null ? null : (FloatPointer) z, arrays.length, len, true);
        } else {
            nativeOps.averageHalf(extras, x, target == null ? null : (ShortPointer) z, arrays.length, len, true);
        }
        allocator.getFlowController().registerAction(context, target, arrays);
        // touch the temp buffer so it stays reachable until the native call has consumed it
        tempX.address();
        return target;
    } else {
        // otherwise we do averaging on the CPU side
        /**
         * We expect all operations are complete at this point
         */
        long len = target == null ? arrays[0].lengthLong() : target.lengthLong();
        CudaContext context = (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext();
        PointerPointer dataPointers = new PointerPointer(arrays.length);
        PointerPointer extras = new PointerPointer(null, // not used
                        context.getOldStream(), AtomicAllocator.getInstance().getDeviceIdPointer(), new CudaPointer(1));
        for (int i = 0; i < arrays.length; i++) {
            Nd4j.getCompressor().autoDecompress(arrays[i]);
            if (arrays[i].elementWiseStride() != 1)
                throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");
            if (arrays[i].lengthLong() != len)
                throw new ND4JIllegalStateException("All arrays should have equal length for averaging");
            dataPointers.put(i, AtomicAllocator.getInstance().getHostPointer(arrays[i]));
        }
        if (arrays[0].data().dataType() == DataBuffer.Type.DOUBLE) {
            nativeOps.averageDouble(extras, dataPointers, target == null ? null : (DoublePointer) AtomicAllocator.getInstance().getHostPointer(target), arrays.length, len, true);
        } else if (arrays[0].data().dataType() == DataBuffer.Type.FLOAT) {
            nativeOps.averageFloat(extras, dataPointers, target == null ? null : (FloatPointer) AtomicAllocator.getInstance().getHostPointer(target), arrays.length, len, true);
        } else {
            nativeOps.averageHalf(extras, dataPointers, target == null ? null : (ShortPointer) AtomicAllocator.getInstance().getHostPointer(target), arrays.length, len, true);
        }
        if (target != null)
            AtomicAllocator.getInstance().getAllocationPoint(target).tickHostWrite();
        // TODO: make propagation optional maybe?
        if (true) {
            for (int i = 0; i < arrays.length; i++) {
                AtomicAllocator.getInstance().getAllocationPoint(arrays[i]).tickHostWrite();
            }
        }
        return target;
    }
}
Also used: AtomicAllocator(org.nd4j.jita.allocator.impl.AtomicAllocator), CudaContext(org.nd4j.linalg.jcublas.context.CudaContext), CudaPointer(org.nd4j.jita.allocator.pointers.CudaPointer), AllocationPoint(org.nd4j.jita.allocator.impl.AllocationPoint), CudaDoubleDataBuffer(org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer), ND4JIllegalStateException(org.nd4j.linalg.exception.ND4JIllegalStateException)
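Independent of the GPU and CPU branches above, the result of average is simply the element-wise arithmetic mean of the inputs written into target. A CPU-only reference sketch using standard nd4j ops (this shows the semantics, not the code path above):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class AverageReference {
    public static void main(String[] args) {
        INDArray[] arrays = { Nd4j.ones(2, 3), Nd4j.ones(2, 3).mul(3) };

        // target[j] = (1 / N) * sum_i arrays[i][j]
        INDArray target = Nd4j.zeros(2, 3);
        for (INDArray a : arrays)
            target.addi(a);
        target.divi(arrays.length);

        System.out.println(target); // every element is (1 + 3) / 2 = 2
    }
}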

Example 4 with CudaDoubleDataBuffer

Use of org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer in project nd4j by deeplearning4j.

From the class JCublasNDArrayFactory, method accumulate.

public INDArray accumulate(INDArray target, INDArray... arrays) {
    if (arrays == null || arrays.length == 0)
        throw new RuntimeException("Input arrays are missing");
    if (arrays.length == 1)
        return target.assign(arrays[0]);
    // we do averaging on GPU only if ALL devices have p2p links
    if (CudaEnvironment.getInstance().getConfiguration().isCrossDeviceAccessAllowed() && nativeOps.isP2PAvailable()) {
        Nd4j.getExecutioner().push();
        long len = target.lengthLong();
        AtomicAllocator allocator = AtomicAllocator.getInstance();
        CudaContext context = allocator.getFlowController().prepareAction(target, arrays);
        PointerPointer extras = new PointerPointer(null, // not used
                        context.getOldStream(), allocator.getDeviceIdPointer(), new CudaPointer(0));
        Pointer z = AtomicAllocator.getInstance().getPointer(target, context);
        long[] xPointers = new long[arrays.length];
        for (int i = 0; i < arrays.length; i++) {
            if (arrays[i].elementWiseStride() != 1)
                throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");
            if (arrays[i].lengthLong() != len)
                throw new ND4JIllegalStateException("All arrays should have equal length for averaging");
            AllocationPoint point = allocator.getAllocationPoint(arrays[i]);
            xPointers[i] = point.getPointers().getDevicePointer().address();
            point.tickDeviceWrite();
        }
        CudaDoubleDataBuffer tempX = new CudaDoubleDataBuffer(arrays.length);
        allocator.memcpyBlocking(tempX, new LongPointer(xPointers), xPointers.length * 8, 0);
        PointerPointer x = new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context));
        if (target.data().dataType() == DataBuffer.Type.DOUBLE) {
            nativeOps.accumulateDouble(extras, x, (DoublePointer) z, arrays.length, len);
        } else if (target.data().dataType() == DataBuffer.Type.FLOAT) {
            nativeOps.accumulateFloat(extras, x, (FloatPointer) z, arrays.length, len);
        } else {
            nativeOps.accumulateHalf(extras, x, (ShortPointer) z, arrays.length, len);
        }
        allocator.getFlowController().registerAction(context, target, arrays);
        // touch the temp buffer so it stays reachable until the native call has consumed it
        tempX.address();
        return target;
    } else {
        long len = target.lengthLong();
        Nd4j.getExecutioner().commit();
        CudaContext context = (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext();
        PointerPointer dataPointers = new PointerPointer(arrays.length);
        PointerPointer extras = new PointerPointer(null, // not used
                        context.getOldStream(), AtomicAllocator.getInstance().getDeviceIdPointer(), new CudaPointer(1));
        for (int i = 0; i < arrays.length; i++) {
            Nd4j.getCompressor().autoDecompress(arrays[i]);
            if (arrays[i].elementWiseStride() != 1)
                throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");
            if (arrays[i].lengthLong() != len)
                throw new ND4JIllegalStateException("All arrays should have equal length for averaging");
            dataPointers.put(i, AtomicAllocator.getInstance().getHostPointer(arrays[i]));
        }
        if (target.data().dataType() == DataBuffer.Type.DOUBLE) {
            nativeOps.accumulateDouble(extras, dataPointers, (DoublePointer) AtomicAllocator.getInstance().getHostPointer(target), arrays.length, len);
        } else if (target.data().dataType() == DataBuffer.Type.FLOAT) {
            nativeOps.accumulateFloat(extras, dataPointers, (FloatPointer) AtomicAllocator.getInstance().getHostPointer(target), arrays.length, len);
        } else {
            nativeOps.accumulateHalf(extras, dataPointers, (ShortPointer) AtomicAllocator.getInstance().getHostPointer(target), arrays.length, len);
        }
        AtomicAllocator.getInstance().getAllocationPoint(target).tickHostWrite();
        return target;
    }
}
Also used: AtomicAllocator(org.nd4j.jita.allocator.impl.AtomicAllocator), CudaContext(org.nd4j.linalg.jcublas.context.CudaContext), CudaPointer(org.nd4j.jita.allocator.pointers.CudaPointer), AllocationPoint(org.nd4j.jita.allocator.impl.AllocationPoint), CudaDoubleDataBuffer(org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer), ND4JIllegalStateException(org.nd4j.linalg.exception.ND4JIllegalStateException)

Example 5 with CudaDoubleDataBuffer

Use of org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer in project nd4j by deeplearning4j.

From the class JCublasNDArrayFactory, method shuffle.

/**
 * Symmetric in-place shuffle of a set of ndarrays
 * along a specified set of dimensions. Each array in the list should have its own
 * dimension entry at the same index of the dimensions list.
 *
 * @param arrays     the ndarrays to shuffle
 * @param rnd        the source of randomness used to build the shuffle map
 * @param dimensions the dimensions to shuffle along
 */
@Override
public void shuffle(List<INDArray> arrays, Random rnd, List<int[]> dimensions) {
    // no dimension - no shuffle
    if (dimensions == null || dimensions.size() == 0)
        throw new RuntimeException("Dimension can't be null or 0-length");
    if (arrays == null || arrays.size() == 0)
        throw new RuntimeException("No input arrays provided");
    if (dimensions.size() > 1 && arrays.size() != dimensions.size())
        throw new IllegalStateException("Number of dimensions do not match number of arrays to shuffle");
    Nd4j.getExecutioner().push();
    // first we build TAD for input array and dimensions
    AtomicAllocator allocator = AtomicAllocator.getInstance();
    CudaContext context = null;
    for (int x = 0; x < arrays.size(); x++) {
        context = allocator.getFlowController().prepareAction(arrays.get(x));
    }
    int tadLength = 1;
    for (int i = 0; i < dimensions.get(0).length; i++) {
        tadLength *= arrays.get(0).shape()[dimensions.get(0)[i]];
    }
    int numTads = arrays.get(0).length() / tadLength;
    int[] map = ArrayUtil.buildInterleavedVector(rnd, numTads);
    CudaIntDataBuffer shuffle = new CudaIntDataBuffer(map);
    Pointer shuffleMap = allocator.getPointer(shuffle, context);
    PointerPointer extras = new PointerPointer(null, // not used
                    context.getOldStream(), allocator.getDeviceIdPointer());
    long[] xPointers = new long[arrays.size()];
    long[] xShapes = new long[arrays.size()];
    long[] tadShapes = new long[arrays.size()];
    long[] tadOffsets = new long[arrays.size()];
    for (int i = 0; i < arrays.size(); i++) {
        INDArray array = arrays.get(i);
        Pointer x = AtomicAllocator.getInstance().getPointer(array, context);
        Pointer xShapeInfo = AtomicAllocator.getInstance().getPointer(array.shapeInfoDataBuffer(), context);
        TADManager tadManager = Nd4j.getExecutioner().getTADManager();
        int[] dimension = dimensions.size() > 1 ? dimensions.get(i) : dimensions.get(0);
        Pair<DataBuffer, DataBuffer> tadBuffers = tadManager.getTADOnlyShapeInfo(array, dimension);
        // log.info("Original shape: {}; dimension: {}; TAD shape: {}", array.shapeInfoDataBuffer().asInt(), dimension, tadBuffers.getFirst().asInt());
        Pointer tadShapeInfo = AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context);
        DataBuffer offsets = tadBuffers.getSecond();
        if (offsets.length() != numTads)
            throw new ND4JIllegalStateException("Can't symmetrically shuffle arrays with non-equal number of TADs");
        Pointer tadOffset = AtomicAllocator.getInstance().getPointer(offsets, context);
        xPointers[i] = x.address();
        xShapes[i] = xShapeInfo.address();
        tadShapes[i] = tadShapeInfo.address();
        tadOffsets[i] = tadOffset.address();
    }
    CudaDoubleDataBuffer tempX = new CudaDoubleDataBuffer(arrays.size());
    CudaDoubleDataBuffer tempShapes = new CudaDoubleDataBuffer(arrays.size());
    CudaDoubleDataBuffer tempTAD = new CudaDoubleDataBuffer(arrays.size());
    CudaDoubleDataBuffer tempOffsets = new CudaDoubleDataBuffer(arrays.size());
    AtomicAllocator.getInstance().memcpyBlocking(tempX, new LongPointer(xPointers), xPointers.length * 8, 0);
    AtomicAllocator.getInstance().memcpyBlocking(tempShapes, new LongPointer(xShapes), xPointers.length * 8, 0);
    AtomicAllocator.getInstance().memcpyBlocking(tempTAD, new LongPointer(tadShapes), xPointers.length * 8, 0);
    AtomicAllocator.getInstance().memcpyBlocking(tempOffsets, new LongPointer(tadOffsets), xPointers.length * 8, 0);
    if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) {
        nativeOps.shuffleDouble(extras, new PointerPointer(allocator.getPointer(tempX, context)), new PointerPointer(allocator.getPointer(tempShapes, context)), new PointerPointer(allocator.getPointer(tempX, context)), new PointerPointer(allocator.getPointer(tempShapes, context)), arrays.size(), (IntPointer) shuffleMap, new PointerPointer(allocator.getPointer(tempTAD, context)), new PointerPointer(allocator.getPointer(tempOffsets, context)));
    } else if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
        nativeOps.shuffleFloat(extras, new PointerPointer(allocator.getPointer(tempX, context)), new PointerPointer(allocator.getPointer(tempShapes, context)), new PointerPointer(allocator.getPointer(tempX, context)), new PointerPointer(allocator.getPointer(tempShapes, context)), arrays.size(), (IntPointer) shuffleMap, new PointerPointer(allocator.getPointer(tempTAD, context)), new PointerPointer(allocator.getPointer(tempOffsets, context)));
    } else {
        // HALFs
        nativeOps.shuffleHalf(extras, new PointerPointer(allocator.getPointer(tempX, context)), new PointerPointer(allocator.getPointer(tempShapes, context)), new PointerPointer(allocator.getPointer(tempX, context)), new PointerPointer(allocator.getPointer(tempShapes, context)), arrays.size(), (IntPointer) shuffleMap, new PointerPointer(allocator.getPointer(tempTAD, context)), new PointerPointer(allocator.getPointer(tempOffsets, context)));
    }
    for (int f = 0; f < arrays.size(); f++) {
        allocator.getFlowController().registerAction(context, arrays.get(f));
    }
    // just to keep reference
    shuffle.address();
    tempX.dataType();
    tempShapes.dataType();
    tempOffsets.dataType();
    tempTAD.dataType();
}
Also used: ND4JIllegalStateException(org.nd4j.linalg.exception.ND4JIllegalStateException), AtomicAllocator(org.nd4j.jita.allocator.impl.AtomicAllocator), CudaContext(org.nd4j.linalg.jcublas.context.CudaContext), CudaPointer(org.nd4j.jita.allocator.pointers.CudaPointer), CudaIntDataBuffer(org.nd4j.linalg.jcublas.buffer.CudaIntDataBuffer), AllocationPoint(org.nd4j.jita.allocator.impl.AllocationPoint), INDArray(org.nd4j.linalg.api.ndarray.INDArray), CudaDoubleDataBuffer(org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer), TADManager(org.nd4j.linalg.cache.TADManager), DataBuffer(org.nd4j.linalg.api.buffer.DataBuffer), CompressedDataBuffer(org.nd4j.linalg.compression.CompressedDataBuffer)
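A hedged usage sketch of the symmetric shuffle: every array in the list is permuted along its TADs with one shared random map, so corresponding rows stay aligned across arrays (the typical features/labels pairing). It assumes Nd4j.shuffle(List, Random, List<int[]>) forwards to this factory method; the exact overload may differ between versions.

import java.util.Arrays;
import java.util.Random;

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class ShuffleExample {
    public static void main(String[] args) {
        // 10 examples: features are 10x2, labels are 10x1
        INDArray features = Nd4j.linspace(1, 20, 20).reshape(10, 2);
        INDArray labels = Nd4j.linspace(1, 10, 10).reshape(10, 1);

        // Shuffle both along dimension 1 (row TADs) using one shared permutation,
        // so row i of features and row i of labels land at the same new position.
        Nd4j.shuffle(Arrays.asList(features, labels), new Random(42),
                        Arrays.asList(new int[] { 1 }, new int[] { 1 }));

        System.out.println(features);
        System.out.println(labels);
    }
}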

Aggregations

CudaDoubleDataBuffer (org.nd4j.linalg.jcublas.buffer.CudaDoubleDataBuffer) - 7 uses
AllocationPoint (org.nd4j.jita.allocator.impl.AllocationPoint) - 5 uses
CudaContext (org.nd4j.linalg.jcublas.context.CudaContext) - 5 uses
AtomicAllocator (org.nd4j.jita.allocator.impl.AtomicAllocator) - 4 uses
CudaPointer (org.nd4j.jita.allocator.pointers.CudaPointer) - 4 uses
DataBuffer (org.nd4j.linalg.api.buffer.DataBuffer) - 4 uses
CompressedDataBuffer (org.nd4j.linalg.compression.CompressedDataBuffer) - 4 uses
CudaIntDataBuffer (org.nd4j.linalg.jcublas.buffer.CudaIntDataBuffer) - 4 uses
INDArray (org.nd4j.linalg.api.ndarray.INDArray) - 3 uses
ND4JIllegalStateException (org.nd4j.linalg.exception.ND4JIllegalStateException) - 3 uses
TADManager (org.nd4j.linalg.cache.TADManager) - 2 uses
LongPointer (org.bytedeco.javacpp.LongPointer) - 1 use
GridExecutioner (org.nd4j.linalg.api.ops.executioner.GridExecutioner) - 1 use
LongPointerWrapper (org.nd4j.nativeblas.LongPointerWrapper) - 1 use