Use of org.nd4j.linalg.api.buffer.DataBuffer in project nd4j by deeplearning4j:
class BaseShapeInfoProvider, method createShapeInformation.
/**
 * Builds the shape-information buffer for the given array description,
 * marks it constant, and returns it paired with its {@code int[]} view.
 */
@Override
public Pair<DataBuffer, int[]> createShapeInformation(int[] shape, int[] stride, long offset, int elementWiseStride, char order) {
DataBuffer shapeInfo = Shape.createShapeInformation(shape, stride, offset, elementWiseStride, order);
shapeInfo.setConstant(true);
int[] intView = shapeInfo.asInt();
return Pair.create(shapeInfo, intView);
}
Use of org.nd4j.linalg.api.buffer.DataBuffer in project nd4j by deeplearning4j:
class BaseSparseNDArray, method reallocate.
/**
 * Allocates a new buffer with twice the capacity of {@code buffer} and copies
 * the existing contents into it. The source buffer is not modified.
 *
 * @param buffer the buffer to grow; must hold INT, DOUBLE or FLOAT data
 * @return a freshly allocated buffer of twice the original length
 * @throws ArithmeticException if the doubled size does not fit in an int
 * @throws UnsupportedOperationException for HALF, COMPRESSED or unknown types
 */
protected DataBuffer reallocate(DataBuffer buffer) {
// Double as a long first, then narrow with an overflow check. The previous
// form "(int) buffer.length() * 2" truncated to int before doubling and
// could silently wrap negative for very large buffers.
int newSize = Math.toIntExact(buffer.length() * 2L);
DataBuffer newBuffer = Nd4j.createBuffer(newSize);
switch (buffer.dataType()) {
case INT:
newBuffer.setData(buffer.asInt());
break;
case DOUBLE:
newBuffer.setData(buffer.asDouble());
break;
case FLOAT:
newBuffer.setData(buffer.asFloat());
break;
case HALF: // TODO: half-precision reallocation not yet supported
case COMPRESSED: // TODO: compressed reallocation not yet supported
default:
throw new UnsupportedOperationException();
}
return newBuffer;
}
Use of org.nd4j.linalg.api.buffer.DataBuffer in project nd4j by deeplearning4j:
class BaseOp, method extraArgsBuff.
/**
 * Packs {@code extraArgs} into an NIO buffer whose element width matches the
 * data type of {@code x}: a float view when x holds FLOAT data, otherwise a
 * double view. Returns {@code null} when there are no extra arguments.
 */
@Override
public Buffer extraArgsBuff() {
if (extraArgs == null) {
return null;
}
if (x.data().dataType() == DataBuffer.Type.FLOAT) {
DataBuffer args = Nd4j.createBuffer(new float[extraArgs.length]);
for (int i = 0; i < extraArgs.length; i++) {
args.put(i, ((Number) extraArgs[i]).floatValue());
}
return args.asNioFloat();
}
DataBuffer args = Nd4j.createBuffer(new double[extraArgs.length]);
for (int i = 0; i < extraArgs.length; i++) {
args.put(i, ((Number) extraArgs[i]).doubleValue());
}
return args.asNioDouble();
}
Use of org.nd4j.linalg.api.buffer.DataBuffer in project nd4j by deeplearning4j:
class SparseBaseLevel1, method axpy.
/**
 * Adds a scalar multiple of a compressed sparse vector to a full-storage
 * vector: y := alpha * x + y.
 *
 * @param n     the number of elements to process
 * @param alpha the scalar multiplier applied to {@code x}
 * @param x     a sparse vector
 * @param y     a dense vector
 * @throws UnsupportedOperationException if {@code x} is not sparse, or if the
 *         data type is not DOUBLE, FLOAT or HALF
 */
@Override
public void axpy(int n, double alpha, INDArray x, INDArray y) {
// Guard instead of a blind cast (which threw ClassCastException for dense
// input); this matches the instanceof check used by dot() in this class.
if (!(x instanceof BaseSparseNDArray)) {
throw new UnsupportedOperationException();
}
BaseSparseNDArray sparseX = (BaseSparseNDArray) x;
DataBuffer pointers = sparseX.getVectorCoordinates();
switch (x.data().dataType()) {
case DOUBLE:
DefaultOpExecutioner.validateDataType(DataBuffer.Type.DOUBLE, x);
DefaultOpExecutioner.validateDataType(DataBuffer.Type.DOUBLE, y);
daxpyi(n, alpha, x, pointers, y);
break;
case FLOAT:
DefaultOpExecutioner.validateDataType(DataBuffer.Type.FLOAT, x);
DefaultOpExecutioner.validateDataType(DataBuffer.Type.FLOAT, y);
saxpyi(n, alpha, x, pointers, y);
break;
case HALF:
DefaultOpExecutioner.validateDataType(DataBuffer.Type.HALF, x);
DefaultOpExecutioner.validateDataType(DataBuffer.Type.HALF, y);
haxpyi(n, alpha, x, pointers, y);
break;
default:
throw new UnsupportedOperationException();
}
}
Use of org.nd4j.linalg.api.buffer.DataBuffer in project nd4j by deeplearning4j:
class SparseBaseLevel1, method dot.
/**
 * Computes the dot product of a compressed sparse vector {@code X} with a
 * dense vector {@code Y}.
 *
 * @param n     the number of elements accessed
 * @param alpha unused; kept for interface compatibility
 * @param X     a sparse INDArray
 * @param Y     a dense INDArray
 * @return the vector-vector dot product of X and Y
 * @throws UnsupportedOperationException if {@code X} is not sparse or the
 *         data type is not DOUBLE, FLOAT or HALF
 */
@Override
public double dot(int n, double alpha, INDArray X, INDArray Y) {
if (X instanceof BaseSparseNDArray) {
DataBuffer indices = ((BaseSparseNDArray) X).getVectorCoordinates();
DataBuffer.Type type = X.data().dataType();
if (type == DataBuffer.Type.DOUBLE) {
DefaultOpExecutioner.validateDataType(DataBuffer.Type.DOUBLE, X, Y);
return ddoti(n, X, indices, Y);
}
if (type == DataBuffer.Type.FLOAT) {
DefaultOpExecutioner.validateDataType(DataBuffer.Type.FLOAT, X, Y);
return sdoti(n, X, indices, Y);
}
if (type == DataBuffer.Type.HALF) {
DefaultOpExecutioner.validateDataType(DataBuffer.Type.HALF, X, Y);
return hdoti(n, X, indices, Y);
}
}
// Non-sparse input or an unsupported data type both land here.
throw new UnsupportedOperationException();
}
Aggregations