Use of org.apache.sysml.runtime.matrix.operators.ScalarOperator in project incubator-systemml by apache.
The class BasicScalarOperationsTest, method runScalarOperationsTest:
private static void runScalarOperationsTest(SparsityType sptype, ValueType vtype, boolean compress) {
    try {
        // prepare sparsity for input data
        double sparsity = -1;
        switch(sptype) {
            case DENSE:
                sparsity = sparsity1;
                break;
            case SPARSE:
                sparsity = sparsity2;
                break;
            case EMPTY:
                sparsity = sparsity3;
                break;
        }
        // generate input data
        double min = (vtype == ValueType.CONST) ? 10 : -10;
        double[][] input = TestUtils.generateTestMatrix(rows, cols, min, 10, sparsity, 7);
        if (vtype == ValueType.RAND_ROUND_OLE || vtype == ValueType.RAND_ROUND_DDC) {
            CompressedMatrixBlock.ALLOW_DDC_ENCODING = (vtype == ValueType.RAND_ROUND_DDC);
            input = TestUtils.round(input);
        }
        MatrixBlock mb = DataConverter.convertToMatrixBlock(input);
        // compress given matrix block
        CompressedMatrixBlock cmb = new CompressedMatrixBlock(mb);
        if (compress)
            cmb.compress();
        // matrix-scalar uncompressed
        ScalarOperator sop = new RightScalarOperator(Multiply.getMultiplyFnObject(), 7);
        MatrixBlock ret1 = (MatrixBlock) mb.scalarOperations(sop, new MatrixBlock());
        // matrix-scalar compressed
        MatrixBlock ret2 = (MatrixBlock) cmb.scalarOperations(sop, new MatrixBlock());
        if (compress)
            ret2 = ((CompressedMatrixBlock) ret2).decompress();
        // compare uncompressed and compressed results
        double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
        double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
        TestUtils.compareMatrices(d1, d2, rows, cols, 0.0000001);
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    } finally {
        CompressedMatrixBlock.ALLOW_DDC_ENCODING = true;
    }
}
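The core matrix-scalar pattern in this test can be exercised on its own. Below is a minimal, self-contained sketch (not from the project) that multiplies every cell of an illustrative 2x2 matrix by 7 with a RightScalarOperator; the import paths follow the usual incubator-systemml packages, and the input values are assumptions for illustration only.

// Minimal sketch: cell * 7 for every cell, using a RightScalarOperator.
// The 2x2 input values are illustrative assumptions.
import org.apache.sysml.runtime.functionobjects.Multiply;
import org.apache.sysml.runtime.matrix.data.MatrixBlock;
import org.apache.sysml.runtime.matrix.operators.RightScalarOperator;
import org.apache.sysml.runtime.matrix.operators.ScalarOperator;
import org.apache.sysml.runtime.util.DataConverter;

public class ScalarOperatorSketch {
    public static void main(String[] args) throws Exception {
        double[][] input = { { 1, 2 }, { 3, 4 } };
        MatrixBlock mb = DataConverter.convertToMatrixBlock(input);
        // the right operand is the scalar: computes cell * 7
        ScalarOperator sop = new RightScalarOperator(Multiply.getMultiplyFnObject(), 7);
        MatrixBlock ret = (MatrixBlock) mb.scalarOperations(sop, new MatrixBlock());
        double[][] out = DataConverter.convertToDoubleMatrix(ret);
        System.out.println(out[1][1]); // 28.0
    }
}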
Use of org.apache.sysml.runtime.matrix.operators.ScalarOperator in project incubator-systemml by apache.
The class BasicScalarOperationsSparseUnsafeTest, method runScalarOperationsTest:
private static void runScalarOperationsTest(SparsityType sptype, ValueType vtype, boolean compress) {
    try {
        // prepare sparsity for input data
        double sparsity = -1;
        switch(sptype) {
            case DENSE:
                sparsity = sparsity1;
                break;
            case SPARSE:
                sparsity = sparsity2;
                break;
            case EMPTY:
                sparsity = sparsity3;
                break;
        }
        // generate input data
        double min = (vtype == ValueType.CONST) ? 10 : -10;
        double[][] input = TestUtils.generateTestMatrix(rows, cols, min, 10, sparsity, 7);
        if (vtype == ValueType.RAND_ROUND_OLE || vtype == ValueType.RAND_ROUND_DDC) {
            CompressedMatrixBlock.ALLOW_DDC_ENCODING = (vtype == ValueType.RAND_ROUND_DDC);
            input = TestUtils.round(input);
        }
        MatrixBlock mb = DataConverter.convertToMatrixBlock(input);
        // compress given matrix block
        CompressedMatrixBlock cmb = new CompressedMatrixBlock(mb);
        if (compress)
            cmb.compress();
        // matrix-scalar uncompressed
        ScalarOperator sop = new RightScalarOperator(Plus.getPlusFnObject(), 7);
        MatrixBlock ret1 = (MatrixBlock) mb.scalarOperations(sop, new MatrixBlock());
        // matrix-scalar compressed
        MatrixBlock ret2 = (MatrixBlock) cmb.scalarOperations(sop, new MatrixBlock());
        if (compress)
            ret2 = ((CompressedMatrixBlock) ret2).decompress();
        // compare uncompressed and compressed results
        double[][] d1 = DataConverter.convertToDoubleMatrix(ret1);
        double[][] d2 = DataConverter.convertToDoubleMatrix(ret2);
        TestUtils.compareMatrices(d1, d2, rows, cols, 0.0000001);
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    } finally {
        CompressedMatrixBlock.ALLOW_DDC_ENCODING = true;
    }
}
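The only change from BasicScalarOperationsTest is the function object: Plus instead of Multiply. Plus is sparse-unsafe because adding a non-zero constant turns zero cells into non-zeros, so a sparse input yields a fully dense result. A hedged sketch of that effect follows; the 3x3 input is illustrative, and getNonZeros() is assumed to report the block's non-zero count.

// Sketch: adding a non-zero constant densifies a mostly-zero matrix.
import org.apache.sysml.runtime.functionobjects.Plus;
import org.apache.sysml.runtime.matrix.data.MatrixBlock;
import org.apache.sysml.runtime.matrix.operators.RightScalarOperator;
import org.apache.sysml.runtime.matrix.operators.ScalarOperator;
import org.apache.sysml.runtime.util.DataConverter;

public class SparseUnsafePlusSketch {
    public static void main(String[] args) throws Exception {
        // mostly-zero input: only 2 of 9 cells are non-zero
        double[][] input = { { 0, 0, 5 }, { 0, 0, 0 }, { 0, 2, 0 } };
        MatrixBlock mb = DataConverter.convertToMatrixBlock(input);
        System.out.println(mb.getNonZeros()); // 2
        // cell + 7 for every cell, including the zeros
        ScalarOperator sop = new RightScalarOperator(Plus.getPlusFnObject(), 7);
        MatrixBlock ret = (MatrixBlock) mb.scalarOperations(sop, new MatrixBlock());
        System.out.println(ret.getNonZeros()); // 9, the result is fully dense
    }
}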
Use of org.apache.sysml.runtime.matrix.operators.ScalarOperator in project incubator-systemml by apache.
The class ScalarMatrixArithmeticGPUInstruction, method processInstruction:
@Override
public void processInstruction(ExecutionContext ec) {
    GPUStatistics.incrementNoOfExecutedGPUInst();
    // determine which operand is the matrix and which is the scalar
    CPOperand mat = (_input1.getDataType() == DataType.MATRIX) ? _input1 : _input2;
    CPOperand scalar = (_input1.getDataType() == DataType.MATRIX) ? _input2 : _input1;
    MatrixObject in1 = getMatrixInputForGPUInstruction(ec, mat.getName());
    ScalarObject constant = (ScalarObject) ec.getScalarInput(scalar.getName(), scalar.getValueType(), scalar.isLiteral());
    // set output metadata to the (possibly transposed) input dimensions
    boolean isTransposed = false;
    int rlen = isTransposed ? (int) in1.getNumColumns() : (int) in1.getNumRows();
    int clen = isTransposed ? (int) in1.getNumRows() : (int) in1.getNumColumns();
    ec.setMetaData(_output.getName(), rlen, clen);
    // bind the runtime scalar value into the operator and execute on the GPU
    ScalarOperator sc_op = (ScalarOperator) _optr;
    sc_op = sc_op.setConstant(constant.getDoubleValue());
    LibMatrixCUDA.matrixScalarArithmetic(ec, ec.getGPUContext(0), getExtendedOpcode(), in1, _output.getName(), isTransposed, sc_op);
    ec.releaseMatrixInputForGPUInstruction(mat.getName());
    ec.releaseMatrixOutputForGPUInstruction(_output.getName());
}
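The setConstant call is how the runtime binds the scalar value, known only at execution time, into an operator created during compilation. A minimal CPU-side sketch of the same pattern (not GPU code), assuming executeScalar(double) applies the operator's function to a single value with the bound constant:

// Sketch: rebind a constant into an existing operator, then apply it to single values.
// executeScalar(double) is assumed to compute fn(value, constant) for a RightScalarOperator.
import org.apache.sysml.runtime.functionobjects.Multiply;
import org.apache.sysml.runtime.matrix.operators.RightScalarOperator;
import org.apache.sysml.runtime.matrix.operators.ScalarOperator;

public class SetConstantSketch {
    public static void main(String[] args) throws Exception {
        // placeholder constant 0, as if the operator came from the compiler
        ScalarOperator sc_op = new RightScalarOperator(Multiply.getMultiplyFnObject(), 0);
        // bind the actual scalar operand at runtime
        sc_op = sc_op.setConstant(3.0);
        System.out.println(sc_op.executeScalar(4.0)); // 12.0
    }
}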
Use of org.apache.sysml.runtime.matrix.operators.ScalarOperator in project incubator-systemml by apache.
The class ScalarMatrixRelationalBinaryGPUInstruction, method processInstruction:
@Override
public void processInstruction(ExecutionContext ec) {
    GPUStatistics.incrementNoOfExecutedGPUInst();
    // determine which operand is the matrix and which is the scalar
    CPOperand mat = (_input1.getDataType() == Expression.DataType.MATRIX) ? _input1 : _input2;
    CPOperand scalar = (_input1.getDataType() == Expression.DataType.MATRIX) ? _input2 : _input1;
    MatrixObject in1 = getMatrixInputForGPUInstruction(ec, mat.getName());
    ScalarObject constant = (ScalarObject) ec.getScalarInput(scalar.getName(), scalar.getValueType(), scalar.isLiteral());
    // the output has the same dimensions as the matrix input
    int rlen = (int) in1.getNumRows();
    int clen = (int) in1.getNumColumns();
    ec.setMetaData(_output.getName(), rlen, clen);
    // bind the runtime scalar value into the operator and execute on the GPU
    ScalarOperator sc_op = (ScalarOperator) _optr;
    sc_op = sc_op.setConstant(constant.getDoubleValue());
    LibMatrixCUDA.matrixScalarRelational(ec, ec.getGPUContext(0), getExtendedOpcode(), in1, _output.getName(), sc_op);
    ec.releaseMatrixInputForGPUInstruction(mat.getName());
    ec.releaseMatrixOutputForGPUInstruction(_output.getName());
}
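A relational matrix-scalar operator produces a 0/1 indicator matrix. The CPU-side sketch below compares each cell of an illustrative 2x2 matrix against the constant 2; it assumes GreaterThan exposes getGreaterThanFnObject() in the same accessor style as Plus and Multiply above.

// Sketch: CPU-side analogue of a matrix-scalar relational op, producing a 0/1 matrix.
// GreaterThan.getGreaterThanFnObject() and the 2x2 input are assumptions for illustration.
import org.apache.sysml.runtime.functionobjects.GreaterThan;
import org.apache.sysml.runtime.matrix.data.MatrixBlock;
import org.apache.sysml.runtime.matrix.operators.RightScalarOperator;
import org.apache.sysml.runtime.matrix.operators.ScalarOperator;
import org.apache.sysml.runtime.util.DataConverter;

public class RelationalScalarSketch {
    public static void main(String[] args) throws Exception {
        double[][] input = { { -1, 3 }, { 0, 8 } };
        MatrixBlock mb = DataConverter.convertToMatrixBlock(input);
        // computes (cell > 2) for every cell, yielding 0/1 values
        ScalarOperator sop = new RightScalarOperator(GreaterThan.getGreaterThanFnObject(), 2);
        MatrixBlock ret = (MatrixBlock) mb.scalarOperations(sop, new MatrixBlock());
        double[][] out = DataConverter.convertToDoubleMatrix(ret);
        System.out.println(out[0][1] + " " + out[1][0]); // 1.0 0.0
    }
}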
Use of org.apache.sysml.runtime.matrix.operators.ScalarOperator in project incubator-systemml by apache.
The class ScalarInstruction, method parseInstruction:
public static ScalarInstruction parseInstruction(String str) {
    InstructionUtils.checkNumFields(str, 3);
    String[] parts = InstructionUtils.getInstructionParts(str);
    String opcode = parts[0];
    // the scalar constant may appear as the first or the second argument
    boolean firstArgScalar = isFirstArgumentScalar(str);
    double cst = Double.parseDouble(firstArgScalar ? parts[1] : parts[2]);
    byte in = Byte.parseByte(firstArgScalar ? parts[2] : parts[1]);
    byte out = Byte.parseByte(parts[3]);
    // resolve the opcode to a scalar operator, using the argument order to pick the variant
    ScalarOperator sop = InstructionUtils.parseScalarBinaryOperator(opcode, firstArgScalar, cst);
    return new ScalarInstruction(sop, in, out, str);
}
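parseScalarBinaryOperator can also be called directly when an opcode string is already at hand. A hedged sketch, assuming the opcode "*" resolves to Multiply and that executeScalar(double) applies the resulting operator to a single value:

// Sketch: resolve an opcode string to a ScalarOperator and apply it.
// The opcode "*" and executeScalar(double) are assumptions about InstructionUtils/ScalarOperator.
import org.apache.sysml.runtime.instructions.InstructionUtils;
import org.apache.sysml.runtime.matrix.operators.ScalarOperator;

public class ParseScalarOperatorSketch {
    public static void main(String[] args) throws Exception {
        // arg1IsScalar == false: the scalar is the second (right) operand, constant = 7
        ScalarOperator sop = InstructionUtils.parseScalarBinaryOperator("*", false, 7);
        System.out.println(sop.executeScalar(6.0)); // 42.0
    }
}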