Use of org.apache.sysml.runtime.matrix.operators.BinaryOperator in project incubator-systemml by apache.
The class CompressedMatrixBlock, method chainMatrixMultOperations (multi-threaded variant).
@Override
public MatrixBlock chainMatrixMultOperations(MatrixBlock v, MatrixBlock w, MatrixBlock out, ChainType ctype, int k) {
    // call uncompressed matrix mult if necessary
    if (!isCompressed()) {
        return super.chainMatrixMultOperations(v, w, out, ctype, k);
    }
    // multi-threaded mmchain of single uncompressed colgroup
    if (isSingleUncompressedGroup()) {
        return ((ColGroupUncompressed) _colGroups.get(0)).getData().chainMatrixMultOperations(v, w, out, ctype, k);
    }
    Timing time = LOG.isDebugEnabled() ? new Timing(true) : null;
    // prepare result
    if (out != null)
        out.reset(clen, 1, false);
    else
        out = new MatrixBlock(clen, 1, false);
    // empty block handling
    if (isEmptyBlock(false))
        return out;
    // compute matrix mult
    MatrixBlock tmp = new MatrixBlock(rlen, 1, false);
    rightMultByVector(v, tmp, k);
    if (ctype == ChainType.XtwXv) {
        BinaryOperator bop = new BinaryOperator(Multiply.getMultiplyFnObject());
        LibMatrixBincell.bincellOpInPlace(tmp, w, bop);
    }
    leftMultByVectorTranspose(_colGroups, tmp, out, true, k);
    if (LOG.isDebugEnabled())
        LOG.debug("Compressed MMChain k=" + k + " in " + time.stop());
    return out;
}
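For context, a minimal usage sketch of the multi-threaded variant above. It assumes the CompressedMatrixBlock(MatrixBlock) constructor, compress(), and MatrixBlock.randOperations(...) are available in this form (they may differ across SystemML versions); for ChainType.XtwXv the call computes t(X) %*% (w * (X %*% v)).

// a minimal sketch, assuming CompressedMatrixBlock(MatrixBlock), compress() and
// MatrixBlock.randOperations(rows, cols, sparsity, min, max, pdf, seed) exist in this form
MatrixBlock X = MatrixBlock.randOperations(1000, 100, 0.1, 0, 1, "uniform", 7);
MatrixBlock v = MatrixBlock.randOperations(100, 1, 1.0, 0, 1, "uniform", 3);
MatrixBlock w = MatrixBlock.randOperations(1000, 1, 1.0, 0, 1, "uniform", 5);
CompressedMatrixBlock cmb = new CompressedMatrixBlock(X);
cmb.compress(); // build column groups; falls back to uncompressed data if not beneficial
// t(X) %*% (w * (X %*% v)) with 4 threads
MatrixBlock res = cmb.chainMatrixMultOperations(v, w, new MatrixBlock(), ChainType.XtwXv, 4);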
Use of org.apache.sysml.runtime.matrix.operators.BinaryOperator in project incubator-systemml by apache.
The class CompressedMatrixBlock, method chainMatrixMultOperations (single-threaded variant).
@Override
public MatrixBlock chainMatrixMultOperations(MatrixBlock v, MatrixBlock w, MatrixBlock out, ChainType ctype) {
    // call uncompressed matrix mult if necessary
    if (!isCompressed()) {
        return super.chainMatrixMultOperations(v, w, out, ctype);
    }
    // single-threaded mmchain of single uncompressed colgroup
    if (isSingleUncompressedGroup()) {
        return ((ColGroupUncompressed) _colGroups.get(0)).getData().chainMatrixMultOperations(v, w, out, ctype);
    }
    // prepare result
    if (out != null)
        out.reset(clen, 1, false);
    else
        out = new MatrixBlock(clen, 1, false);
    // empty block handling
    if (isEmptyBlock(false))
        return out;
    // compute matrix mult
    MatrixBlock tmp = new MatrixBlock(rlen, 1, false);
    rightMultByVector(v, tmp);
    if (ctype == ChainType.XtwXv) {
        BinaryOperator bop = new BinaryOperator(Multiply.getMultiplyFnObject());
        LibMatrixBincell.bincellOpInPlace(tmp, w, bop);
    }
    leftMultByVectorTranspose(_colGroups, tmp, out, true, true);
    return out;
}
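The BinaryOperator usage common to both overloads is the element-wise weighting of the intermediate X %*% v with w. A minimal sketch of that step in isolation, using the same Multiply function object and LibMatrixBincell call as above (the 1000x1 dimensions are illustrative only):

// element-wise tmp = tmp * w via a Multiply-based BinaryOperator
MatrixBlock tmp = MatrixBlock.randOperations(1000, 1, 1.0, 0, 1, "uniform", 11);
MatrixBlock wv = MatrixBlock.randOperations(1000, 1, 1.0, 0, 1, "uniform", 13);
BinaryOperator mult = new BinaryOperator(Multiply.getMultiplyFnObject());
LibMatrixBincell.bincellOpInPlace(tmp, wv, mult); // tmp now holds w * (X %*% v)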
Use of org.apache.sysml.runtime.matrix.operators.BinaryOperator in project incubator-systemml by apache.
The class BuiltinBinaryGPUInstruction, method parseInstruction.
public static BuiltinBinaryGPUInstruction parseInstruction(String str) {
    CPOperand in1 = new CPOperand("", Expression.ValueType.UNKNOWN, Expression.DataType.UNKNOWN);
    CPOperand in2 = new CPOperand("", Expression.ValueType.UNKNOWN, Expression.DataType.UNKNOWN);
    CPOperand out = new CPOperand("", Expression.ValueType.UNKNOWN, Expression.DataType.UNKNOWN);
    String[] parts = InstructionUtils.getInstructionPartsWithValueType(str);
    InstructionUtils.checkNumFields(parts, 3);
    String opcode = parts[0];
    in1.split(parts[1]);
    in2.split(parts[2]);
    out.split(parts[3]);
    // check for valid data type of output
    if ((in1.getDataType() == Expression.DataType.MATRIX || in2.getDataType() == Expression.DataType.MATRIX) && out.getDataType() != Expression.DataType.MATRIX)
        throw new DMLRuntimeException("Element-wise matrix operations between variables " + in1.getName() + " and " + in2.getName() + " must produce a matrix, which " + out.getName() + " is not");
    // Determine appropriate Function Object based on opcode
    ValueFunction func = Builtin.getBuiltinFnObject(opcode);
    boolean isMatrixMatrix = in1.getDataType() == Expression.DataType.MATRIX && in2.getDataType() == Expression.DataType.MATRIX;
    boolean isMatrixScalar = (in1.getDataType() == Expression.DataType.MATRIX && in2.getDataType() == Expression.DataType.SCALAR) || (in1.getDataType() == Expression.DataType.SCALAR && in2.getDataType() == Expression.DataType.MATRIX);
    if (in1.getDataType() == Expression.DataType.SCALAR && in2.getDataType() == Expression.DataType.SCALAR)
        throw new DMLRuntimeException("GPU : Unsupported GPU builtin operations on 2 scalars");
    else if (isMatrixMatrix && opcode.equals("solve"))
        return new MatrixMatrixBuiltinGPUInstruction(new BinaryOperator(func), in1, in2, out, opcode, str, 2);
    else if (isMatrixScalar && (opcode.equals("min") || opcode.equals("max")))
        return new ScalarMatrixBuiltinGPUInstruction(new BinaryOperator(func), in1, in2, out, opcode, str, 2);
    else
        throw new DMLRuntimeException("GPU : Unsupported GPU builtin operations on a matrix and a scalar:" + opcode);
}
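Here the parser only accepts matrix-matrix "solve" and matrix-scalar "min"/"max"; everything else throws. In all accepted cases the instruction carries a BinaryOperator wrapping a builtin function object, along the lines of the following sketch (assuming Builtin refers to org.apache.sysml.runtime.functionobjects.Builtin; the "max" opcode is chosen purely for illustration):

// sketch: the operator wired into a matrix-scalar "max" GPU instruction
// (Builtin assumed to be org.apache.sysml.runtime.functionobjects.Builtin)
ValueFunction fn = Builtin.getBuiltinFnObject("max");
BinaryOperator op = new BinaryOperator(fn);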
Use of org.apache.sysml.runtime.matrix.operators.BinaryOperator in project incubator-systemml by apache.
The class MatrixMatrixArithmeticGPUInstruction, method processInstruction.
@Override
public void processInstruction(ExecutionContext ec) {
    GPUStatistics.incrementNoOfExecutedGPUInst();
    MatrixObject in1 = getMatrixInputForGPUInstruction(ec, _input1.getName());
    MatrixObject in2 = getMatrixInputForGPUInstruction(ec, _input2.getName());
    // TODO: make hop level changes for this
    boolean isLeftTransposed = false;
    boolean isRightTransposed = false;
    long rlen1 = in1.getNumRows();
    long clen1 = in1.getNumColumns();
    long rlen2 = in2.getNumRows();
    long clen2 = in2.getNumColumns();
    // Assume ordinary binary op
    long rlen = rlen1;
    long clen = clen1;
    // Outer binary op (e.g., [100,1] + [1,100] or [100,100] + [100,1])
    if (rlen1 != rlen2 || clen1 != clen2) {
        rlen = rlen1 > rlen2 ? rlen1 : rlen2;
        clen = clen1 > clen2 ? clen1 : clen2;
    }
    ec.setMetaData(_output.getName(), (int) rlen, (int) clen);
    BinaryOperator bop = (BinaryOperator) _optr;
    LibMatrixCUDA.matrixMatrixArithmetic(ec, ec.getGPUContext(0), getExtendedOpcode(), in1, in2, _output.getName(), isLeftTransposed, isRightTransposed, bop);
    ec.releaseMatrixInputForGPUInstruction(_input1.getName());
    ec.releaseMatrixInputForGPUInstruction(_input2.getName());
    ec.releaseMatrixOutputForGPUInstruction(_output.getName());
}
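The rlen/clen computation above sizes the output for broadcasting: equal shapes yield an ordinary cell-wise result, while mismatched shapes (outer or vector cases) take the larger extent per dimension. The same logic as a standalone helper, purely as a hypothetical illustration (not part of SystemML):

// sketch: output shape for a cell-wise binary op with vector broadcasting,
// mirroring the rlen/clen computation above (hypothetical helper)
static long[] binaryOutputDims(long rlen1, long clen1, long rlen2, long clen2) {
    if (rlen1 == rlen2 && clen1 == clen2)
        return new long[] { rlen1, clen1 }; // ordinary cell-wise op
    // outer / row- or column-vector case: take the larger extent per dimension
    return new long[] { Math.max(rlen1, rlen2), Math.max(clen1, clen2) };
}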
Use of org.apache.sysml.runtime.matrix.operators.BinaryOperator in project incubator-systemml by apache.
The class MatrixMatrixRelationalBinaryGPUInstruction, method processInstruction.
@Override
public void processInstruction(ExecutionContext ec) {
    GPUStatistics.incrementNoOfExecutedGPUInst();
    MatrixObject in1 = getMatrixInputForGPUInstruction(ec, _input1.getName());
    MatrixObject in2 = getMatrixInputForGPUInstruction(ec, _input2.getName());
    long rlen1 = in1.getNumRows();
    long clen1 = in1.getNumColumns();
    long rlen2 = in2.getNumRows();
    long clen2 = in2.getNumColumns();
    // Assume ordinary binary op
    long rlen = rlen1;
    long clen = clen1;
    // Outer binary op (e.g., [100,1] + [1,100] or [100,100] + [100,1])
    if (rlen1 != rlen2 || clen1 != clen2) {
        rlen = rlen1 > rlen2 ? rlen1 : rlen2;
        clen = clen1 > clen2 ? clen1 : clen2;
    }
    ec.setMetaData(_output.getName(), (int) rlen, (int) clen);
    BinaryOperator bop = (BinaryOperator) _optr;
    LibMatrixCUDA.matrixMatrixRelational(ec, ec.getGPUContext(0), getExtendedOpcode(), in1, in2, _output.getName(), bop);
    ec.releaseMatrixInputForGPUInstruction(_input1.getName());
    ec.releaseMatrixInputForGPUInstruction(_input2.getName());
    ec.releaseMatrixOutputForGPUInstruction(_output.getName());
}
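This relational variant mirrors the arithmetic one but dispatches to LibMatrixCUDA.matrixMatrixRelational and passes no transpose flags; the _optr it casts would typically be a relational BinaryOperator, for example (assuming LessThan refers to org.apache.sysml.runtime.functionobjects.LessThan):

// sketch: a relational BinaryOperator as _optr would typically hold for "<"
// (LessThan assumed to be org.apache.sysml.runtime.functionobjects.LessThan)
BinaryOperator lt = new BinaryOperator(LessThan.getLessThanFnObject());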