Use of org.apache.sysml.lops.Lop in the Apache incubator-systemml project: class AggBinaryOp, method constructSparkLopsCPMMWithLeftTransposeRewrite.
/**
 * Constructs the Spark lops for a CPMM after applying the left-transpose
 * rewrite: t(X)%*%Y is computed as t(t(Y)%*%X), with both transposes
 * executed in CP and the matrix multiply in Spark.
 *
 * @return the result-transpose lop producing the final t(X)%*%Y output
 */
private Lop constructSparkLopsCPMMWithLeftTransposeRewrite() {
    SparkAggType aggtype = getSparkMMAggregationType(true);
    // X is the child of the left transpose input — guaranteed to exist
    // because isLeftTransposeRewriteApplicable() was checked by the caller
    Hop X = getInput().get(0).getInput().get(0);
    Hop Y = getInput().get(1);

    // right vector transpose CP
    Lop tY = new Transform(Y.constructLops(), OperationTypes.Transpose, getDataType(), getValueType(), ExecType.CP);
    tY.getOutputParameters().setDimensions(Y.getDim2(), Y.getDim1(), getRowsInBlock(), getColsInBlock(), Y.getNnz());
    setLineNumbers(tY);

    // matrix multiply t(Y)%*%X in Spark
    MMCJ mmcj = new MMCJ(tY, X.constructLops(), getDataType(), getValueType(), aggtype, ExecType.SPARK);
    mmcj.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
    setLineNumbers(mmcj);

    // result transpose CP
    Lop out = new Transform(mmcj, OperationTypes.Transpose, getDataType(), getValueType(), ExecType.CP);
    out.getOutputParameters().setDimensions(X.getDim2(), Y.getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
    // fix: propagate line numbers to the output lop, consistent with tY and mmcj above
    setLineNumbers(out);
    return out;
}
Use of org.apache.sysml.lops.Lop in the Apache incubator-systemml project: class AggBinaryOp, method constructCPLopsMM.
/**
 * Constructs the CP (or GPU) lop for a plain matrix multiplication,
 * applying the left-transpose rewrite where applicable, and registers
 * the resulting lop via setLops().
 *
 * @param et execution type of the multiply (CP or GPU)
 */
private void constructCPLopsMM(ExecType et) {
    Lop matmultCP = null;
    if (et == ExecType.GPU) {
        Hop h1 = getInput().get(0);
        Hop h2 = getInput().get(1);
        // Since the GPU backend is in experimental mode, the transpose rewrite
        // optimization is skipped: CuSPARSE's cusparsecsrmm2 fails only for
        // transa=1 transb=1 m=300 n=300 k=300 ldb=300 ldc=300 (passes for all
        // other settings), hence both transpose flags stay false.
        boolean leftTrans = false;  // HopRewriteUtils.isTransposeOperation(h1);
        boolean rightTrans = false; // HopRewriteUtils.isTransposeOperation(h2);
        Lop left = !leftTrans ? h1.constructLops() : h1.getInput().get(0).constructLops();
        Lop right = !rightTrans ? h2.constructLops() : h2.getInput().get(0).constructLops();
        matmultCP = new Binary(left, right, Binary.OperationTypes.MATMULT, getDataType(), getValueType(), et, leftTrans, rightTrans);
    }
    else if (isLeftTransposeRewriteApplicable(true, false)) {
        matmultCP = constructCPLopsMMWithLeftTransposeRewrite();
    }
    else {
        int k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);
        matmultCP = new Binary(getInput().get(0).constructLops(), getInput().get(1).constructLops(), Binary.OperationTypes.MATMULT, getDataType(), getValueType(), et, k);
    }
    // hoisted out of the branches: the original called setOutputDimensions
    // identically in both arms of the conditional
    setOutputDimensions(matmultCP);
    setLineNumbers(matmultCP);
    setLops(matmultCP);
}
Use of org.apache.sysml.lops.Lop in the Apache incubator-systemml project: class AggBinaryOp, method constructSparkLopsCPMM.
/**
 * Constructs the Spark CPMM (cross-product matrix multiply) lop,
 * preferring the left-transpose rewrite when it is applicable,
 * and registers the result via setLops().
 */
private void constructSparkLopsCPMM() {
    // guard: delegate to the rewritten plan if the left input is a transpose
    if (isLeftTransposeRewriteApplicable(false, false)) {
        setLops(constructSparkLopsCPMMWithLeftTransposeRewrite());
        return;
    }

    SparkAggType aggtype = getSparkMMAggregationType(true);
    Lop left = getInput().get(0).constructLops();
    Lop right = getInput().get(1).constructLops();
    MMCJ cpmm = new MMCJ(left, right, getDataType(), getValueType(), aggtype, ExecType.SPARK);
    setOutputDimensions(cpmm);
    setLineNumbers(cpmm);
    setLops(cpmm);
}
Use of org.apache.sysml.lops.Lop in the Apache incubator-systemml project: class AggBinaryOp, method constructCPLopsTSMM.
// ////////////////////////
// CP Lops generation
// ///////////////////////

/**
 * Constructs the CP lop for a transpose-self matrix multiply
 * (t(X)%*%X or X%*%t(X)) and registers it via setLops().
 *
 * @param mmtsj left/right tsmm type
 * @param et    execution type for the MMTSJ lop
 */
private void constructCPLopsTSMM(MMTSJType mmtsj, ExecType et) {
    // for a left tsmm the transpose occupies input 0, so the raw matrix is input 1
    int inputIx = mmtsj.isLeft() ? 1 : 0;
    int numThreads = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);
    Lop tsmm = new MMTSJ(getInput().get(inputIx).constructLops(), getDataType(), getValueType(), et, mmtsj, false, numThreads);
    tsmm.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
    setLineNumbers(tsmm);
    setLops(tsmm);
}
Use of org.apache.sysml.lops.Lop in the Apache incubator-systemml project: class AggBinaryOp, method constructMRLopsCPMMWithLeftTransposeRewrite.
/**
 * Constructs the MR lops for a CPMM after applying the left-transpose
 * rewrite: t(X)%*%Y is computed as t(t(Y)%*%X), with both transposes
 * in CP and the multiply plus group/aggregate in MR.
 *
 * @return the result-transpose lop producing the final t(X)%*%Y output
 */
private Lop constructMRLopsCPMMWithLeftTransposeRewrite() {
    // X is the child of the left transpose input — guaranteed to exist
    // because isLeftTransposeRewriteApplicable() was checked by the caller
    Hop X = getInput().get(0).getInput().get(0);
    Hop Y = getInput().get(1);

    // right vector transpose CP
    Lop tY = new Transform(Y.constructLops(), OperationTypes.Transpose, getDataType(), getValueType(), ExecType.CP);
    tY.getOutputParameters().setDimensions(Y.getDim2(), Y.getDim1(), getRowsInBlock(), getColsInBlock(), Y.getNnz());
    setLineNumbers(tY);

    // matrix multiply t(Y)%*%X in MR
    MMCJType type = getMMCJAggregationType(X, Y);
    MMCJ mmcj = new MMCJ(tY, X.constructLops(), getDataType(), getValueType(), type, ExecType.MR);
    setOutputDimensions(mmcj);
    setLineNumbers(mmcj);

    // sort partial results for aggregation
    Group grp = new Group(mmcj, Group.OperationTypes.Sort, getDataType(), getValueType());
    setOutputDimensions(grp);
    setLineNumbers(grp);

    Aggregate agg1 = new Aggregate(grp, HopsAgg2Lops.get(outerOp), getDataType(), getValueType(), ExecType.MR);
    setOutputDimensions(agg1);
    setLineNumbers(agg1);
    // aggregation uses kahanSum but the inputs do not have correction values
    agg1.setupCorrectionLocation(CorrectionLocationType.NONE);

    // result transpose CP
    Lop out = new Transform(agg1, OperationTypes.Transpose, getDataType(), getValueType(), ExecType.CP);
    out.getOutputParameters().setDimensions(X.getDim2(), Y.getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
    // fix: propagate line numbers to the output lop, consistent with tY, mmcj, grp, agg1 above
    setLineNumbers(out);
    return out;
}
Aggregations