Example 26 with Group

use of org.apache.sysml.lops.Group in project systemml by apache.

the class AggBinaryOp method constructMRLopsCPMMWithLeftTransposeRewrite.

private Lop constructMRLopsCPMMWithLeftTransposeRewrite() {
    // guaranteed to exist
    Hop X = getInput().get(0).getInput().get(0);
    Hop Y = getInput().get(1);
    // right vector transpose CP
    Lop tY = new Transform(Y.constructLops(), OperationTypes.Transpose, getDataType(), getValueType(), ExecType.CP);
    tY.getOutputParameters().setDimensions(Y.getDim2(), Y.getDim1(), getRowsInBlock(), getColsInBlock(), Y.getNnz());
    setLineNumbers(tY);
    // matrix multiply
    MMCJType type = getMMCJAggregationType(X, Y);
    MMCJ mmcj = new MMCJ(tY, X.constructLops(), getDataType(), getValueType(), type, ExecType.MR);
    setOutputDimensions(mmcj);
    setLineNumbers(mmcj);
    Group grp = new Group(mmcj, Group.OperationTypes.Sort, getDataType(), getValueType());
    setOutputDimensions(grp);
    setLineNumbers(grp);
    Aggregate agg1 = new Aggregate(grp, HopsAgg2Lops.get(outerOp), getDataType(), getValueType(), ExecType.MR);
    setOutputDimensions(agg1);
    setLineNumbers(agg1);
    // aggregation uses kahanSum but the inputs do not have correction values
    agg1.setupCorrectionLocation(CorrectionLocationType.NONE);
    // result transpose CP
    Lop out = new Transform(agg1, OperationTypes.Transpose, getDataType(), getValueType(), ExecType.CP);
    out.getOutputParameters().setDimensions(X.getDim2(), Y.getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
    return out;
}
Also used : Group(org.apache.sysml.lops.Group) MMCJ(org.apache.sysml.lops.MMCJ) MultiThreadedHop(org.apache.sysml.hops.Hop.MultiThreadedHop) MMCJType(org.apache.sysml.lops.MMCJ.MMCJType) Lop(org.apache.sysml.lops.Lop) Transform(org.apache.sysml.lops.Transform) Aggregate(org.apache.sysml.lops.Aggregate)
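
A note on why this left-transpose rewrite is valid (a standard linear-algebra fact, not something specific to SystemML): the hop computes t(X) %*% Y, but the lops built above evaluate t(Y) %*% X via MMCJ in MR and transpose the result in CP, which is equivalent because

\[ \bigl(Y^{\top} X\bigr)^{\top} = X^{\top} Y . \]

The rewrite pays off when Y and the result are small (the comments above suggest Y is a vector here), so both CP transposes only touch small intermediates.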

Example 27 with Group

use of org.apache.sysml.lops.Group in project systemml by apache.

the class AggBinaryOp method constructMRLopsMapMM.

// ////////////////////////
// MR Lops generation
// ///////////////////////
private void constructMRLopsMapMM(MMultMethod method) {
    if (method == MMultMethod.MAPMM_R && isLeftTransposeRewriteApplicable(false, true)) {
        setLops(constructMRLopsMapMMWithLeftTransposeRewrite());
    } else { // GENERAL CASE
        // If number of columns is smaller than block size then explicit aggregation is not required.
        // i.e., entire matrix multiplication can be performed in the mappers.
        boolean needAgg = requiresAggregation(method);
        boolean needPart = requiresPartitioning(method, false);
        _outputEmptyBlocks = !OptimizerUtils.allowsToFilterEmptyBlockOutputs(this);
        // pre partitioning
        Lop leftInput = getInput().get(0).constructLops();
        Lop rightInput = getInput().get(1).constructLops();
        if (needPart) {
            if (method == MMultMethod.MAPMM_L) { // left in distributed cache
                Hop input = getInput().get(0);
                // operator selection: partition in CP if the estimated input fits into the local memory budget, otherwise in MR
                ExecType etPart = (OptimizerUtils.estimateSizeExactSparsity(input.getDim1(), input.getDim2(),
                    OptimizerUtils.getSparsity(input.getDim1(), input.getDim2(), input.getNnz())) < OptimizerUtils.getLocalMemBudget()) ? ExecType.CP : ExecType.MR;
                leftInput = new DataPartition(input.constructLops(), DataType.MATRIX, ValueType.DOUBLE, etPart, PDataPartitionFormat.COLUMN_BLOCK_WISE_N);
                leftInput.getOutputParameters().setDimensions(input.getDim1(), input.getDim2(), getRowsInBlock(), getColsInBlock(), input.getNnz());
                setLineNumbers(leftInput);
            } else { // right side in distributed cache
                Hop input = getInput().get(1);
                // operator selection: partition in CP if the estimated input fits into the local memory budget, otherwise in MR
                ExecType etPart = (OptimizerUtils.estimateSizeExactSparsity(input.getDim1(), input.getDim2(),
                    OptimizerUtils.getSparsity(input.getDim1(), input.getDim2(), input.getNnz())) < OptimizerUtils.getLocalMemBudget()) ? ExecType.CP : ExecType.MR;
                rightInput = new DataPartition(input.constructLops(), DataType.MATRIX, ValueType.DOUBLE, etPart, PDataPartitionFormat.ROW_BLOCK_WISE_N);
                rightInput.getOutputParameters().setDimensions(input.getDim1(), input.getDim2(), getRowsInBlock(), getColsInBlock(), input.getNnz());
                setLineNumbers(rightInput);
            }
        }
        // core matrix mult
        MapMult mapmult = new MapMult(leftInput, rightInput, getDataType(), getValueType(), (method == MMultMethod.MAPMM_R), needPart, _outputEmptyBlocks);
        mapmult.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
        setLineNumbers(mapmult);
        // post aggregation
        if (needAgg) {
            Group grp = new Group(mapmult, Group.OperationTypes.Sort, getDataType(), getValueType());
            Aggregate agg1 = new Aggregate(grp, HopsAgg2Lops.get(outerOp), getDataType(), getValueType(), ExecType.MR);
            grp.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
            agg1.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
            setLineNumbers(agg1);
            // aggregation uses kahanSum but the inputs do not have correction values
            agg1.setupCorrectionLocation(CorrectionLocationType.NONE);
            setLops(agg1);
        } else {
            setLops(mapmult);
        }
    }
}
Also used : Group(org.apache.sysml.lops.Group) MultiThreadedHop(org.apache.sysml.hops.Hop.MultiThreadedHop) PMapMult(org.apache.sysml.lops.PMapMult) MapMult(org.apache.sysml.lops.MapMult) ExecType(org.apache.sysml.lops.LopProperties.ExecType) Lop(org.apache.sysml.lops.Lop) Aggregate(org.apache.sysml.lops.Aggregate) DataPartition(org.apache.sysml.lops.DataPartition)
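
The post-aggregation branch above pairs a Group lop (sort/shuffle of partial result blocks by block index) with an Aggregate lop, a pattern that recurs throughout these examples. As a minimal sketch, a hypothetical helper along the following lines could factor it out; the helper name and parameter list are assumptions, and it presumes the same Hop member context and imports used by the methods above (getDataType(), getValueType(), setLineNumbers(), Group, Aggregate, CorrectionLocationType):

// Hypothetical helper, not part of the SystemML code base: factors out the
// recurring Group -> Aggregate pattern shown in the surrounding examples.
// Assumes it lives inside a Hop subclass with the usual member context.
private Lop groupedAggregate(Lop input, Aggregate.OperationTypes aggOp,
        long rows, long cols, long brlen, long bclen, long nnz) {
    // shuffle/sort the partial result blocks by block index
    Group grp = new Group(input, Group.OperationTypes.Sort, getDataType(), getValueType());
    grp.getOutputParameters().setDimensions(rows, cols, brlen, bclen, nnz);
    setLineNumbers(grp);
    // aggregate the grouped blocks in MR
    Aggregate agg = new Aggregate(grp, aggOp, getDataType(), getValueType(), ExecType.MR);
    agg.getOutputParameters().setDimensions(rows, cols, brlen, bclen, nnz);
    // the inputs carry no correction values, so disable correction handling
    agg.setupCorrectionLocation(CorrectionLocationType.NONE);
    setLineNumbers(agg);
    return agg;
}

With such a helper, the post-aggregation step above would reduce to setLops(groupedAggregate(mapmult, HopsAgg2Lops.get(outerOp), getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz())).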

Example 28 with Group

use of org.apache.sysml.lops.Group in project systemml by apache.

the class UnaryOp method constructLopsMRCumulativeUnary.

/**
 * MR Cumsum is currently based on a multipass algorithm of (1) preaggregation and (2) subsequent offsetting.
 * Note that we currently support a single robust physical operator, but many alternative
 * realizations are possible for specific scenarios (e.g., when the preaggregated intermediates
 * fit into the map-task memory budget) or by creating custom job types.
 *
 * @return low-level operator
 */
private Lop constructLopsMRCumulativeUnary() {
    Hop input = getInput().get(0);
    long rlen = input.getDim1();
    long clen = input.getDim2();
    long brlen = input.getRowsInBlock();
    long bclen = input.getColsInBlock();
    boolean force = !dimsKnown() || _etypeForced == ExecType.MR;
    OperationTypes aggtype = getCumulativeAggType();
    Lop X = input.constructLops();
    Lop TEMP = X;
    ArrayList<Lop> DATA = new ArrayList<>();
    int level = 0;
    // recursive preaggregation until aggregates fit into CP memory budget
    while (((2 * OptimizerUtils.estimateSize(TEMP.getOutputParameters().getNumRows(), clen)
            + OptimizerUtils.estimateSize(1, clen)) > OptimizerUtils.getLocalMemBudget()
            && TEMP.getOutputParameters().getNumRows() > 1) || force) {
        DATA.add(TEMP);
        // preaggregation per block
        long rlenAgg = (long) Math.ceil((double) TEMP.getOutputParameters().getNumRows() / brlen);
        Lop preagg = new CumulativePartialAggregate(TEMP, DataType.MATRIX, ValueType.DOUBLE, aggtype, ExecType.MR);
        preagg.getOutputParameters().setDimensions(rlenAgg, clen, brlen, bclen, -1);
        setLineNumbers(preagg);
        Group group = new Group(preagg, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
        group.getOutputParameters().setDimensions(rlenAgg, clen, brlen, bclen, -1);
        setLineNumbers(group);
        Aggregate agg = new Aggregate(group, HopsAgg2Lops.get(AggOp.SUM), getDataType(), getValueType(), ExecType.MR);
        agg.getOutputParameters().setDimensions(rlenAgg, clen, brlen, bclen, -1);
        // aggregation uses kahanSum but the inputs do not have correction values
        agg.setupCorrectionLocation(CorrectionLocationType.NONE);
        setLineNumbers(agg);
        TEMP = agg;
        level++;
        // in case of unknowns, generate one level
        force = false;
    }
    // in-memory cum sum (of partial aggregates)
    if (TEMP.getOutputParameters().getNumRows() != 1) {
        int k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);
        Unary unary1 = new Unary(TEMP, HopsOpOp1LopsU.get(_op), DataType.MATRIX, ValueType.DOUBLE, ExecType.CP, k);
        unary1.getOutputParameters().setDimensions(TEMP.getOutputParameters().getNumRows(), clen, brlen, bclen, -1);
        setLineNumbers(unary1);
        TEMP = unary1;
    }
    // split, group and mr cumsum
    while (level-- > 0) {
        double init = getCumulativeInitValue();
        CumulativeSplitAggregate split = new CumulativeSplitAggregate(TEMP, DataType.MATRIX, ValueType.DOUBLE, init);
        split.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
        setLineNumbers(split);
        Group group1 = new Group(DATA.get(level), Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
        group1.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
        setLineNumbers(group1);
        Group group2 = new Group(split, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
        group2.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
        setLineNumbers(group2);
        CumulativeOffsetBinary binary = new CumulativeOffsetBinary(group1, group2, DataType.MATRIX, ValueType.DOUBLE, aggtype, ExecType.MR);
        binary.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
        setLineNumbers(binary);
        TEMP = binary;
    }
    return TEMP;
}
Also used : Group(org.apache.sysml.lops.Group) CumulativeSplitAggregate(org.apache.sysml.lops.CumulativeSplitAggregate) MultiThreadedHop(org.apache.sysml.hops.Hop.MultiThreadedHop) ArrayList(java.util.ArrayList) Lop(org.apache.sysml.lops.Lop) CombineUnary(org.apache.sysml.lops.CombineUnary) Unary(org.apache.sysml.lops.Unary) CumulativePartialAggregate(org.apache.sysml.lops.CumulativePartialAggregate) OperationTypes(org.apache.sysml.lops.Aggregate.OperationTypes) CumulativeOffsetBinary(org.apache.sysml.lops.CumulativeOffsetBinary) PartialAggregate(org.apache.sysml.lops.PartialAggregate) CumulativeSplitAggregate(org.apache.sysml.lops.CumulativeSplitAggregate) Aggregate(org.apache.sysml.lops.Aggregate) CumulativePartialAggregate(org.apache.sysml.lops.CumulativePartialAggregate)
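
To make the preaggregation recursion concrete, here is a small worked example with purely illustrative numbers (rlen = 10^9 rows, brlen = 1000, narrow clen). Each pass replaces every row block by a single row of block-local aggregates, so the row count shrinks by roughly the block size per level:

\[ 10^{9} \;\rightarrow\; \lceil 10^{9}/10^{3} \rceil = 10^{6} \;\rightarrow\; 10^{3} \;\rightarrow\; 1 . \]

After a few levels the partial aggregates fit into the CP memory budget, the in-memory cumsum closes the recursion, and the while (level-- > 0) loop propagates the offsets back down through CumulativeSplitAggregate and CumulativeOffsetBinary.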

Example 29 with Group

use of org.apache.sysml.lops.Group in project systemml by apache.

the class UnaryOp method constructLopsIQM.

private Lop constructLopsIQM() {
    ExecType et = optFindExecType();
    Hop input = getInput().get(0);
    if (et == ExecType.MR) {
        CombineUnary combine = CombineUnary.constructCombineLop(input.constructLops(), DataType.MATRIX, getValueType());
        combine.getOutputParameters().setDimensions(input.getDim1(), input.getDim2(), input.getRowsInBlock(), input.getColsInBlock(), input.getNnz());
        SortKeys sort = SortKeys.constructSortByValueLop(combine, SortKeys.OperationTypes.WithoutWeights, DataType.MATRIX, ValueType.DOUBLE, ExecType.MR);
        // Sort dimensions are the same as those of the first input
        sort.getOutputParameters().setDimensions(input.getDim1(), input.getDim2(), input.getRowsInBlock(), input.getColsInBlock(), input.getNnz());
        Data lit = Data.createLiteralLop(ValueType.DOUBLE, Double.toString(0.25));
        lit.setAllPositions(this.getFilename(), this.getBeginLine(), this.getBeginColumn(), this.getEndLine(), this.getEndColumn());
        PickByCount pick = new PickByCount(sort, lit, DataType.MATRIX, getValueType(), PickByCount.OperationTypes.RANGEPICK);
        pick.getOutputParameters().setDimensions(-1, -1, getRowsInBlock(), getColsInBlock(), -1);
        setLineNumbers(pick);
        PartialAggregate pagg = new PartialAggregate(pick, HopsAgg2Lops.get(Hop.AggOp.SUM), HopsDirection2Lops.get(Hop.Direction.RowCol), DataType.MATRIX, getValueType());
        setLineNumbers(pagg);
        // Set the dimensions of PartialAggregate LOP based on the
        // direction in which aggregation is performed
        pagg.setDimensionsBasedOnDirection(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock());
        Group group1 = new Group(pagg, Group.OperationTypes.Sort, DataType.MATRIX, getValueType());
        group1.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
        setLineNumbers(group1);
        Aggregate agg1 = new Aggregate(group1, HopsAgg2Lops.get(Hop.AggOp.SUM), DataType.MATRIX, getValueType(), ExecType.MR);
        agg1.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
        agg1.setupCorrectionLocation(pagg.getCorrectionLocation());
        setLineNumbers(agg1);
        UnaryCP unary1 = new UnaryCP(agg1, HopsOpOp1LopsUS.get(OpOp1.CAST_AS_SCALAR), getDataType(), getValueType());
        unary1.getOutputParameters().setDimensions(0, 0, 0, 0, -1);
        setLineNumbers(unary1);
        Unary iqm = new Unary(sort, unary1, Unary.OperationTypes.MR_IQM, DataType.SCALAR, ValueType.DOUBLE, ExecType.CP);
        iqm.getOutputParameters().setDimensions(0, 0, 0, 0, -1);
        setLineNumbers(iqm);
        return iqm;
    } else {
        SortKeys sort = SortKeys.constructSortByValueLop(input.constructLops(), SortKeys.OperationTypes.WithoutWeights, DataType.MATRIX, ValueType.DOUBLE, et);
        sort.getOutputParameters().setDimensions(input.getDim1(), input.getDim2(), input.getRowsInBlock(), input.getColsInBlock(), input.getNnz());
        PickByCount pick = new PickByCount(sort, null, getDataType(), getValueType(), PickByCount.OperationTypes.IQM, et, true);
        pick.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
        setLineNumbers(pick);
        return pick;
    }
}
Also used : PartialAggregate(org.apache.sysml.lops.PartialAggregate) CumulativePartialAggregate(org.apache.sysml.lops.CumulativePartialAggregate) SortKeys(org.apache.sysml.lops.SortKeys) Group(org.apache.sysml.lops.Group) PickByCount(org.apache.sysml.lops.PickByCount) CombineUnary(org.apache.sysml.lops.CombineUnary) MultiThreadedHop(org.apache.sysml.hops.Hop.MultiThreadedHop) ExecType(org.apache.sysml.lops.LopProperties.ExecType) Data(org.apache.sysml.lops.Data) PartialAggregate(org.apache.sysml.lops.PartialAggregate) CumulativeSplitAggregate(org.apache.sysml.lops.CumulativeSplitAggregate) Aggregate(org.apache.sysml.lops.Aggregate) CumulativePartialAggregate(org.apache.sysml.lops.CumulativePartialAggregate) CombineUnary(org.apache.sysml.lops.CombineUnary) Unary(org.apache.sysml.lops.Unary) UnaryCP(org.apache.sysml.lops.UnaryCP)
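
For reference, the quantity assembled here is the interquartile mean: the mean of the values lying between the 25th and 75th percentiles. That is why the MR branch sorts the values, range-picks with the literal 0.25 (presumably trimming a quarter from each end), sums the picked range, and feeds both the sort output and the aggregated sum (cast to scalar) into the final MR_IQM unary, which produces the scalar result in CP. A small worked example on illustrative data, not from the source: for the sorted values 1 through 8, the middle half is {3, 4, 5, 6}, so

\[ \mathrm{IQM} = \frac{3 + 4 + 5 + 6}{4} = 4.5 . \]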

Example 30 with Group

use of org.apache.sysml.lops.Group in project incubator-systemml by apache.

the class AggBinaryOp method constructMRLopsMapMM.

// ////////////////////////
// MR Lops generation
// ///////////////////////
private void constructMRLopsMapMM(MMultMethod method) {
    if (method == MMultMethod.MAPMM_R && isLeftTransposeRewriteApplicable(false, true)) {
        setLops(constructMRLopsMapMMWithLeftTransposeRewrite());
    } else { // GENERAL CASE
        // If number of columns is smaller than block size then explicit aggregation is not required.
        // i.e., entire matrix multiplication can be performed in the mappers.
        boolean needAgg = requiresAggregation(method);
        boolean needPart = requiresPartitioning(method, false);
        _outputEmptyBlocks = !OptimizerUtils.allowsToFilterEmptyBlockOutputs(this);
        // pre partitioning
        Lop leftInput = getInput().get(0).constructLops();
        Lop rightInput = getInput().get(1).constructLops();
        if (needPart) {
            if (method == MMultMethod.MAPMM_L) { // left in distributed cache
                Hop input = getInput().get(0);
                // operator selection: partition in CP if the estimated input fits into the local memory budget, otherwise in MR
                ExecType etPart = (OptimizerUtils.estimateSizeExactSparsity(input.getDim1(), input.getDim2(),
                    OptimizerUtils.getSparsity(input.getDim1(), input.getDim2(), input.getNnz())) < OptimizerUtils.getLocalMemBudget()) ? ExecType.CP : ExecType.MR;
                leftInput = new DataPartition(input.constructLops(), DataType.MATRIX, ValueType.DOUBLE, etPart, PDataPartitionFormat.COLUMN_BLOCK_WISE_N);
                leftInput.getOutputParameters().setDimensions(input.getDim1(), input.getDim2(), getRowsInBlock(), getColsInBlock(), input.getNnz());
                setLineNumbers(leftInput);
            } else { // right side in distributed cache
                Hop input = getInput().get(1);
                // operator selection: partition in CP if the estimated input fits into the local memory budget, otherwise in MR
                ExecType etPart = (OptimizerUtils.estimateSizeExactSparsity(input.getDim1(), input.getDim2(),
                    OptimizerUtils.getSparsity(input.getDim1(), input.getDim2(), input.getNnz())) < OptimizerUtils.getLocalMemBudget()) ? ExecType.CP : ExecType.MR;
                rightInput = new DataPartition(input.constructLops(), DataType.MATRIX, ValueType.DOUBLE, etPart, PDataPartitionFormat.ROW_BLOCK_WISE_N);
                rightInput.getOutputParameters().setDimensions(input.getDim1(), input.getDim2(), getRowsInBlock(), getColsInBlock(), input.getNnz());
                setLineNumbers(rightInput);
            }
        }
        // core matrix mult
        MapMult mapmult = new MapMult(leftInput, rightInput, getDataType(), getValueType(), (method == MMultMethod.MAPMM_R), needPart, _outputEmptyBlocks);
        mapmult.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
        setLineNumbers(mapmult);
        // post aggregation
        if (needAgg) {
            Group grp = new Group(mapmult, Group.OperationTypes.Sort, getDataType(), getValueType());
            Aggregate agg1 = new Aggregate(grp, HopsAgg2Lops.get(outerOp), getDataType(), getValueType(), ExecType.MR);
            grp.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
            agg1.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
            setLineNumbers(agg1);
            // aggregation uses kahanSum but the inputs do not have correction values
            agg1.setupCorrectionLocation(CorrectionLocationType.NONE);
            setLops(agg1);
        } else {
            setLops(mapmult);
        }
    }
}
Also used : Group(org.apache.sysml.lops.Group) MultiThreadedHop(org.apache.sysml.hops.Hop.MultiThreadedHop) PMapMult(org.apache.sysml.lops.PMapMult) MapMult(org.apache.sysml.lops.MapMult) ExecType(org.apache.sysml.lops.LopProperties.ExecType) Lop(org.apache.sysml.lops.Lop) Aggregate(org.apache.sysml.lops.Aggregate) DataPartition(org.apache.sysml.lops.DataPartition)

Aggregations

Group (org.apache.sysml.lops.Group): 55 usages
Lop (org.apache.sysml.lops.Lop): 45 usages
Aggregate (org.apache.sysml.lops.Aggregate): 38 usages
MultiThreadedHop (org.apache.sysml.hops.Hop.MultiThreadedHop): 32 usages
DataPartition (org.apache.sysml.lops.DataPartition): 28 usages
ExecType (org.apache.sysml.lops.LopProperties.ExecType): 25 usages
UnaryCP (org.apache.sysml.lops.UnaryCP): 14 usages
RepMat (org.apache.sysml.lops.RepMat): 11 usages
PartialAggregate (org.apache.sysml.lops.PartialAggregate): 10 usages
Unary (org.apache.sysml.lops.Unary): 10 usages
CombineUnary (org.apache.sysml.lops.CombineUnary): 8 usages
Transform (org.apache.sysml.lops.Transform): 8 usages
AppendR (org.apache.sysml.lops.AppendR): 6 usages
Data (org.apache.sysml.lops.Data): 6 usages
GroupedAggregate (org.apache.sysml.lops.GroupedAggregate): 6 usages
SortKeys (org.apache.sysml.lops.SortKeys): 6 usages
ArrayList (java.util.ArrayList): 4 usages
SparkAggType (org.apache.sysml.hops.AggBinaryOp.SparkAggType): 4 usages
OperationTypes (org.apache.sysml.lops.Aggregate.OperationTypes): 4 usages
Binary (org.apache.sysml.lops.Binary): 4 usages