Example 26 with Aggregate

Use of org.apache.sysml.lops.Aggregate in project incubator-systemml by Apache.

The class BinaryOp, method constructLopsIQM.

private void constructLopsIQM(ExecType et) {
    if (et == ExecType.MR) {
        CombineBinary combine = CombineBinary.constructCombineLop(OperationTypes.PreSort, (Lop) getInput().get(0).constructLops(), (Lop) getInput().get(1).constructLops(), DataType.MATRIX, getValueType());
        combine.getOutputParameters().setDimensions(getInput().get(0).getDim1(), getInput().get(0).getDim2(), getInput().get(0).getRowsInBlock(), getInput().get(0).getColsInBlock(), getInput().get(0).getNnz());
        SortKeys sort = SortKeys.constructSortByValueLop(combine, SortKeys.OperationTypes.WithWeights, DataType.MATRIX, ValueType.DOUBLE, ExecType.MR);
        // Sort dimensions are same as the first input
        sort.getOutputParameters().setDimensions(getInput().get(0).getDim1(), getInput().get(0).getDim2(), getInput().get(0).getRowsInBlock(), getInput().get(0).getColsInBlock(), getInput().get(0).getNnz());
        Data lit = Data.createLiteralLop(ValueType.DOUBLE, Double.toString(0.25));
        setLineNumbers(lit);
        PickByCount pick = new PickByCount(sort, lit, DataType.MATRIX, getValueType(), PickByCount.OperationTypes.RANGEPICK);
        pick.getOutputParameters().setDimensions(-1, -1, getRowsInBlock(), getColsInBlock(), -1);
        setLineNumbers(pick);
        PartialAggregate pagg = new PartialAggregate(pick, HopsAgg2Lops.get(Hop.AggOp.SUM), HopsDirection2Lops.get(Hop.Direction.RowCol), DataType.MATRIX, getValueType());
        setLineNumbers(pagg);
        // Set the dimensions of PartialAggregate LOP based on the
        // direction in which aggregation is performed
        pagg.setDimensionsBasedOnDirection(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock());
        Group group1 = new Group(pagg, Group.OperationTypes.Sort, DataType.MATRIX, getValueType());
        setOutputDimensions(group1);
        setLineNumbers(group1);
        Aggregate agg1 = new Aggregate(group1, HopsAgg2Lops.get(Hop.AggOp.SUM), DataType.MATRIX, getValueType(), ExecType.MR);
        setOutputDimensions(agg1);
        agg1.setupCorrectionLocation(pagg.getCorrectionLocation());
        setLineNumbers(agg1);
        UnaryCP unary1 = new UnaryCP(agg1, HopsOpOp1LopsUS.get(OpOp1.CAST_AS_SCALAR), DataType.SCALAR, getValueType());
        unary1.getOutputParameters().setDimensions(0, 0, 0, 0, -1);
        setLineNumbers(unary1);
        Unary iqm = new Unary(sort, unary1, Unary.OperationTypes.MR_IQM, DataType.SCALAR, ValueType.DOUBLE, ExecType.CP);
        iqm.getOutputParameters().setDimensions(0, 0, 0, 0, -1);
        setLineNumbers(iqm);
        setLops(iqm);
    } else {
        SortKeys sort = SortKeys.constructSortByValueLop(getInput().get(0).constructLops(), getInput().get(1).constructLops(), SortKeys.OperationTypes.WithWeights, getInput().get(0).getDataType(), getInput().get(0).getValueType(), et);
        sort.getOutputParameters().setDimensions(getInput().get(0).getDim1(), getInput().get(0).getDim2(), getInput().get(0).getRowsInBlock(), getInput().get(0).getColsInBlock(), getInput().get(0).getNnz());
        PickByCount pick = new PickByCount(sort, null, getDataType(), getValueType(), PickByCount.OperationTypes.IQM, et, true);
        setOutputDimensions(pick);
        setLineNumbers(pick);
        setLops(pick);
    }
}
Also used : PartialAggregate(org.apache.sysml.lops.PartialAggregate) CombineBinary(org.apache.sysml.lops.CombineBinary) SortKeys(org.apache.sysml.lops.SortKeys) Group(org.apache.sysml.lops.Group) PickByCount(org.apache.sysml.lops.PickByCount) Data(org.apache.sysml.lops.Data) Aggregate(org.apache.sysml.lops.Aggregate) Unary(org.apache.sysml.lops.Unary) CombineUnary(org.apache.sysml.lops.CombineUnary) UnaryCP(org.apache.sysml.lops.UnaryCP)
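
For intuition, the lop pipeline above implements the interquartile mean (IQM): sort the (weighted) values, pick the [0.25, 0.75] quantile range, sum it, and finally reduce to a scalar. A minimal, unweighted Java sketch of that quantity follows (the class and method names are hypothetical, and the actual runtime additionally handles weights and partial quantiles at the range boundaries):

import java.util.Arrays;

public class IqmSketch {
    // Interquartile mean of an unweighted vector: average of the values that
    // fall inside the [0.25, 0.75] quantile range after sorting.
    static double iqm(double[] values) {
        double[] v = values.clone();
        Arrays.sort(v);                              // corresponds to the SortKeys lop
        int lo = (int) Math.ceil(0.25 * v.length);   // lower bound of the range pick
        int hi = (int) Math.floor(0.75 * v.length);  // upper bound of the range pick
        double sum = 0;                              // corresponds to the SUM aggregate
        for (int i = lo; i < hi; i++)
            sum += v[i];
        return sum / (hi - lo);                      // final scalar result
    }

    public static void main(String[] args) {
        // mean of {3, 4, 5, 6} = 4.5
        System.out.println(iqm(new double[] { 1, 2, 3, 4, 5, 6, 7, 8 }));
    }
}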

Example 27 with Aggregate

Use of org.apache.sysml.lops.Aggregate in project incubator-systemml by Apache.

The class BinaryOp, method constructMRAppendLop.

/**
 * General case binary append.
 *
 * @param left high-level operator left
 * @param right high-level operator right
 * @param dt data type
 * @param vt value type
 * @param cbind true if cbind
 * @param current current high-level operator
 * @return low-level operator
 */
public static Lop constructMRAppendLop(Hop left, Hop right, DataType dt, ValueType vt, boolean cbind, Hop current) {
    Lop ret = null;
    long m1_dim1 = left.getDim1();
    long m1_dim2 = left.getDim2();
    long m2_dim1 = right.getDim1();
    long m2_dim2 = right.getDim2();
    // output rows
    long m3_dim1 = cbind ? m1_dim1 : ((m1_dim1 >= 0 && m2_dim1 >= 0) ? (m1_dim1 + m2_dim1) : -1);
    // output cols
    long m3_dim2 = cbind ? ((m1_dim2 >= 0 && m2_dim2 >= 0) ? (m1_dim2 + m2_dim2) : -1) : m1_dim2;
    // output nnz
    long m3_nnz = (left.getNnz() > 0 && right.getNnz() > 0) ? (left.getNnz() + right.getNnz()) : -1;
    long brlen = left.getRowsInBlock();
    long bclen = left.getColsInBlock();
    // offset 1st input
    Lop offset = createOffsetLop(left, cbind);
    AppendMethod am = optFindAppendMethod(m1_dim1, m1_dim2, m2_dim1, m2_dim2, brlen, bclen, cbind);
    switch(am) {
        case MR_MAPPEND: // special case map-only append
            {
                boolean needPart = requiresPartitioning(right);
                // pre partitioning
                Lop dcInput = right.constructLops();
                if (needPart) {
                    // right side in distributed cache
                    // operator selection
                    ExecType etPart = (OptimizerUtils.estimateSizeExactSparsity(right.getDim1(), right.getDim2(), OptimizerUtils.getSparsity(right.getDim1(), right.getDim2(), right.getNnz())) < OptimizerUtils.getLocalMemBudget()) ? ExecType.CP : ExecType.MR;
                    dcInput = new DataPartition(dcInput, DataType.MATRIX, ValueType.DOUBLE, etPart, PDataPartitionFormat.ROW_BLOCK_WISE_N);
                    dcInput.getOutputParameters().setDimensions(right.getDim1(), right.getDim2(), right.getRowsInBlock(), right.getColsInBlock(), right.getNnz());
                    dcInput.setAllPositions(right.getFilename(), right.getBeginLine(), right.getBeginColumn(), right.getEndLine(), right.getEndColumn());
                }
                AppendM appM = new AppendM(left.constructLops(), dcInput, offset, dt, vt, cbind, needPart, ExecType.MR);
                appM.setAllPositions(current.getFilename(), current.getBeginLine(), current.getBeginColumn(), current.getEndLine(), current.getEndColumn());
                appM.getOutputParameters().setDimensions(m3_dim1, m3_dim2, brlen, bclen, m3_nnz);
                ret = appM;
                break;
            }
        case MR_RAPPEND: // special case reduce-side append w/ one column block
            {
                // group
                Group group1 = new Group(left.constructLops(), Group.OperationTypes.Sort, DataType.MATRIX, vt);
                group1.getOutputParameters().setDimensions(m1_dim1, m1_dim2, brlen, bclen, left.getNnz());
                group1.setAllPositions(left.getFilename(), left.getBeginLine(), left.getBeginColumn(), left.getEndLine(), left.getEndColumn());
                Group group2 = new Group(right.constructLops(), Group.OperationTypes.Sort, DataType.MATRIX, vt);
                group2.getOutputParameters().setDimensions(m2_dim1, m2_dim2, brlen, bclen, right.getNnz());
                group2.setAllPositions(right.getFilename(), right.getBeginLine(), right.getBeginColumn(), right.getEndLine(), right.getEndColumn());
                AppendR appR = new AppendR(group1, group2, dt, vt, cbind, ExecType.MR);
                appR.getOutputParameters().setDimensions(m3_dim1, m3_dim2, brlen, bclen, m3_nnz);
                appR.setAllPositions(current.getFilename(), current.getBeginLine(), current.getBeginColumn(), current.getEndLine(), current.getEndColumn());
                ret = appR;
                break;
            }
        case MR_GAPPEND:
            {
                // general case: map expand append, reduce aggregate
                // offset second input
                Lop offset2 = createOffsetLop(right, cbind);
                AppendG appG = new AppendG(left.constructLops(), right.constructLops(), offset, offset2, dt, vt, cbind, ExecType.MR);
                appG.getOutputParameters().setDimensions(m3_dim1, m3_dim2, brlen, bclen, m3_nnz);
                appG.setAllPositions(current.getFilename(), current.getBeginLine(), current.getBeginColumn(), current.getEndLine(), current.getEndColumn());
                // group
                Group group1 = new Group(appG, Group.OperationTypes.Sort, DataType.MATRIX, vt);
                group1.getOutputParameters().setDimensions(m3_dim1, m3_dim2, brlen, bclen, m3_nnz);
                group1.setAllPositions(current.getFilename(), current.getBeginLine(), current.getBeginColumn(), current.getEndLine(), current.getEndColumn());
                // aggregate
                Aggregate agg1 = new Aggregate(group1, Aggregate.OperationTypes.Sum, DataType.MATRIX, vt, ExecType.MR);
                agg1.getOutputParameters().setDimensions(m3_dim1, m3_dim2, brlen, bclen, m3_nnz);
                agg1.setAllPositions(current.getFilename(), current.getBeginLine(), current.getBeginColumn(), current.getEndLine(), current.getEndColumn());
                ret = agg1;
                break;
            }
        default:
            throw new HopsException("Invalid MR append method: " + am);
    }
    return ret;
}
Also used : AppendG(org.apache.sysml.lops.AppendG) Group(org.apache.sysml.lops.Group) AppendR(org.apache.sysml.lops.AppendR) ExecType(org.apache.sysml.lops.LopProperties.ExecType) Lop(org.apache.sysml.lops.Lop) PartialAggregate(org.apache.sysml.lops.PartialAggregate) Aggregate(org.apache.sysml.lops.Aggregate) DataPartition(org.apache.sysml.lops.DataPartition) AppendM(org.apache.sysml.lops.AppendM)
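
The shape propagation at the top of the method is the key invariant: for cbind the row count comes from the left input and column counts add up, for rbind it is the reverse, and -1 marks an unknown dimension or nnz. A standalone sketch of that rule, mirroring the m3_dim1/m3_dim2/m3_nnz computation above (the names are illustrative, not SystemML API):

public class AppendShapeSketch {
    // Returns {rows, cols, nnz} of the appended output; -1 means unknown.
    static long[] appendShape(long m1r, long m1c, long m1nnz,
                              long m2r, long m2c, long m2nnz, boolean cbind) {
        long rows = cbind ? m1r : ((m1r >= 0 && m2r >= 0) ? m1r + m2r : -1);  // rbind adds rows
        long cols = cbind ? ((m1c >= 0 && m2c >= 0) ? m1c + m2c : -1) : m1c;  // cbind adds columns
        long nnz  = (m1nnz > 0 && m2nnz > 0) ? m1nnz + m2nnz : -1;            // nnz only if both known
        return new long[] { rows, cols, nnz };
    }

    public static void main(String[] args) {
        // cbind of a 10x3 and a 10x2 matrix: 10 rows, 5 columns
        System.out.println(java.util.Arrays.toString(appendShape(10, 3, 30, 10, 2, 20, true)));
    }
}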

Example 28 with Aggregate

Use of org.apache.sysml.lops.Aggregate in project incubator-systemml by Apache.

The class ParameterizedBuiltinOp, method constructLopsRExpand.

private void constructLopsRExpand(HashMap<String, Lop> inputlops, ExecType et) {
    if (et == ExecType.CP || et == ExecType.SPARK) {
        int k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);
        ParameterizedBuiltin pbilop = new ParameterizedBuiltin(inputlops, HopsParameterizedBuiltinLops.get(_op), getDataType(), getValueType(), et, k);
        setOutputDimensions(pbilop);
        setLineNumbers(pbilop);
        setLops(pbilop);
    } else if (et == ExecType.MR) {
        ParameterizedBuiltin pbilop = new ParameterizedBuiltin(inputlops, HopsParameterizedBuiltinLops.get(_op), getDataType(), getValueType(), et);
        setOutputDimensions(pbilop);
        setLineNumbers(pbilop);
        Group group1 = new Group(pbilop, Group.OperationTypes.Sort, getDataType(), getValueType());
        setOutputDimensions(group1);
        setLineNumbers(group1);
        Aggregate finalagg = new Aggregate(group1, Aggregate.OperationTypes.Sum, DataType.MATRIX, getValueType(), ExecType.MR);
        setOutputDimensions(finalagg);
        setLineNumbers(finalagg);
        setLops(finalagg);
    }
}
Also used : Group(org.apache.sysml.lops.Group) ParameterizedBuiltin(org.apache.sysml.lops.ParameterizedBuiltin) GroupedAggregate(org.apache.sysml.lops.GroupedAggregate) Aggregate(org.apache.sysml.lops.Aggregate)
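
In the MR branch, the trailing Group (sort by key) and Aggregate (sum) lops merge the partial output blocks that different map tasks emit for the same block index. A toy sketch of such a key-grouped summation follows (illustrative only, assuming key/value pairs keyed by block index; this is not the SystemML runtime):

import java.util.HashMap;
import java.util.Map;

public class BlockMergeSketch {
    public static void main(String[] args) {
        // (block index, partial contribution) pairs as emitted by mappers
        long[]   idx  = { 0, 1, 0, 1 };
        double[] vals = { 1.0, 2.0, 3.0, 4.0 };

        // group by block index, then sum the partial contributions per key
        Map<Long, Double> merged = new HashMap<>();
        for (int i = 0; i < idx.length; i++)
            merged.merge(idx[i], vals[i], Double::sum);

        System.out.println(merged);   // {0=4.0, 1=6.0}
    }
}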

Example 29 with Aggregate

Use of org.apache.sysml.lops.Aggregate in project incubator-systemml by Apache.

The class QuaternaryOp, method constructMRLopsWeightedSquaredLoss.

private void constructMRLopsWeightedSquaredLoss(WeightsType wtype) {
    // NOTE: the common case for wsloss are factors U/V with a rank of 10s to 100s; the current runtime only
    // supports single block outer products (U/V rank <= blocksize, i.e., 1000 by default); we enforce this
    // by applying the hop rewrite for Weighted Squared Loss only if this constraint holds.
    Hop X = getInput().get(0);
    Hop U = getInput().get(1);
    Hop V = getInput().get(2);
    Hop W = getInput().get(3);
    // MR operator selection, part1
    // size U
    double m1Size = OptimizerUtils.estimateSize(U.getDim1(), U.getDim2());
    // size V
    double m2Size = OptimizerUtils.estimateSize(V.getDim1(), V.getDim2());
    boolean isMapWsloss = (!wtype.hasFourInputs() && m1Size + m2Size < OptimizerUtils.getRemoteMemBudgetMap(true));
    if (!FORCE_REPLICATION && isMapWsloss) { // broadcast
        // partitioning of U
        boolean needPartU = !U.dimsKnown() || U.getDim1() * U.getDim2() > DistributedCacheInput.PARTITION_SIZE;
        Lop lU = U.constructLops();
        if (needPartU) {
            // requires partitioning
            lU = new DataPartition(lU, DataType.MATRIX, ValueType.DOUBLE, (m1Size > OptimizerUtils.getLocalMemBudget()) ? ExecType.MR : ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);
            lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), getRowsInBlock(), getColsInBlock(), U.getNnz());
            setLineNumbers(lU);
        }
        // partitioning of V
        boolean needPartV = !V.dimsKnown() || V.getDim1() * V.getDim2() > DistributedCacheInput.PARTITION_SIZE;
        Lop lV = V.constructLops();
        if (needPartV) {
            // requires partitioning
            lV = new DataPartition(lV, DataType.MATRIX, ValueType.DOUBLE, (m2Size > OptimizerUtils.getLocalMemBudget()) ? ExecType.MR : ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);
            lV.getOutputParameters().setDimensions(V.getDim1(), V.getDim2(), getRowsInBlock(), getColsInBlock(), V.getNnz());
            setLineNumbers(lV);
        }
        // map-side wsloss always with broadcast
        Lop wsloss = new WeightedSquaredLoss(X.constructLops(), lU, lV, W.constructLops(), DataType.MATRIX, ValueType.DOUBLE, wtype, ExecType.MR);
        wsloss.getOutputParameters().setDimensions(1, 1, X.getRowsInBlock(), X.getColsInBlock(), -1);
        setLineNumbers(wsloss);
        Group grp = new Group(wsloss, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
        grp.getOutputParameters().setDimensions(1, 1, X.getRowsInBlock(), X.getColsInBlock(), -1);
        setLineNumbers(grp);
        Aggregate agg1 = new Aggregate(grp, HopsAgg2Lops.get(AggOp.SUM), DataType.MATRIX, ValueType.DOUBLE, ExecType.MR);
        // aggregation uses kahanSum
        agg1.setupCorrectionLocation(CorrectionLocationType.NONE);
        agg1.getOutputParameters().setDimensions(1, 1, X.getRowsInBlock(), X.getColsInBlock(), -1);
        setLineNumbers(agg1);
        UnaryCP unary1 = new UnaryCP(agg1, HopsOpOp1LopsUS.get(OpOp1.CAST_AS_SCALAR), getDataType(), getValueType());
        unary1.getOutputParameters().setDimensions(0, 0, 0, 0, -1);
        setLineNumbers(unary1);
        setLops(unary1);
    } else // general case
    {
        // MR operator selection part 2
        boolean cacheU = !FORCE_REPLICATION && (m1Size < OptimizerUtils.getRemoteMemBudgetReduce());
        boolean cacheV = !FORCE_REPLICATION && ((!cacheU && m2Size < OptimizerUtils.getRemoteMemBudgetReduce()) || (cacheU && m1Size + m2Size < OptimizerUtils.getRemoteMemBudgetReduce()));
        Group grpX = new Group(X.constructLops(), Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
        grpX.getOutputParameters().setDimensions(X.getDim1(), X.getDim2(), X.getRowsInBlock(), X.getColsInBlock(), -1);
        setLineNumbers(grpX);
        Lop grpW = W.constructLops();
        if (grpW.getDataType() == DataType.MATRIX) {
            grpW = new Group(W.constructLops(), Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
            grpW.getOutputParameters().setDimensions(W.getDim1(), W.getDim2(), W.getRowsInBlock(), W.getColsInBlock(), -1);
            setLineNumbers(grpW);
        }
        Lop lU = constructLeftFactorMRLop(U, V, cacheU, m1Size);
        Lop lV = constructRightFactorMRLop(U, V, cacheV, m2Size);
        // reduce-side wsloss w/ or without broadcast
        Lop wsloss = new WeightedSquaredLossR(grpX, lU, lV, grpW, DataType.MATRIX, ValueType.DOUBLE, wtype, cacheU, cacheV, ExecType.MR);
        wsloss.getOutputParameters().setDimensions(1, 1, X.getRowsInBlock(), X.getColsInBlock(), -1);
        setLineNumbers(wsloss);
        Group grp = new Group(wsloss, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
        grp.getOutputParameters().setDimensions(1, 1, X.getRowsInBlock(), X.getColsInBlock(), -1);
        setLineNumbers(grp);
        Aggregate agg1 = new Aggregate(grp, HopsAgg2Lops.get(AggOp.SUM), DataType.MATRIX, ValueType.DOUBLE, ExecType.MR);
        // aggregation uses kahanSum
        agg1.setupCorrectionLocation(CorrectionLocationType.NONE);
        agg1.getOutputParameters().setDimensions(1, 1, X.getRowsInBlock(), X.getColsInBlock(), -1);
        setLineNumbers(agg1);
        UnaryCP unary1 = new UnaryCP(agg1, HopsOpOp1LopsUS.get(OpOp1.CAST_AS_SCALAR), getDataType(), getValueType());
        unary1.getOutputParameters().setDimensions(0, 0, 0, 0, -1);
        setLineNumbers(unary1);
        setLops(unary1);
    }
}
Also used : Group(org.apache.sysml.lops.Group) MultiThreadedHop(org.apache.sysml.hops.Hop.MultiThreadedHop) WeightedSquaredLoss(org.apache.sysml.lops.WeightedSquaredLoss) Lop(org.apache.sysml.lops.Lop) Aggregate(org.apache.sysml.lops.Aggregate) DataPartition(org.apache.sysml.lops.DataPartition) WeightedSquaredLossR(org.apache.sysml.lops.WeightedSquaredLossR) UnaryCP(org.apache.sysml.lops.UnaryCP)
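
Both branches compute the same scalar; they differ only in whether the factors U and V are broadcast (map-side) or co-grouped with X and W on the reduce side. Assuming the basic weighted variant, the quantity is wsloss = sum(W * (X - U %*% t(V))^2); a naive single-node Java sketch for reference (the runtime distributes this computation and sums partial results with a Kahan-corrected aggregate):

public class WslossSketch {
    // Naive reference: sum over all cells of W[i][j] * (X[i][j] - (U %*% t(V))[i][j])^2
    static double wsloss(double[][] X, double[][] U, double[][] V, double[][] W) {
        int m = X.length, n = X[0].length, k = U[0].length;
        double sum = 0;
        for (int i = 0; i < m; i++)
            for (int j = 0; j < n; j++) {
                double uv = 0;
                for (int l = 0; l < k; l++)
                    uv += U[i][l] * V[j][l];   // (U %*% t(V))[i][j]
                double d = X[i][j] - uv;
                sum += W[i][j] * d * d;
            }
        return sum;
    }

    public static void main(String[] args) {
        double[][] X = { { 1, 0 }, { 0, 1 } };
        double[][] U = { { 1 }, { 0 } };
        double[][] V = { { 1 }, { 0 } };
        double[][] W = { { 1, 1 }, { 1, 1 } };
        System.out.println(wsloss(X, U, V, W));   // 1.0
    }
}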

Example 30 with Aggregate

Use of org.apache.sysml.lops.Aggregate in project incubator-systemml by Apache.

The class ReorgOp, method constructLops.

@Override
public Lop constructLops() {
    // return already created lops
    if (getLops() != null)
        return getLops();
    ExecType et = optFindExecType();
    switch(op) {
        case TRANSPOSE:
            {
                Lop lin = getInput().get(0).constructLops();
                if (lin instanceof Transform && ((Transform) lin).getOperationType() == OperationTypes.Transpose)
                    // if input is already a transpose, avoid redundant transpose ops
                    setLops(lin.getInputs().get(0));
                else if (getDim1() == 1 && getDim2() == 1)
                    // if input of size 1x1, avoid unnecessary transpose
                    setLops(lin);
                else {
                    // general case
                    int k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);
                    Transform transform1 = new Transform(lin, HopsTransf2Lops.get(op), getDataType(), getValueType(), et, k);
                    setOutputDimensions(transform1);
                    setLineNumbers(transform1);
                    setLops(transform1);
                }
                break;
            }
        case DIAG:
            {
                Transform transform1 = new Transform(getInput().get(0).constructLops(), HopsTransf2Lops.get(op), getDataType(), getValueType(), et);
                setOutputDimensions(transform1);
                setLineNumbers(transform1);
                setLops(transform1);
                break;
            }
        case REV:
            {
                Lop rev = null;
                if (et == ExecType.MR) {
                    Lop tmp = new Transform(getInput().get(0).constructLops(), HopsTransf2Lops.get(op), getDataType(), getValueType(), et);
                    setOutputDimensions(tmp);
                    setLineNumbers(tmp);
                    Group group1 = new Group(tmp, Group.OperationTypes.Sort, DataType.MATRIX, getValueType());
                    setOutputDimensions(group1);
                    setLineNumbers(group1);
                    rev = new Aggregate(group1, Aggregate.OperationTypes.Sum, DataType.MATRIX, getValueType(), et);
                } else {
                    // CP/SPARK
                    rev = new Transform(getInput().get(0).constructLops(), HopsTransf2Lops.get(op), getDataType(), getValueType(), et);
                }
                setOutputDimensions(rev);
                setLineNumbers(rev);
                setLops(rev);
                break;
            }
        case RESHAPE:
            {
                // main, rows, cols, byrow
                Lop[] linputs = new Lop[4];
                for (int i = 0; i < 4; i++) linputs[i] = getInput().get(i).constructLops();
                if (et == ExecType.MR) {
                    Transform transform1 = new Transform(linputs, HopsTransf2Lops.get(op), getDataType(), getValueType(), et);
                    setOutputDimensions(transform1);
                    setLineNumbers(transform1);
                    Group group1 = new Group(transform1, Group.OperationTypes.Sort, DataType.MATRIX, getValueType());
                    setOutputDimensions(group1);
                    setLineNumbers(group1);
                    Aggregate agg1 = new Aggregate(group1, Aggregate.OperationTypes.Sum, DataType.MATRIX, getValueType(), et);
                    setOutputDimensions(agg1);
                    setLineNumbers(agg1);
                    setLops(agg1);
                } else // CP/SPARK
                {
                    Transform transform1 = new Transform(linputs, HopsTransf2Lops.get(op), getDataType(), getValueType(), et);
                    setOutputDimensions(transform1);
                    setLineNumbers(transform1);
                    setLops(transform1);
                }
                break;
            }
        case SORT:
            {
                Hop input = getInput().get(0);
                Hop by = getInput().get(1);
                Hop desc = getInput().get(2);
                Hop ixret = getInput().get(3);
                if (et == ExecType.MR) {
                    if (!(desc instanceof LiteralOp && ixret instanceof LiteralOp)) {
                        LOG.warn("Unsupported non-constant ordering parameters, using defaults and mark for recompilation.");
                        setRequiresRecompile();
                        desc = new LiteralOp(false);
                        ixret = new LiteralOp(false);
                    }
                    // Step 1: extraction (if unknown ncol or multiple columns)
                    Hop vinput = input;
                    if (input.getDim2() != 1) {
                        vinput = new IndexingOp("tmp1", getDataType(), getValueType(), input, new LiteralOp(1L), HopRewriteUtils.createValueHop(input, true), by, by, false, true);
                        vinput.refreshSizeInformation();
                        vinput.setOutputBlocksizes(getRowsInBlock(), getColsInBlock());
                        HopRewriteUtils.copyLineNumbers(this, vinput);
                    }
                    // Step 2: Index vector sort
                    Hop voutput = null;
                    if (2 * OptimizerUtils.estimateSize(vinput.getDim1(), vinput.getDim2()) > OptimizerUtils.getLocalMemBudget() || FORCE_DIST_SORT_INDEXES) {
                        // large vector, fallback to MR sort
                        // sort indexes according to given values
                        SortKeys sort = new SortKeys(vinput.constructLops(), HopRewriteUtils.getBooleanValueSafe((LiteralOp) desc), SortKeys.OperationTypes.Indexes, vinput.getDataType(), vinput.getValueType(), ExecType.MR);
                        sort.getOutputParameters().setDimensions(vinput.getDim1(), 1, vinput.getRowsInBlock(), vinput.getColsInBlock(), vinput.getNnz());
                        setLineNumbers(sort);
                        // note: this sortindexes includes also the shift by offsets and
                        // final aggregate because sideways passing of offsets would
                        // not nicely fit the current instruction model
                        setLops(sort);
                        voutput = this;
                    } else {
                        // small vector, use in-memory sort
                        ArrayList<Hop> sinputs = new ArrayList<>();
                        sinputs.add(vinput);
                        // by (always vector)
                        sinputs.add(new LiteralOp(1));
                        sinputs.add(desc);
                        // indexreturn (always indexes)
                        sinputs.add(new LiteralOp(true));
                        voutput = new ReorgOp("tmp3", getDataType(), getValueType(), ReOrgOp.SORT, sinputs);
                        HopRewriteUtils.copyLineNumbers(this, voutput);
                        // explicitly construct CP lop; otherwise there is danger of infinite recursion if forced runtime platform.
                        voutput.setLops(constructCPOrSparkSortLop(vinput, sinputs.get(1), sinputs.get(2), sinputs.get(3), ExecType.CP, false));
                        voutput.getLops().getOutputParameters().setDimensions(vinput.getDim1(), vinput.getDim2(), vinput.getRowsInBlock(), vinput.getColsInBlock(), vinput.getNnz());
                        setLops(voutput.constructLops());
                    }
                    // Step 3: reconstruct the sorted data (only if data, not indexes, is requested)
                    // -- done via X' = table(seq(), IX') %*% X;
                    if (!HopRewriteUtils.getBooleanValueSafe((LiteralOp) ixret)) {
                        // generate seq
                        DataGenOp seq = HopRewriteUtils.createSeqDataGenOp(voutput);
                        seq.setName("tmp4");
                        seq.refreshSizeInformation();
                        // select exec type
                        seq.computeMemEstimate(new MemoTable());
                        HopRewriteUtils.copyLineNumbers(this, seq);
                        // generate table
                        TernaryOp table = new TernaryOp("tmp5", DataType.MATRIX, ValueType.DOUBLE, OpOp3.CTABLE, seq, voutput, new LiteralOp(1L));
                        table.setOutputBlocksizes(getRowsInBlock(), getColsInBlock());
                        table.refreshSizeInformation();
                        // force MR
                        table.setForcedExecType(ExecType.MR);
                        HopRewriteUtils.copyLineNumbers(this, table);
                        table.setDisjointInputs(true);
                        table.setOutputEmptyBlocks(false);
                        // generate matrix mult
                        AggBinaryOp mmult = HopRewriteUtils.createMatrixMultiply(table, input);
                        // force MR
                        mmult.setForcedExecType(ExecType.MR);
                        setLops(mmult.constructLops());
                        // cleanups
                        HopRewriteUtils.removeChildReference(table, input);
                    }
                } else if (et == ExecType.SPARK) {
                    boolean sortRewrite = !FORCE_DIST_SORT_INDEXES && isSortSPRewriteApplicable() && by.getDataType().isScalar();
                    Lop transform1 = constructCPOrSparkSortLop(input, by, desc, ixret, et, sortRewrite);
                    setOutputDimensions(transform1);
                    setLineNumbers(transform1);
                    setLops(transform1);
                } else // CP
                {
                    Lop transform1 = constructCPOrSparkSortLop(input, by, desc, ixret, et, false);
                    setOutputDimensions(transform1);
                    setLineNumbers(transform1);
                    setLops(transform1);
                }
                break;
            }
        default:
            throw new HopsException("Unsupported lops construction for operation type '" + op + "'.");
    }
    // add reblock/checkpoint lops if necessary
    constructAndSetLopsDataFlowProperties();
    return getLops();
}
Also used : Group(org.apache.sysml.lops.Group) MultiThreadedHop(org.apache.sysml.hops.Hop.MultiThreadedHop) ArrayList(java.util.ArrayList) Lop(org.apache.sysml.lops.Lop) SortKeys(org.apache.sysml.lops.SortKeys) ExecType(org.apache.sysml.lops.LopProperties.ExecType) Transform(org.apache.sysml.lops.Transform) Aggregate(org.apache.sysml.lops.Aggregate)
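
For the MR sort path, the rewrite X' = table(seq(), IX') %*% X reorders the rows of X by multiplying with a permutation matrix built from the sorted index vector: P[i, IX[i]] = 1, so row i of the product is row IX[i] of X. A small sketch of that permutation idea (0-based indices for brevity; DML itself is 1-based):

public class PermuteSketch {
    // Equivalent of P %*% X where P[i][ix[i]] = 1: row i of the output is row ix[i] of X.
    static double[][] permuteRows(double[][] X, int[] ix) {
        double[][] out = new double[ix.length][];
        for (int i = 0; i < ix.length; i++)
            out[i] = X[ix[i]].clone();
        return out;
    }

    public static void main(String[] args) {
        double[][] X  = { { 10 }, { 20 }, { 30 } };
        int[] ix      = { 2, 0, 1 };            // sorted order of the by-column
        double[][] Xp = permuteRows(X, ix);     // rows 30, 10, 20
        System.out.println(Xp[0][0] + " " + Xp[1][0] + " " + Xp[2][0]);
    }
}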

Aggregations

Aggregate (org.apache.sysml.lops.Aggregate) 42
Group (org.apache.sysml.lops.Group) 38
MultiThreadedHop (org.apache.sysml.hops.Hop.MultiThreadedHop) 32
Lop (org.apache.sysml.lops.Lop) 32
DataPartition (org.apache.sysml.lops.DataPartition) 20
ExecType (org.apache.sysml.lops.LopProperties.ExecType) 20
PartialAggregate (org.apache.sysml.lops.PartialAggregate) 10
UnaryCP (org.apache.sysml.lops.UnaryCP) 10
CombineUnary (org.apache.sysml.lops.CombineUnary) 6
Data (org.apache.sysml.lops.Data) 6
GroupedAggregate (org.apache.sysml.lops.GroupedAggregate) 6
SortKeys (org.apache.sysml.lops.SortKeys) 6
Transform (org.apache.sysml.lops.Transform) 6
Unary (org.apache.sysml.lops.Unary) 6
ArrayList (java.util.ArrayList) 4
SparkAggType (org.apache.sysml.hops.AggBinaryOp.SparkAggType) 4
OperationTypes (org.apache.sysml.lops.Aggregate.OperationTypes) 4
AppendR (org.apache.sysml.lops.AppendR) 4
CumulativePartialAggregate (org.apache.sysml.lops.CumulativePartialAggregate) 4
CumulativeSplitAggregate (org.apache.sysml.lops.CumulativeSplitAggregate) 4