Example 1 with Unary

use of org.apache.sysml.lops.Unary in project incubator-systemml by apache.

the class UnaryOp method constructLops.

@Override
public Lop constructLops() throws HopsException, LopsException {
    //reuse existing lop
    if (getLops() != null)
        return getLops();
    try {
        Hop input = getInput().get(0);
        //value type casts or matrix to scalar
        if (getDataType() == DataType.SCALAR || (_op == OpOp1.CAST_AS_MATRIX && getInput().get(0).getDataType() == DataType.SCALAR) || (_op == OpOp1.CAST_AS_FRAME && getInput().get(0).getDataType() == DataType.SCALAR)) {
            //special handling IQM
            if (_op == Hop.OpOp1.IQM) {
                Lop iqmLop = constructLopsIQM();
                setLops(iqmLop);
            } else if (_op == Hop.OpOp1.MEDIAN) {
                Lop medianLop = constructLopsMedian();
                setLops(medianLop);
            } else { //general case SCALAR/CAST (always in CP)
                UnaryCP.OperationTypes optype = HopsOpOp1LopsUS.get(_op);
                if (optype == null)
                    throw new HopsException("Unknown UnaryCP lop type for UnaryOp operation type '" + _op + "'");
                UnaryCP unary1 = null;
                if ((_op == Hop.OpOp1.NROW || _op == Hop.OpOp1.NCOL || _op == Hop.OpOp1.LENGTH) && input instanceof UnaryOp && ((UnaryOp) input).getOp() == OpOp1.SELP) {
                    // Dimensions do not change during the sel+ operation.
                    // This case helps avoid an unnecessary sel+ operation for fused maxpooling.
                    unary1 = new UnaryCP(input.getInput().get(0).constructLops(), optype, getDataType(), getValueType());
                } else
                    unary1 = new UnaryCP(input.constructLops(), optype, getDataType(), getValueType());
                setOutputDimensions(unary1);
                setLineNumbers(unary1);
                setLops(unary1);
            }
        } else { //general case MATRIX
            ExecType et = optFindExecType();
            //special handling cumsum/cumprod/cummin/cummax
            if (isCumulativeUnaryOperation() && et != ExecType.CP) {
                //TODO additional physical operation if offsets fit in memory
                Lop cumsumLop = null;
                if (et == ExecType.MR)
                    cumsumLop = constructLopsMRCumulativeUnary();
                else
                    cumsumLop = constructLopsSparkCumulativeUnary();
                setLops(cumsumLop);
            } else { //default unary
                int k = isCumulativeUnaryOperation() ? OptimizerUtils.getConstrainedNumThreads(_maxNumThreads) : 1;
                switch(_op) {
                    case SELP:
                    case EXP:
                    case SQRT:
                    case LOG:
                    case ABS:
                    case ROUND:
                    case FLOOR:
                    case CEIL:
                    case SIN:
                    case COS:
                    case TAN:
                    case ASIN:
                    case ACOS:
                    case ATAN:
                    case SIGN:
                        et = findGPUExecTypeByMemEstimate(et);
                        break;
                    default:
                }
                Unary unary1 = new Unary(input.constructLops(), HopsOpOp1LopsU.get(_op), getDataType(), getValueType(), et, k);
                setOutputDimensions(unary1);
                setLineNumbers(unary1);
                setLops(unary1);
            }
        }
    } catch (Exception e) {
        throw new HopsException(this.printErrorLocation() + "error constructing Lops for UnaryOp Hop -- \n ", e);
    }
    //add reblock/checkpoint lops if necessary
    constructAndSetLopsDataFlowProperties();
    return getLops();
}
Also used : OperationTypes(org.apache.sysml.lops.Aggregate.OperationTypes) MultiThreadedHop(org.apache.sysml.hops.Hop.MultiThreadedHop) ExecType(org.apache.sysml.lops.LopProperties.ExecType) Lop(org.apache.sysml.lops.Lop) CombineUnary(org.apache.sysml.lops.CombineUnary) Unary(org.apache.sysml.lops.Unary) LopsException(org.apache.sysml.lops.LopsException) UnaryCP(org.apache.sysml.lops.UnaryCP)
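
The constructLops() method above is memoized: if the hop already carries a lop it is returned immediately, otherwise the newly built lop is cached with setLops() so later calls and shared parents in the HOP DAG reuse the same operator. A minimal sketch of that construct-once idiom, using hypothetical classes rather than the real Hop/Lop API:

// Illustrative sketch only: hypothetical types, not org.apache.sysml.hops.Hop.
interface LopSketch { }

abstract class HopSketch {
    private LopSketch lops; // cached low-level operator

    final LopSketch constructLops() {
        if (lops != null)      // reuse existing lop (cf. getLops() != null)
            return lops;
        lops = buildLops();    // build once and cache (cf. setLops(...))
        return lops;
    }

    // subclasses choose the concrete operator (CP scalar cast, IQM, cumulative MR/Spark, ...)
    protected abstract LopSketch buildLops();
}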

Example 2 with Unary

use of org.apache.sysml.lops.Unary in project incubator-systemml by apache.

the class UnaryOp method constructLopsSparkCumulativeUnary.

private Lop constructLopsSparkCumulativeUnary() throws HopsException, LopsException {
    Hop input = getInput().get(0);
    long rlen = input.getDim1();
    long clen = input.getDim2();
    long brlen = input.getRowsInBlock();
    long bclen = input.getColsInBlock();
    boolean force = !dimsKnown() || _etypeForced == ExecType.SPARK;
    OperationTypes aggtype = getCumulativeAggType();
    Lop X = input.constructLops();
    Lop TEMP = X;
    ArrayList<Lop> DATA = new ArrayList<Lop>();
    int level = 0;
    //recursive preaggregation until aggregates fit into CP memory budget
    while (((2 * OptimizerUtils.estimateSize(TEMP.getOutputParameters().getNumRows(), clen) + OptimizerUtils.estimateSize(1, clen)) > OptimizerUtils.getLocalMemBudget() && TEMP.getOutputParameters().getNumRows() > 1) || force) {
        DATA.add(TEMP);
        //preaggregation per block (for spark, the CumulativePartialAggregate subsumes both
        //the preaggregation and subsequent block aggregation)
        long rlenAgg = (long) Math.ceil((double) TEMP.getOutputParameters().getNumRows() / brlen);
        Lop preagg = new CumulativePartialAggregate(TEMP, DataType.MATRIX, ValueType.DOUBLE, aggtype, ExecType.SPARK);
        preagg.getOutputParameters().setDimensions(rlenAgg, clen, brlen, bclen, -1);
        setLineNumbers(preagg);
        TEMP = preagg;
        level++;
        //in case of unknowns, generate one level
        force = false;
    }
    //in-memory cum sum (of partial aggregates)
    if (TEMP.getOutputParameters().getNumRows() != 1) {
        int k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);
        Unary unary1 = new Unary(TEMP, HopsOpOp1LopsU.get(_op), DataType.MATRIX, ValueType.DOUBLE, ExecType.CP, k);
        unary1.getOutputParameters().setDimensions(TEMP.getOutputParameters().getNumRows(), clen, brlen, bclen, -1);
        setLineNumbers(unary1);
        TEMP = unary1;
    }
    //split, group and mr cumsum
    while (level-- > 0) {
        //(for spark, the CumulativeOffsetBinary subsumes both the split aggregate and 
        //the subsequent offset binary apply of split aggregates against the original data)
        double initValue = getCumulativeInitValue();
        CumulativeOffsetBinary binary = new CumulativeOffsetBinary(DATA.get(level), TEMP, DataType.MATRIX, ValueType.DOUBLE, initValue, aggtype, ExecType.SPARK);
        binary.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
        setLineNumbers(binary);
        TEMP = binary;
    }
    return TEMP;
}
Also used : CumulativePartialAggregate(org.apache.sysml.lops.CumulativePartialAggregate) OperationTypes(org.apache.sysml.lops.Aggregate.OperationTypes) CumulativeOffsetBinary(org.apache.sysml.lops.CumulativeOffsetBinary) MultiThreadedHop(org.apache.sysml.hops.Hop.MultiThreadedHop) ArrayList(java.util.ArrayList) Lop(org.apache.sysml.lops.Lop) CombineUnary(org.apache.sysml.lops.CombineUnary) Unary(org.apache.sysml.lops.Unary)
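
The while loop above keeps preaggregating until two copies of the intermediate plus one aggregate row fit into the local memory budget; each pass shrinks the row count by the block size brlen. The resulting level count can be estimated up front. The following self-contained sketch mirrors that loop under simplifying assumptions (a dense 8-bytes-per-cell estimate and a fixed 1 GB budget instead of the real OptimizerUtils estimates; the force flag for unknown dimensions is omitted):

public class CumAggLevels {
    // Simplified dense size estimate in bytes; the real OptimizerUtils estimate
    // also accounts for sparsity and block metadata.
    static double estimateSize(long rows, long cols) {
        return (double) rows * cols * 8;
    }

    public static void main(String[] args) {
        long rlen = 10_000_000, clen = 10, brlen = 1_000; // hypothetical input dimensions
        double budget = 1024d * 1024 * 1024;              // assumed 1 GB local memory budget

        long rows = rlen;
        int level = 0;
        while ((2 * estimateSize(rows, clen) + estimateSize(1, clen)) > budget && rows > 1) {
            rows = (long) Math.ceil((double) rows / brlen); // one preaggregation pass per level
            level++;
        }
        // For these numbers: one Spark preaggregation level (10^7 -> 10^4 rows), after which
        // the 10^4 x 10 partial aggregate is small enough for the in-memory CP cumsum.
        System.out.println("preaggregation levels = " + level + ", remaining rows = " + rows);
    }
}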

Example 3 with Unary

use of org.apache.sysml.lops.Unary in project incubator-systemml by apache.

the class UnaryOp method constructLopsMRCumulativeUnary.

/**
	 * MR Cumsum is currently based on a multipass algorithm of (1) preaggregation and (2) subsequent offsetting. 
	 * Note that we currently support one robust physical operator, but many alternative
	 * realizations are possible for specific scenarios (e.g., when the preaggregated intermediates
	 * fit into the map-task memory budget) or by creating custom job types.
	 * 
	 * @return low-level operator
	 * @throws HopsException if HopsException occurs
	 * @throws LopsException if LopsException occurs
	 */
private Lop constructLopsMRCumulativeUnary() throws HopsException, LopsException {
    Hop input = getInput().get(0);
    long rlen = input.getDim1();
    long clen = input.getDim2();
    long brlen = input.getRowsInBlock();
    long bclen = input.getColsInBlock();
    boolean force = !dimsKnown() || _etypeForced == ExecType.MR;
    OperationTypes aggtype = getCumulativeAggType();
    Lop X = input.constructLops();
    Lop TEMP = X;
    ArrayList<Lop> DATA = new ArrayList<Lop>();
    int level = 0;
    //recursive preaggregation until aggregates fit into CP memory budget
    while (((2 * OptimizerUtils.estimateSize(TEMP.getOutputParameters().getNumRows(), clen) + OptimizerUtils.estimateSize(1, clen)) > OptimizerUtils.getLocalMemBudget() && TEMP.getOutputParameters().getNumRows() > 1) || force) {
        DATA.add(TEMP);
        //preaggregation per block
        long rlenAgg = (long) Math.ceil((double) TEMP.getOutputParameters().getNumRows() / brlen);
        Lop preagg = new CumulativePartialAggregate(TEMP, DataType.MATRIX, ValueType.DOUBLE, aggtype, ExecType.MR);
        preagg.getOutputParameters().setDimensions(rlenAgg, clen, brlen, bclen, -1);
        setLineNumbers(preagg);
        Group group = new Group(preagg, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
        group.getOutputParameters().setDimensions(rlenAgg, clen, brlen, bclen, -1);
        setLineNumbers(group);
        Aggregate agg = new Aggregate(group, HopsAgg2Lops.get(AggOp.SUM), getDataType(), getValueType(), ExecType.MR);
        agg.getOutputParameters().setDimensions(rlenAgg, clen, brlen, bclen, -1);
        // aggregation uses kahanSum but the inputs do not have correction values
        agg.setupCorrectionLocation(CorrectionLocationType.NONE);
        setLineNumbers(agg);
        TEMP = agg;
        level++;
        //in case of unknowns, generate one level
        force = false;
    }
    //in-memory cum sum (of partial aggregates)
    if (TEMP.getOutputParameters().getNumRows() != 1) {
        int k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);
        Unary unary1 = new Unary(TEMP, HopsOpOp1LopsU.get(_op), DataType.MATRIX, ValueType.DOUBLE, ExecType.CP, k);
        unary1.getOutputParameters().setDimensions(TEMP.getOutputParameters().getNumRows(), clen, brlen, bclen, -1);
        setLineNumbers(unary1);
        TEMP = unary1;
    }
    //split, group and mr cumsum
    while (level-- > 0) {
        double init = getCumulativeInitValue();
        CumulativeSplitAggregate split = new CumulativeSplitAggregate(TEMP, DataType.MATRIX, ValueType.DOUBLE, init);
        split.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
        setLineNumbers(split);
        Group group1 = new Group(DATA.get(level), Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
        group1.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
        setLineNumbers(group1);
        Group group2 = new Group(split, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
        group2.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
        setLineNumbers(group2);
        CumulativeOffsetBinary binary = new CumulativeOffsetBinary(group1, group2, DataType.MATRIX, ValueType.DOUBLE, aggtype, ExecType.MR);
        binary.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
        setLineNumbers(binary);
        TEMP = binary;
    }
    return TEMP;
}
Also used : Group(org.apache.sysml.lops.Group) CumulativeSplitAggregate(org.apache.sysml.lops.CumulativeSplitAggregate) MultiThreadedHop(org.apache.sysml.hops.Hop.MultiThreadedHop) ArrayList(java.util.ArrayList) Lop(org.apache.sysml.lops.Lop) CombineUnary(org.apache.sysml.lops.CombineUnary) Unary(org.apache.sysml.lops.Unary) CumulativePartialAggregate(org.apache.sysml.lops.CumulativePartialAggregate) OperationTypes(org.apache.sysml.lops.Aggregate.OperationTypes) CumulativeOffsetBinary(org.apache.sysml.lops.CumulativeOffsetBinary) PartialAggregate(org.apache.sysml.lops.PartialAggregate) CumulativeSplitAggregate(org.apache.sysml.lops.CumulativeSplitAggregate) Aggregate(org.apache.sysml.lops.Aggregate) CumulativePartialAggregate(org.apache.sysml.lops.CumulativePartialAggregate)
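
The javadoc above describes the MR cumsum as (1) blockwise preaggregation followed by (2) offsetting of block-local cumulative sums against the original data. A self-contained sketch of that two-phase idea on a single in-memory column (plain Java, not the SystemML lops; block size and input values are arbitrary examples):

import java.util.Arrays;

public class BlockCumsum {
    public static double[] cumsum(double[] x, int blockSize) {
        int nBlocks = (int) Math.ceil((double) x.length / blockSize);

        // (1) preaggregation: one sum per block (cf. CumulativePartialAggregate)
        double[] blockSums = new double[nBlocks];
        for (int i = 0; i < x.length; i++)
            blockSums[i / blockSize] += x[i];

        // cumsum of the partial aggregates, shifted by the init value 0,
        // so offsets[b] holds the total of all blocks before block b
        double[] offsets = new double[nBlocks];
        for (int b = 1; b < nBlocks; b++)
            offsets[b] = offsets[b - 1] + blockSums[b - 1];

        // (2) offsetting: block-local cumsum plus the block's offset (cf. CumulativeOffsetBinary)
        double[] out = new double[x.length];
        for (int b = 0; b < nBlocks; b++) {
            double run = offsets[b];
            for (int i = b * blockSize; i < Math.min((b + 1) * blockSize, x.length); i++) {
                run += x[i];
                out[i] = run;
            }
        }
        return out;
    }

    public static void main(String[] args) {
        double[] x = { 1, 2, 3, 4, 5, 6, 7 };
        System.out.println(Arrays.toString(cumsum(x, 3))); // [1.0, 3.0, 6.0, 10.0, 15.0, 21.0, 28.0]
    }
}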

Example 4 with Unary

use of org.apache.sysml.lops.Unary in project incubator-systemml by apache.

the class Dag method getAggAndOtherInstructions.

/**
	 * Method to populate aggregate and other instructions in reducer.
	 * 
	 * @param node low-level operator
	 * @param execNodes list of exec nodes
	 * @param shuffleInstructions list of shuffle instructions
	 * @param aggInstructionsReducer list of aggregate instructions for the reducer
	 * @param otherInstructionsReducer list of other (non-aggregate) instructions for the reducer
	 * @param nodeIndexMapping node index mapping
	 * @param start_index start index
	 * @param inputLabels list of input labels
	 * @param inputLops list of input lops
	 * @param MRJobLineNumbers MR job line numbers
	 * @return -1 if problem
	 * @throws LopsException if LopsException occurs
	 */
private int getAggAndOtherInstructions(Lop node, ArrayList<Lop> execNodes, ArrayList<String> shuffleInstructions, ArrayList<String> aggInstructionsReducer, ArrayList<String> otherInstructionsReducer, HashMap<Lop, Integer> nodeIndexMapping, int[] start_index, ArrayList<String> inputLabels, ArrayList<Lop> inputLops, ArrayList<Integer> MRJobLineNumbers) throws LopsException {
    int ret_val = -1;
    if (nodeIndexMapping.containsKey(node))
        return nodeIndexMapping.get(node);
    if (!execNodes.contains(node))
        return ret_val;
    ArrayList<Integer> inputIndices = new ArrayList<Integer>();
    // For a data WRITE lop, only the first input is the actual data to be written (the remaining inputs are parameters), so recurse only into that first element.
    if (node.getType() == Lop.Type.Data && ((Data) node).getOperationType() == Data.OperationTypes.WRITE) {
        ret_val = getAggAndOtherInstructions(node.getInputs().get(0), execNodes, shuffleInstructions, aggInstructionsReducer, otherInstructionsReducer, nodeIndexMapping, start_index, inputLabels, inputLops, MRJobLineNumbers);
        inputIndices.add(ret_val);
    } else {
        for (Lop cnode : node.getInputs()) {
            ret_val = getAggAndOtherInstructions(cnode, execNodes, shuffleInstructions, aggInstructionsReducer, otherInstructionsReducer, nodeIndexMapping, start_index, inputLabels, inputLops, MRJobLineNumbers);
            inputIndices.add(ret_val);
        }
    }
    if (node.getExecLocation() == ExecLocation.Data) {
        if (((Data) node).getFileFormatType() == FileFormatTypes.CSV && !(node.getInputs().get(0) instanceof ParameterizedBuiltin && ((ParameterizedBuiltin) node.getInputs().get(0)).getOp() == org.apache.sysml.lops.ParameterizedBuiltin.OperationTypes.TRANSFORM)) {
            // Generate write instruction, which goes into CSV_WRITE Job
            int output_index = start_index[0];
            shuffleInstructions.add(node.getInstructions(inputIndices.get(0), output_index));
            if (DMLScript.ENABLE_DEBUG_MODE) {
                MRJobLineNumbers.add(node._beginLine);
            }
            nodeIndexMapping.put(node, output_index);
            start_index[0]++;
            return output_index;
        } else {
            return ret_val;
        }
    }
    if (node.getExecLocation() == ExecLocation.MapAndReduce) {
        /* Generate Shuffle Instruction for "node", and return the index associated with produced output */
        boolean instGenerated = true;
        int output_index = start_index[0];
        switch(node.getType()) {
            /* Lop types that take a single input */
            case ReBlock:
            case CSVReBlock:
            case SortKeys:
            case CentralMoment:
            case CoVariance:
            case GroupedAgg:
            case DataPartition:
                shuffleInstructions.add(node.getInstructions(inputIndices.get(0), output_index));
                if (DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                break;
            case ParameterizedBuiltin:
                if (((ParameterizedBuiltin) node).getOp() == org.apache.sysml.lops.ParameterizedBuiltin.OperationTypes.TRANSFORM) {
                    shuffleInstructions.add(node.getInstructions(output_index));
                    if (DMLScript.ENABLE_DEBUG_MODE) {
                        MRJobLineNumbers.add(node._beginLine);
                    }
                }
                break;
            /* Lop types that take two inputs */
            case MMCJ:
            case MMRJ:
            case CombineBinary:
                shuffleInstructions.add(node.getInstructions(inputIndices.get(0), inputIndices.get(1), output_index));
                if (DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                break;
            /* Lop types that take three inputs */
            case CombineTernary:
                shuffleInstructions.add(node.getInstructions(inputIndices.get(0), inputIndices.get(1), inputIndices.get(2), output_index));
                if (DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                break;
            default:
                instGenerated = false;
                break;
        }
        if (instGenerated) {
            nodeIndexMapping.put(node, output_index);
            start_index[0]++;
            return output_index;
        } else {
            return inputIndices.get(0);
        }
    }
    /* Get instructions for aligned reduce and other lops below the reduce. */
    if (node.getExecLocation() == ExecLocation.Reduce || node.getExecLocation() == ExecLocation.MapOrReduce || hasChildNode(node, execNodes, ExecLocation.MapAndReduce)) {
        if (inputIndices.size() == 1) {
            int output_index = start_index[0];
            start_index[0]++;
            if (node.getType() == Type.Aggregate) {
                aggInstructionsReducer.add(node.getInstructions(inputIndices.get(0), output_index));
                if (DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
            } else {
                otherInstructionsReducer.add(node.getInstructions(inputIndices.get(0), output_index));
                if (DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
            }
            nodeIndexMapping.put(node, output_index);
            return output_index;
        } else if (inputIndices.size() == 2) {
            int output_index = start_index[0];
            start_index[0]++;
            otherInstructionsReducer.add(node.getInstructions(inputIndices.get(0), inputIndices.get(1), output_index));
            if (DMLScript.ENABLE_DEBUG_MODE) {
                MRJobLineNumbers.add(node._beginLine);
            }
            nodeIndexMapping.put(node, output_index);
            if (node instanceof Unary && node.getInputs().size() > 1) {
                int index = 0;
                for (int i = 0; i < node.getInputs().size(); i++) {
                    if (node.getInputs().get(i).getDataType() == DataType.SCALAR) {
                        index = i;
                        break;
                    }
                }
                if (node.getInputs().get(index).getExecLocation() == ExecLocation.Data && !((Data) (node.getInputs().get(index))).isLiteral()) {
                    inputLabels.add(node.getInputs().get(index).getOutputParameters().getLabel());
                    inputLops.add(node.getInputs().get(index));
                }
                if (node.getInputs().get(index).getExecLocation() != ExecLocation.Data) {
                    inputLabels.add(node.getInputs().get(index).getOutputParameters().getLabel());
                    inputLops.add(node.getInputs().get(index));
                }
            }
            return output_index;
        } else if (inputIndices.size() == 3 || node.getType() == Type.Ternary) {
            int output_index = start_index[0];
            start_index[0]++;
            if (node.getType() == Type.Ternary) {
                // in case of CTABLE_TRANSFORM_SCALAR_WEIGHT: inputIndices.get(2) would be -1
                otherInstructionsReducer.add(node.getInstructions(inputIndices.get(0), inputIndices.get(1), inputIndices.get(2), output_index));
                if (DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                nodeIndexMapping.put(node, output_index);
            } else if (node.getType() == Type.ParameterizedBuiltin) {
                otherInstructionsReducer.add(node.getInstructions(inputIndices.get(0), inputIndices.get(1), inputIndices.get(2), output_index));
                if (DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                nodeIndexMapping.put(node, output_index);
            } else {
                otherInstructionsReducer.add(node.getInstructions(inputIndices.get(0), inputIndices.get(1), inputIndices.get(2), output_index));
                if (DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                nodeIndexMapping.put(node, output_index);
                return output_index;
            }
            return output_index;
        } else if (inputIndices.size() == 4) {
            int output_index = start_index[0];
            start_index[0]++;
            otherInstructionsReducer.add(node.getInstructions(inputIndices.get(0), inputIndices.get(1), inputIndices.get(2), inputIndices.get(3), output_index));
            if (DMLScript.ENABLE_DEBUG_MODE) {
                MRJobLineNumbers.add(node._beginLine);
            }
            nodeIndexMapping.put(node, output_index);
            return output_index;
        } else
            throw new LopsException("Invalid number of inputs to a lop: " + inputIndices.size());
    }
    return -1;
}
Also used : LopsException(org.apache.sysml.lops.LopsException) ParameterizedBuiltin(org.apache.sysml.lops.ParameterizedBuiltin) ArrayList(java.util.ArrayList) Data(org.apache.sysml.lops.Data) Lop(org.apache.sysml.lops.Lop) Unary(org.apache.sysml.lops.Unary)
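
A recurring pattern in the method above is output-index bookkeeping: every generated instruction consumes the indices of its inputs, claims the next free index from start_index[0], and records it in nodeIndexMapping so a lop shared by several consumers is emitted only once. A minimal sketch of that bookkeeping with hypothetical node and instruction types (not the real Dag/Lop machinery):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class IndexAssignment {
    // Hypothetical DAG node, standing in for org.apache.sysml.lops.Lop.
    static class Node {
        final String name;
        final List<Node> inputs = new ArrayList<>();
        Node(String name, Node... in) {
            this.name = name;
            for (Node n : in) inputs.add(n);
        }
    }

    private final Map<Node, Integer> nodeIndexMapping = new HashMap<>(); // memoized output indices
    private final int[] startIndex = { 0 };                              // next free index
    private final List<String> instructions = new ArrayList<>();

    int assign(Node node) {
        Integer cached = nodeIndexMapping.get(node);
        if (cached != null)
            return cached;                  // shared subexpression: reuse its output index
        List<Integer> in = new ArrayList<>();
        for (Node c : node.inputs)
            in.add(assign(c));              // resolve all input indices first
        int out = startIndex[0]++;          // claim the next free output index
        instructions.add(node.name + in + " -> " + out);
        nodeIndexMapping.put(node, out);
        return out;
    }

    public static void main(String[] args) {
        Node a = new Node("read");
        Node b = new Node("unary", a);
        Node c = new Node("binary", a, b);
        IndexAssignment ia = new IndexAssignment();
        ia.assign(c);
        ia.instructions.forEach(System.out::println); // "read" gets one index and is reused by both consumers
    }
}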

Example 5 with Unary

use of org.apache.sysml.lops.Unary in project incubator-systemml by apache.

the class UnaryOp method constructLopsIQM.

private Lop constructLopsIQM() throws HopsException, LopsException {
    ExecType et = optFindExecType();
    Hop input = getInput().get(0);
    if (et == ExecType.MR) {
        CombineUnary combine = CombineUnary.constructCombineLop(input.constructLops(), DataType.MATRIX, getValueType());
        combine.getOutputParameters().setDimensions(input.getDim1(), input.getDim2(), input.getRowsInBlock(), input.getColsInBlock(), input.getNnz());
        SortKeys sort = SortKeys.constructSortByValueLop(combine, SortKeys.OperationTypes.WithoutWeights, DataType.MATRIX, ValueType.DOUBLE, ExecType.MR);
        // Sort dimensions are same as the first input
        sort.getOutputParameters().setDimensions(input.getDim1(), input.getDim2(), input.getRowsInBlock(), input.getColsInBlock(), input.getNnz());
        Data lit = Data.createLiteralLop(ValueType.DOUBLE, Double.toString(0.25));
        lit.setAllPositions(this.getBeginLine(), this.getBeginColumn(), this.getEndLine(), this.getEndColumn());
        PickByCount pick = new PickByCount(sort, lit, DataType.MATRIX, getValueType(), PickByCount.OperationTypes.RANGEPICK);
        pick.getOutputParameters().setDimensions(-1, -1, getRowsInBlock(), getColsInBlock(), -1);
        setLineNumbers(pick);
        PartialAggregate pagg = new PartialAggregate(pick, HopsAgg2Lops.get(Hop.AggOp.SUM), HopsDirection2Lops.get(Hop.Direction.RowCol), DataType.MATRIX, getValueType());
        setLineNumbers(pagg);
        // Set the dimensions of PartialAggregate LOP based on the
        // direction in which aggregation is performed
        pagg.setDimensionsBasedOnDirection(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock());
        Group group1 = new Group(pagg, Group.OperationTypes.Sort, DataType.MATRIX, getValueType());
        group1.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
        setLineNumbers(group1);
        Aggregate agg1 = new Aggregate(group1, HopsAgg2Lops.get(Hop.AggOp.SUM), DataType.MATRIX, getValueType(), ExecType.MR);
        agg1.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
        agg1.setupCorrectionLocation(pagg.getCorrectionLocation());
        setLineNumbers(agg1);
        UnaryCP unary1 = new UnaryCP(agg1, HopsOpOp1LopsUS.get(OpOp1.CAST_AS_SCALAR), getDataType(), getValueType());
        unary1.getOutputParameters().setDimensions(0, 0, 0, 0, -1);
        setLineNumbers(unary1);
        Unary iqm = new Unary(sort, unary1, Unary.OperationTypes.MR_IQM, DataType.SCALAR, ValueType.DOUBLE, ExecType.CP);
        iqm.getOutputParameters().setDimensions(0, 0, 0, 0, -1);
        setLineNumbers(iqm);
        return iqm;
    } else {
        SortKeys sort = SortKeys.constructSortByValueLop(input.constructLops(), SortKeys.OperationTypes.WithoutWeights, DataType.MATRIX, ValueType.DOUBLE, et);
        sort.getOutputParameters().setDimensions(input.getDim1(), input.getDim2(), input.getRowsInBlock(), input.getColsInBlock(), input.getNnz());
        PickByCount pick = new PickByCount(sort, null, getDataType(), getValueType(), PickByCount.OperationTypes.IQM, et, true);
        pick.getOutputParameters().setDimensions(getDim1(), getDim2(), getRowsInBlock(), getColsInBlock(), getNnz());
        setLineNumbers(pick);
        return pick;
    }
}
Also used : PartialAggregate(org.apache.sysml.lops.PartialAggregate) CumulativePartialAggregate(org.apache.sysml.lops.CumulativePartialAggregate) SortKeys(org.apache.sysml.lops.SortKeys) Group(org.apache.sysml.lops.Group) PickByCount(org.apache.sysml.lops.PickByCount) CombineUnary(org.apache.sysml.lops.CombineUnary) MultiThreadedHop(org.apache.sysml.hops.Hop.MultiThreadedHop) ExecType(org.apache.sysml.lops.LopProperties.ExecType) Data(org.apache.sysml.lops.Data) PartialAggregate(org.apache.sysml.lops.PartialAggregate) CumulativeSplitAggregate(org.apache.sysml.lops.CumulativeSplitAggregate) Aggregate(org.apache.sysml.lops.Aggregate) CumulativePartialAggregate(org.apache.sysml.lops.CumulativePartialAggregate) CombineUnary(org.apache.sysml.lops.CombineUnary) Unary(org.apache.sysml.lops.Unary) UnaryCP(org.apache.sysml.lops.UnaryCP)
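
Both branches above ultimately compute the interquartile mean: sort the values and average the middle 50% (the MR branch picks the 0.25..0.75 range after sorting, while the CP/Spark branch uses a single PickByCount IQM operator). A small self-contained sketch of the statistic itself for the simple unweighted case where the number of values is a multiple of 4 (the real operator also handles weights and fractional quartile boundaries):

import java.util.Arrays;

public class Iqm {
    // Unweighted interquartile mean, assuming values.length is divisible by 4.
    static double iqm(double[] values) {
        double[] v = values.clone();
        Arrays.sort(v);                 // cf. SortKeys before PickByCount
        int n = v.length;
        int lo = n / 4, hi = 3 * n / 4; // drop the lowest and highest quartile
        double sum = 0;
        for (int i = lo; i < hi; i++)
            sum += v[i];
        return sum / (hi - lo);
    }

    public static void main(String[] args) {
        double[] x = { 5, 8, 4, 38, 8, 6, 9, 7, 7, 3, 1, 6 };
        System.out.println(iqm(x));     // 6.5, the mean of the middle six values {5, 6, 6, 7, 7, 8}
    }
}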

Aggregations

Unary (org.apache.sysml.lops.Unary) 8
CombineUnary (org.apache.sysml.lops.CombineUnary) 6
Lop (org.apache.sysml.lops.Lop) 6
ArrayList (java.util.ArrayList) 4
MultiThreadedHop (org.apache.sysml.hops.Hop.MultiThreadedHop) 4
Data (org.apache.sysml.lops.Data) 4
Group (org.apache.sysml.lops.Group) 4
Aggregate (org.apache.sysml.lops.Aggregate) 3
OperationTypes (org.apache.sysml.lops.Aggregate.OperationTypes) 3
CumulativePartialAggregate (org.apache.sysml.lops.CumulativePartialAggregate) 3
ExecType (org.apache.sysml.lops.LopProperties.ExecType) 3
LopsException (org.apache.sysml.lops.LopsException) 3
PartialAggregate (org.apache.sysml.lops.PartialAggregate) 3
UnaryCP (org.apache.sysml.lops.UnaryCP) 3
CombineBinary (org.apache.sysml.lops.CombineBinary) 2
CumulativeOffsetBinary (org.apache.sysml.lops.CumulativeOffsetBinary) 2
CumulativeSplitAggregate (org.apache.sysml.lops.CumulativeSplitAggregate) 2
PickByCount (org.apache.sysml.lops.PickByCount) 2
SortKeys (org.apache.sysml.lops.SortKeys) 2
Binary (org.apache.sysml.lops.Binary) 1