Example 11 with GroupByDesc

Use of org.apache.hadoop.hive.ql.plan.GroupByDesc in project hive by apache.

From class VectorGroupByOperatorBench, method buildGroupByDescType:

private GroupByDesc buildGroupByDescType(String aggregate, GenericUDAFEvaluator.Mode mode,
        String column, TypeInfo dataType,
        final VectorGroupByDesc.ProcessingMode processMode) throws SemanticException {
    AggregationDesc agg = buildAggregationDesc(aggregate, mode, column, dataType);
    ArrayList<AggregationDesc> aggs = new ArrayList<AggregationDesc>();
    aggs.add(agg);
    ArrayList<String> outputColumnNames = new ArrayList<String>();
    outputColumnNames.add("_col0");
    GroupByDesc desc = new GroupByDesc();
    desc.setVectorDesc(new VectorGroupByDesc());
    desc.setOutputColumnNames(outputColumnNames);
    desc.setAggregators(aggs);
    ((VectorGroupByDesc) desc.getVectorDesc()).setProcessingMode(processMode);
    return desc;
}
Also used: ArrayList (java.util.ArrayList), VectorGroupByDesc (org.apache.hadoop.hive.ql.plan.VectorGroupByDesc), AggregationDesc (org.apache.hadoop.hive.ql.plan.AggregationDesc), GroupByDesc (org.apache.hadoop.hive.ql.plan.GroupByDesc)
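
A minimal sketch of how this benchmark helper might be invoked; the aggregate name, column name, and processing mode below are illustrative values, not taken from the benchmark itself:

GroupByDesc desc = buildGroupByDescType(
    "min",                                  // aggregate function to benchmark
    GenericUDAFEvaluator.Mode.PARTIAL1,     // map-side partial aggregation
    "col0",                                 // input column name (illustrative)
    TypeInfoFactory.longTypeInfo,           // column type
    VectorGroupByDesc.ProcessingMode.HASH); // hash-based grouping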

Example 12 with GroupByDesc

Use of org.apache.hadoop.hive.ql.plan.GroupByDesc in project hive by apache.

From class DynamicPartitionPruningOptimization, method generateSemiJoinOperatorPlan:

// Generates plan for min/max when dynamic partition pruning is ruled out.
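// The generated branch has the shape:
//   parent-of-RS -> SEL(key) -> GBY(HASH: partial min/max/bloom_filter)
//     -> RS -> GBY(FINAL: merged min/max/bloom_filter) -> final RS
// with the final reduce sink toward the target TableScan created by
// createFinalRsForSemiJoinOp at the end of this method.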
private boolean generateSemiJoinOperatorPlan(DynamicListContext ctx, ParseContext parseContext,
        TableScanOperator ts, String keyBaseAlias, String internalColName, String colName,
        SemiJoinHint sjHint) throws SemanticException {
    // we will put a fork in the plan at the source of the reduce sink
    Operator<? extends OperatorDesc> parentOfRS = ctx.generator.getParentOperators().get(0);
    // we need the expr that generated the key of the reduce sink
    ExprNodeDesc key = ctx.generator.getConf().getKeyCols().get(ctx.desc.getKeyIndex());
    assert colName != null;
    // Fetch the TableScan Operator.
    Operator<?> op = parentOfRS;
    while (!(op == null || op instanceof TableScanOperator || op instanceof ReduceSinkOperator)) {
        op = op.getParentOperators().get(0);
    }
    Preconditions.checkNotNull(op);
    if (op instanceof TableScanOperator) {
        Table table = ((TableScanOperator) op).getConf().getTableMetadata();
        if (table.isPartitionKey(colName)) {
            // The column is partition column, skip the optimization.
            return false;
        }
    }
    // Check if there already exists a semijoin branch
    GroupByOperator gb = parseContext.getColExprToGBMap().get(key);
    if (gb != null) {
        // Already an existing semijoin branch, reuse it
        createFinalRsForSemiJoinOp(parseContext, ts, gb, key, keyBaseAlias, ctx.parent.getChildren().get(0), sjHint != null);
        // done!
        return true;
    }
    List<ExprNodeDesc> keyExprs = new ArrayList<ExprNodeDesc>();
    keyExprs.add(key);
    // group by requires "ArrayList", don't ask.
    ArrayList<String> outputNames = new ArrayList<String>();
    outputNames.add(HiveConf.getColumnInternalName(0));
    // project the relevant key column
    SelectDesc select = new SelectDesc(keyExprs, outputNames);
    // Create the new RowSchema for the projected column
    ColumnInfo columnInfo = parentOfRS.getSchema().getColumnInfo(internalColName);
    ArrayList<ColumnInfo> signature = new ArrayList<ColumnInfo>();
    signature.add(columnInfo);
    RowSchema rowSchema = new RowSchema(signature);
    // Create the column expr map
    Map<String, ExprNodeDesc> colExprMap = new HashMap<String, ExprNodeDesc>();
    ExprNodeDesc exprNode = null;
    if (parentOfRS.getColumnExprMap() != null) {
        exprNode = parentOfRS.getColumnExprMap().get(internalColName).clone();
    } else {
        exprNode = new ExprNodeColumnDesc(columnInfo);
    }
    if (exprNode instanceof ExprNodeColumnDesc) {
        ExprNodeColumnDesc encd = (ExprNodeColumnDesc) exprNode;
        encd.setColumn(internalColName);
    }
    colExprMap.put(internalColName, exprNode);
    // Create the Select Operator
    SelectOperator selectOp = (SelectOperator) OperatorFactory.getAndMakeChild(select, rowSchema, colExprMap, parentOfRS);
    // Do a group by to aggregate min, max, and bloom filter.
    float groupByMemoryUsage = HiveConf.getFloatVar(parseContext.getConf(), HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY);
    float memoryThreshold = HiveConf.getFloatVar(parseContext.getConf(), HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD);
    // Add min/max and bloom filter aggregations
    List<ObjectInspector> aggFnOIs = new ArrayList<ObjectInspector>();
    aggFnOIs.add(key.getWritableObjectInspector());
    ArrayList<ExprNodeDesc> params = new ArrayList<ExprNodeDesc>();
    params.add(new ExprNodeColumnDesc(key.getTypeInfo(), outputNames.get(0), "", false));
    ArrayList<AggregationDesc> aggs = new ArrayList<AggregationDesc>();
    try {
        AggregationDesc min = new AggregationDesc("min", FunctionRegistry.getGenericUDAFEvaluator("min", aggFnOIs, false, false), params, false, Mode.PARTIAL1);
        AggregationDesc max = new AggregationDesc("max", FunctionRegistry.getGenericUDAFEvaluator("max", aggFnOIs, false, false), params, false, Mode.PARTIAL1);
        AggregationDesc bloomFilter = new AggregationDesc("bloom_filter", FunctionRegistry.getGenericUDAFEvaluator("bloom_filter", aggFnOIs, false, false), params, false, Mode.PARTIAL1);
        GenericUDAFBloomFilterEvaluator bloomFilterEval = (GenericUDAFBloomFilterEvaluator) bloomFilter.getGenericUDAFEvaluator();
        bloomFilterEval.setSourceOperator(selectOp);
        if (sjHint != null && sjHint.getNumEntries() > 0) {
            LOG.debug("Setting size for " + keyBaseAlias + " to " + sjHint.getNumEntries() + " based on the hint");
            bloomFilterEval.setHintEntries(sjHint.getNumEntries());
        }
        bloomFilterEval.setMaxEntries(parseContext.getConf().getLongVar(ConfVars.TEZ_MAX_BLOOM_FILTER_ENTRIES));
        bloomFilterEval.setMinEntries(parseContext.getConf().getLongVar(ConfVars.TEZ_MIN_BLOOM_FILTER_ENTRIES));
        bloomFilterEval.setFactor(parseContext.getConf().getFloatVar(ConfVars.TEZ_BLOOM_FILTER_FACTOR));
        bloomFilter.setGenericUDAFWritableEvaluator(bloomFilterEval);
        aggs.add(min);
        aggs.add(max);
        aggs.add(bloomFilter);
    } catch (SemanticException e) {
        LOG.error("Error creating min/max aggregations on key", e);
        throw new IllegalStateException("Error creating min/max aggregations on key", e);
    }
    // Create the Group by Operator
    ArrayList<String> gbOutputNames = new ArrayList<String>();
    gbOutputNames.add(SemanticAnalyzer.getColumnInternalName(0));
    gbOutputNames.add(SemanticAnalyzer.getColumnInternalName(1));
    gbOutputNames.add(SemanticAnalyzer.getColumnInternalName(2));
    GroupByDesc groupBy = new GroupByDesc(GroupByDesc.Mode.HASH, gbOutputNames, new ArrayList<ExprNodeDesc>(), aggs, false, groupByMemoryUsage, memoryThreshold, null, false, -1, false);
    ArrayList<ColumnInfo> groupbyColInfos = new ArrayList<ColumnInfo>();
    groupbyColInfos.add(new ColumnInfo(gbOutputNames.get(0), key.getTypeInfo(), "", false));
    groupbyColInfos.add(new ColumnInfo(gbOutputNames.get(1), key.getTypeInfo(), "", false));
    groupbyColInfos.add(new ColumnInfo(gbOutputNames.get(2), key.getTypeInfo(), "", false));
    GroupByOperator groupByOp = (GroupByOperator) OperatorFactory.getAndMakeChild(groupBy, new RowSchema(groupbyColInfos), selectOp);
    groupByOp.setColumnExprMap(new HashMap<String, ExprNodeDesc>());
    // Get the column names of the aggregations for reduce sink
    int colPos = 0;
    ArrayList<ExprNodeDesc> rsValueCols = new ArrayList<ExprNodeDesc>();
    for (int i = 0; i < aggs.size() - 1; i++) {
        ExprNodeColumnDesc colExpr = new ExprNodeColumnDesc(key.getTypeInfo(), gbOutputNames.get(colPos++), "", false);
        rsValueCols.add(colExpr);
    }
    // Bloom Filter uses binary
    ExprNodeColumnDesc colExpr = new ExprNodeColumnDesc(TypeInfoFactory.binaryTypeInfo, gbOutputNames.get(colPos++), "", false);
    rsValueCols.add(colExpr);
    // Create the reduce sink operator
    ReduceSinkDesc rsDesc = PlanUtils.getReduceSinkDesc(new ArrayList<ExprNodeDesc>(), rsValueCols, gbOutputNames, false, -1, 0, 1, Operation.NOT_ACID);
    ReduceSinkOperator rsOp = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(rsDesc, new RowSchema(groupByOp.getSchema()), groupByOp);
    Map<String, ExprNodeDesc> columnExprMap = new HashMap<String, ExprNodeDesc>();
    rsOp.setColumnExprMap(columnExprMap);
    rsOp.getConf().setReducerTraits(EnumSet.of(ReduceSinkDesc.ReducerTraits.QUICKSTART));
    // Create the final Group By Operator
    ArrayList<AggregationDesc> aggsFinal = new ArrayList<AggregationDesc>();
    try {
        List<ObjectInspector> minFinalFnOIs = new ArrayList<ObjectInspector>();
        List<ObjectInspector> maxFinalFnOIs = new ArrayList<ObjectInspector>();
        List<ObjectInspector> bloomFilterFinalFnOIs = new ArrayList<ObjectInspector>();
        ArrayList<ExprNodeDesc> minFinalParams = new ArrayList<ExprNodeDesc>();
        ArrayList<ExprNodeDesc> maxFinalParams = new ArrayList<ExprNodeDesc>();
        ArrayList<ExprNodeDesc> bloomFilterFinalParams = new ArrayList<ExprNodeDesc>();
        // Use the expressions from Reduce Sink.
        minFinalFnOIs.add(rsValueCols.get(0).getWritableObjectInspector());
        maxFinalFnOIs.add(rsValueCols.get(1).getWritableObjectInspector());
        bloomFilterFinalFnOIs.add(rsValueCols.get(2).getWritableObjectInspector());
        // Coming from a ReduceSink the aggregations would be in the form VALUE._col0, VALUE._col1
        minFinalParams.add(new ExprNodeColumnDesc(rsValueCols.get(0).getTypeInfo(), Utilities.ReduceField.VALUE + "." + gbOutputNames.get(0), "", false));
        maxFinalParams.add(new ExprNodeColumnDesc(rsValueCols.get(1).getTypeInfo(), Utilities.ReduceField.VALUE + "." + gbOutputNames.get(1), "", false));
        bloomFilterFinalParams.add(new ExprNodeColumnDesc(rsValueCols.get(2).getTypeInfo(), Utilities.ReduceField.VALUE + "." + gbOutputNames.get(2), "", false));
        AggregationDesc min = new AggregationDesc("min", FunctionRegistry.getGenericUDAFEvaluator("min", minFinalFnOIs, false, false), minFinalParams, false, Mode.FINAL);
        AggregationDesc max = new AggregationDesc("max", FunctionRegistry.getGenericUDAFEvaluator("max", maxFinalFnOIs, false, false), maxFinalParams, false, Mode.FINAL);
        AggregationDesc bloomFilter = new AggregationDesc("bloom_filter", FunctionRegistry.getGenericUDAFEvaluator("bloom_filter", bloomFilterFinalFnOIs, false, false), bloomFilterFinalParams, false, Mode.FINAL);
        GenericUDAFBloomFilterEvaluator bloomFilterEval = (GenericUDAFBloomFilterEvaluator) bloomFilter.getGenericUDAFEvaluator();
        bloomFilterEval.setSourceOperator(selectOp);
        if (sjHint != null && sjHint.getNumEntries() > 0) {
            bloomFilterEval.setHintEntries(sjHint.getNumEntries());
        }
        bloomFilterEval.setMaxEntries(parseContext.getConf().getLongVar(ConfVars.TEZ_MAX_BLOOM_FILTER_ENTRIES));
        bloomFilterEval.setMinEntries(parseContext.getConf().getLongVar(ConfVars.TEZ_MIN_BLOOM_FILTER_ENTRIES));
        bloomFilterEval.setFactor(parseContext.getConf().getFloatVar(ConfVars.TEZ_BLOOM_FILTER_FACTOR));
        bloomFilter.setGenericUDAFWritableEvaluator(bloomFilterEval);
        aggsFinal.add(min);
        aggsFinal.add(max);
        aggsFinal.add(bloomFilter);
    } catch (SemanticException e) {
        LOG.error("Error creating min/max aggregations on key", e);
        throw new IllegalStateException("Error creating min/max aggregations on key", e);
    }
    GroupByDesc groupByDescFinal = new GroupByDesc(GroupByDesc.Mode.FINAL, gbOutputNames, new ArrayList<ExprNodeDesc>(), aggsFinal, false, groupByMemoryUsage, memoryThreshold, null, false, 0, false);
    GroupByOperator groupByOpFinal = (GroupByOperator) OperatorFactory.getAndMakeChild(groupByDescFinal, new RowSchema(rsOp.getSchema()), rsOp);
    groupByOpFinal.setColumnExprMap(new HashMap<String, ExprNodeDesc>());
    createFinalRsForSemiJoinOp(parseContext, ts, groupByOpFinal, key, keyBaseAlias, ctx.parent.getChildren().get(0), sjHint != null);
    return true;
}
Also used: TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo), SelectOperator (org.apache.hadoop.hive.ql.exec.SelectOperator), GenericUDAFBloomFilterEvaluator (org.apache.hadoop.hive.ql.udf.generic.GenericUDAFBloomFilter.GenericUDAFBloomFilterEvaluator), ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), SelectDesc (org.apache.hadoop.hive.ql.plan.SelectDesc), ReduceSinkDesc (org.apache.hadoop.hive.ql.plan.ReduceSinkDesc), GroupByDesc (org.apache.hadoop.hive.ql.plan.GroupByDesc), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException), RowSchema (org.apache.hadoop.hive.ql.exec.RowSchema), ObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector), Table (org.apache.hadoop.hive.ql.metadata.Table), GroupByOperator (org.apache.hadoop.hive.ql.exec.GroupByOperator), SemiJoinHint (org.apache.hadoop.hive.ql.parse.SemiJoinHint), ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator), AggregationDesc (org.apache.hadoop.hive.ql.plan.AggregationDesc)

Example 13 with GroupByDesc

Use of org.apache.hadoop.hive.ql.plan.GroupByDesc in project hive by apache.

From class TestVectorGroupByOperator, method testAggregateLongIterable:

public void testAggregateLongIterable(String aggregateName, Iterable<VectorizedRowBatch> data, Object expected) throws HiveException {
    List<String> mapColumnNames = new ArrayList<String>();
    mapColumnNames.add("A");
    VectorizationContext ctx = new VectorizationContext("name", mapColumnNames);
    Pair<GroupByDesc, VectorGroupByDesc> pair = buildGroupByDescType(ctx, aggregateName, GenericUDAFEvaluator.Mode.PARTIAL1, "A", TypeInfoFactory.longTypeInfo);
    GroupByDesc desc = pair.fst;
    VectorGroupByDesc vectorDesc = pair.snd;
    CompilationOpContext cCtx = new CompilationOpContext();
    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
    VectorGroupByOperator vgo = (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx, vectorDesc);
    FakeCaptureVectorToRowOutputOperator out = FakeCaptureVectorToRowOutputOperator.addCaptureOutputChild(cCtx, vgo);
    vgo.initialize(hconf, null);
    for (VectorizedRowBatch unit : data) {
        vgo.process(unit, 0);
    }
    vgo.close(false);
    List<Object> outBatchList = out.getCapturedRows();
    assertNotNull(outBatchList);
    assertEquals(1, outBatchList.size());
    Object result = outBatchList.get(0);
    Validator validator = getValidator(aggregateName);
    validator.validate("_total", expected, result);
}
Also used: ArrayList (java.util.ArrayList), CompilationOpContext (org.apache.hadoop.hive.ql.CompilationOpContext), VectorGroupByDesc (org.apache.hadoop.hive.ql.plan.VectorGroupByDesc), FakeCaptureVectorToRowOutputOperator (org.apache.hadoop.hive.ql.exec.vector.util.FakeCaptureVectorToRowOutputOperator), GroupByDesc (org.apache.hadoop.hive.ql.plan.GroupByDesc)
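
As a rough usage sketch: the batch-building helper below and its constructor signature are assumptions about the surrounding test class, which is not shown in this excerpt:

// Hypothetical invocation: feed the longs 1..4 in batches of two and expect
// their sum. FakeVectorRowBatchFromLongIterables is assumed to be the test
// utility that wraps long sequences into VectorizedRowBatch instances.
Iterable<VectorizedRowBatch> data =
    new FakeVectorRowBatchFromLongIterables(2, Arrays.asList(1L, 2L, 3L, 4L));
testAggregateLongIterable("sum", data, 10L);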

Example 14 with GroupByDesc

Use of org.apache.hadoop.hive.ql.plan.GroupByDesc in project hive by apache.

From class TestVectorGroupByOperator, method testAggregateDecimalIterable:

public void testAggregateDecimalIterable(String aggregateName, Iterable<VectorizedRowBatch> data, Object expected) throws HiveException {
    List<String> mapColumnNames = new ArrayList<String>();
    mapColumnNames.add("A");
    VectorizationContext ctx = new VectorizationContext("name", mapColumnNames);
    Pair<GroupByDesc, VectorGroupByDesc> pair = buildGroupByDescType(ctx, aggregateName, GenericUDAFEvaluator.Mode.PARTIAL1, "A", TypeInfoFactory.getDecimalTypeInfo(30, 4));
    GroupByDesc desc = pair.fst;
    VectorGroupByDesc vectorDesc = pair.snd;
    CompilationOpContext cCtx = new CompilationOpContext();
    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
    VectorGroupByOperator vgo = (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx, vectorDesc);
    FakeCaptureVectorToRowOutputOperator out = FakeCaptureVectorToRowOutputOperator.addCaptureOutputChild(cCtx, vgo);
    vgo.initialize(hconf, null);
    for (VectorizedRowBatch unit : data) {
        vgo.process(unit, 0);
    }
    vgo.close(false);
    List<Object> outBatchList = out.getCapturedRows();
    assertNotNull(outBatchList);
    assertEquals(1, outBatchList.size());
    Object result = outBatchList.get(0);
    Validator validator = getValidator(aggregateName);
    validator.validate("_total", expected, result);
}
Also used: ArrayList (java.util.ArrayList), CompilationOpContext (org.apache.hadoop.hive.ql.CompilationOpContext), VectorGroupByDesc (org.apache.hadoop.hive.ql.plan.VectorGroupByDesc), FakeCaptureVectorToRowOutputOperator (org.apache.hadoop.hive.ql.exec.vector.util.FakeCaptureVectorToRowOutputOperator), GroupByDesc (org.apache.hadoop.hive.ql.plan.GroupByDesc)

Example 15 with GroupByDesc

Use of org.apache.hadoop.hive.ql.plan.GroupByDesc in project hive by apache.

From class TestVectorGroupByOperator, method testAggregateCountReduceIterable:

public void testAggregateCountReduceIterable(Iterable<VectorizedRowBatch> data, Object expected) throws HiveException {
    List<String> mapColumnNames = new ArrayList<String>();
    mapColumnNames.add("A");
    VectorizationContext ctx = new VectorizationContext("name", mapColumnNames);
    Pair<GroupByDesc, VectorGroupByDesc> pair = buildGroupByDescType(ctx, "count", GenericUDAFEvaluator.Mode.FINAL, "A", TypeInfoFactory.longTypeInfo);
    GroupByDesc desc = pair.fst;
    VectorGroupByDesc vectorDesc = pair.snd;
    // Use GLOBAL when no key for Reduce.
    vectorDesc.setProcessingMode(ProcessingMode.GLOBAL);
    CompilationOpContext cCtx = new CompilationOpContext();
    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
    VectorGroupByOperator vgo = (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx, vectorDesc);
    FakeCaptureVectorToRowOutputOperator out = FakeCaptureVectorToRowOutputOperator.addCaptureOutputChild(cCtx, vgo);
    vgo.initialize(hconf, null);
    for (VectorizedRowBatch unit : data) {
        vgo.process(unit, 0);
    }
    vgo.close(false);
    List<Object> outBatchList = out.getCapturedRows();
    assertNotNull(outBatchList);
    assertEquals(1, outBatchList.size());
    Object result = outBatchList.get(0);
    Validator validator = getValidator("count");
    validator.validate("_total", expected, result);
}
Also used: ArrayList (java.util.ArrayList), CompilationOpContext (org.apache.hadoop.hive.ql.CompilationOpContext), VectorGroupByDesc (org.apache.hadoop.hive.ql.plan.VectorGroupByDesc), FakeCaptureVectorToRowOutputOperator (org.apache.hadoop.hive.ql.exec.vector.util.FakeCaptureVectorToRowOutputOperator), GroupByDesc (org.apache.hadoop.hive.ql.plan.GroupByDesc)
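
The GLOBAL processing mode set above is what distinguishes this keyless reduce-side test from the earlier PARTIAL1 tests. A minimal sketch of that choice; the hasGroupingKeys flag is a hypothetical stand-in for whatever the caller knows about its grouping keys:

// Per-key hash aggregation when grouping keys exist; a single global
// aggregate when there are none (hasGroupingKeys is hypothetical).
if (hasGroupingKeys) {
    vectorDesc.setProcessingMode(ProcessingMode.HASH);
} else {
    vectorDesc.setProcessingMode(ProcessingMode.GLOBAL);
}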

Aggregations

GroupByDesc (org.apache.hadoop.hive.ql.plan.GroupByDesc): 36 uses
ArrayList (java.util.ArrayList): 30 uses
AggregationDesc (org.apache.hadoop.hive.ql.plan.AggregationDesc): 22 uses
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 20 uses
VectorGroupByDesc (org.apache.hadoop.hive.ql.plan.VectorGroupByDesc): 20 uses
HashMap (java.util.HashMap): 18 uses
GroupByOperator (org.apache.hadoop.hive.ql.exec.GroupByOperator): 16 uses
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 14 uses
ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo): 13 uses
Operator (org.apache.hadoop.hive.ql.exec.Operator): 13 uses
RowSchema (org.apache.hadoop.hive.ql.exec.RowSchema): 13 uses
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc): 13 uses
CompilationOpContext (org.apache.hadoop.hive.ql.CompilationOpContext): 12 uses
SelectOperator (org.apache.hadoop.hive.ql.exec.SelectOperator): 10 uses
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 10 uses
FakeCaptureVectorToRowOutputOperator (org.apache.hadoop.hive.ql.exec.vector.util.FakeCaptureVectorToRowOutputOperator): 10 uses
Mode (org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode): 10 uses
FilterOperator (org.apache.hadoop.hive.ql.exec.FilterOperator): 9 uses
LinkedHashMap (java.util.LinkedHashMap): 8 uses
Map (java.util.Map): 8 uses