
Example 21 with AggregationDesc

Use of org.apache.hadoop.hive.ql.plan.AggregationDesc in the Apache Hive project.

From the class TestVectorGroupByOperator, the method buildAggregationDescCountStar:

private static AggregationDesc buildAggregationDescCountStar(VectorizationContext ctx) {
    AggregationDesc agg = new AggregationDesc();
    agg.setGenericUDAFName("count");
    agg.setMode(GenericUDAFEvaluator.Mode.PARTIAL1);
    agg.setParameters(new ArrayList<ExprNodeDesc>());
    agg.setGenericUDAFEvaluator(new GenericUDAFCount.GenericUDAFCountEvaluator());
    return agg;
}
Also used : AggregationDesc(org.apache.hadoop.hive.ql.plan.AggregationDesc) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) GenericUDAFCount(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCount)
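
A minimal sketch of how this count(*) descriptor could be wired into a vectorized group-by operator, mirroring the pattern of Example 22 below. Here ctx is assumed to be the test's VectorizationContext fixture, and ProcessingMode.GLOBAL for the keyless case is an assumption, since the examples on this page only show HASH with keys:

// Sketch only: follows the wiring shown in testKeyTypeAggregate (Example 22).
// ctx is an assumed VectorizationContext fixture.
AggregationDesc agg = buildAggregationDescCountStar(ctx);
ArrayList<AggregationDesc> aggs = new ArrayList<AggregationDesc>();
aggs.add(agg);
GroupByDesc desc = new GroupByDesc();
desc.setOutputColumnNames(new ArrayList<String>(Arrays.asList("_col0")));
desc.setAggregators(aggs);
desc.setKeys(new ArrayList<ExprNodeDesc>()); // count(*) groups over no keys
VectorGroupByDesc vectorGroupByDesc = new VectorGroupByDesc();
// GLOBAL is assumed appropriate for a keyless aggregate; Example 22 uses HASH.
vectorGroupByDesc.setProcessingMode(ProcessingMode.GLOBAL);
CompilationOpContext cCtx = new CompilationOpContext();
Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
VectorGroupByOperator vgo =
    (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx, vectorGroupByDesc);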

Example 22 with AggregationDesc

Use of org.apache.hadoop.hive.ql.plan.AggregationDesc in the Apache Hive project.

From the class TestVectorGroupByOperator, the method testKeyTypeAggregate:

private void testKeyTypeAggregate(String aggregateName, FakeVectorRowBatchFromObjectIterables data, Map<Object, Object> expected) throws HiveException {
    List<String> mapColumnNames = new ArrayList<String>();
    mapColumnNames.add("Key");
    mapColumnNames.add("Value");
    VectorizationContext ctx = new VectorizationContext("name", mapColumnNames);
    Set<Object> keys = new HashSet<Object>();
    AggregationDesc agg = buildAggregationDesc(ctx, aggregateName, GenericUDAFEvaluator.Mode.PARTIAL1, "Value", TypeInfoFactory.getPrimitiveTypeInfo(data.getTypes()[1]));
    ArrayList<AggregationDesc> aggs = new ArrayList<AggregationDesc>();
    aggs.add(agg);
    ArrayList<String> outputColumnNames = new ArrayList<String>();
    outputColumnNames.add("_col0");
    outputColumnNames.add("_col1");
    GroupByDesc desc = new GroupByDesc();
    VectorGroupByDesc vectorGroupByDesc = new VectorGroupByDesc();
    desc.setOutputColumnNames(outputColumnNames);
    desc.setAggregators(aggs);
    vectorGroupByDesc.setProcessingMode(ProcessingMode.HASH);
    ExprNodeDesc keyExp = buildColumnDesc(ctx, "Key", TypeInfoFactory.getPrimitiveTypeInfo(data.getTypes()[0]));
    ArrayList<ExprNodeDesc> keysDesc = new ArrayList<ExprNodeDesc>();
    keysDesc.add(keyExp);
    desc.setKeys(keysDesc);
    CompilationOpContext cCtx = new CompilationOpContext();
    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
    VectorGroupByOperator vgo = (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx, vectorGroupByDesc);
    assertNotNull(vgo);
    FakeCaptureVectorToRowOutputOperator out = FakeCaptureVectorToRowOutputOperator.addCaptureOutputChild(cCtx, vgo);
    vgo.initialize(hconf, null);
    out.setOutputInspector(new FakeCaptureVectorToRowOutputOperator.OutputInspector() {

        private int rowIndex;

        private String aggregateName;

        private Map<Object, Object> expected;

        private Set<Object> keys;

        @Override
        public void inspectRow(Object row, int tag) throws HiveException {
            assertTrue(row instanceof Object[]);
            Object[] fields = (Object[]) row;
            assertEquals(2, fields.length);
            Object key = fields[0];
            Object keyValue = null;
            if (null == key) {
                keyValue = null;
            } else if (key instanceof ByteWritable) {
                ByteWritable bwKey = (ByteWritable) key;
                keyValue = bwKey.get();
            } else if (key instanceof ShortWritable) {
                ShortWritable swKey = (ShortWritable) key;
                keyValue = swKey.get();
            } else if (key instanceof IntWritable) {
                IntWritable iwKey = (IntWritable) key;
                keyValue = iwKey.get();
            } else if (key instanceof LongWritable) {
                LongWritable lwKey = (LongWritable) key;
                keyValue = lwKey.get();
            } else if (key instanceof TimestampWritableV2) {
                TimestampWritableV2 twKey = (TimestampWritableV2) key;
                keyValue = twKey.getTimestamp().toSqlTimestamp();
            } else if (key instanceof DoubleWritable) {
                DoubleWritable dwKey = (DoubleWritable) key;
                keyValue = dwKey.get();
            } else if (key instanceof FloatWritable) {
                FloatWritable fwKey = (FloatWritable) key;
                keyValue = fwKey.get();
            } else if (key instanceof BooleanWritable) {
                BooleanWritable bwKey = (BooleanWritable) key;
                keyValue = bwKey.get();
            } else if (key instanceof HiveDecimalWritable) {
                HiveDecimalWritable hdwKey = (HiveDecimalWritable) key;
                keyValue = hdwKey.getHiveDecimal();
            } else {
                Assert.fail(String.format("Not implemented key output type %s: %s", key.getClass().getName(), key));
            }
            String keyValueAsString = String.format("%s", keyValue);
            assertTrue(expected.containsKey(keyValue));
            Object expectedValue = expected.get(keyValue);
            Object value = fields[1];
            Validator validator = getValidator(aggregateName);
            validator.validate(keyValueAsString, expectedValue, new Object[] { value });
            keys.add(keyValue);
        }

        private FakeCaptureVectorToRowOutputOperator.OutputInspector init(String aggregateName, Map<Object, Object> expected, Set<Object> keys) {
            this.aggregateName = aggregateName;
            this.expected = expected;
            this.keys = keys;
            return this;
        }
    }.init(aggregateName, expected, keys));
    for (VectorizedRowBatch unit : data) {
        vgo.process(unit, 0);
    }
    vgo.close(false);
    List<Object> outBatchList = out.getCapturedRows();
    assertNotNull(outBatchList);
    assertEquals(expected.size(), outBatchList.size());
    assertEquals(expected.size(), keys.size());
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) ArrayList(java.util.ArrayList) DoubleWritable(org.apache.hadoop.hive.serde2.io.DoubleWritable) ShortWritable(org.apache.hadoop.hive.serde2.io.ShortWritable) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) LongWritable(org.apache.hadoop.io.LongWritable) VectorGroupByDesc(org.apache.hadoop.hive.ql.plan.VectorGroupByDesc) GroupByDesc(org.apache.hadoop.hive.ql.plan.GroupByDesc) ByteWritable(org.apache.hadoop.hive.serde2.io.ByteWritable) IntWritable(org.apache.hadoop.io.IntWritable) HashSet(java.util.HashSet) HiveDecimalWritable(org.apache.hadoop.hive.serde2.io.HiveDecimalWritable) TimestampWritableV2(org.apache.hadoop.hive.serde2.io.TimestampWritableV2) FloatWritable(org.apache.hadoop.io.FloatWritable) BooleanWritable(org.apache.hadoop.io.BooleanWritable) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) VectorGroupByDesc(org.apache.hadoop.hive.ql.plan.VectorGroupByDesc) AggregationDesc(org.apache.hadoop.hive.ql.plan.AggregationDesc) FakeCaptureVectorToRowOutputOperator(org.apache.hadoop.hive.ql.exec.vector.util.FakeCaptureVectorToRowOutputOperator) Map(java.util.Map) HashMap(java.util.HashMap)
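
A hedged sketch of how a caller might drive testKeyTypeAggregate. The FakeVectorRowBatchFromObjectIterables(batchSize, types, columns...) constructor shape, along with the concrete keys, values, and expected counts, are assumptions for illustration only:

// Hypothetical invocation: one bigint key column, one bigint value column.
// The constructor shape of FakeVectorRowBatchFromObjectIterables is assumed.
Map<Object, Object> expected = new HashMap<Object, Object>();
expected.put(1L, 3L); // key 1 occurs three times, so count == 3
expected.put(2L, 1L); // key 2 occurs once, so count == 1
testKeyTypeAggregate(
    "count",
    new FakeVectorRowBatchFromObjectIterables(
        2,                                   // rows per batch
        new String[] { "bigint", "bigint" }, // key type, value type
        Arrays.asList((Object) 1L, 1L, 1L, 2L),      // "Key" column
        Arrays.asList((Object) 10L, 20L, 30L, 40L)), // "Value" column
    expected);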

Example 23 with AggregationDesc

Use of org.apache.hadoop.hive.ql.plan.AggregationDesc in the Apache Hive project.

From the class TestVectorGroupByOperator, the method testMultiKey:

private void testMultiKey(String aggregateName, FakeVectorRowBatchFromObjectIterables data, HashMap<Object, Object> expected) throws HiveException {
    Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
    ArrayList<String> outputColumnNames = new ArrayList<String>();
    ArrayList<ExprNodeDesc> keysDesc = new ArrayList<ExprNodeDesc>();
    Set<Object> keys = new HashSet<Object>();
    // The types array tells us the number of columns in the data
    final String[] columnTypes = data.getTypes();
    // Columns 0..N-1 are keys. Column N is the aggregate value input
    int i = 0;
    for (; i < columnTypes.length - 1; ++i) {
        String columnName = String.format("_col%d", i);
        mapColumnNames.put(columnName, i);
        outputColumnNames.add(columnName);
    }
    mapColumnNames.put("value", i);
    outputColumnNames.add("value");
    VectorizationContext ctx = new VectorizationContext("name", outputColumnNames);
    ArrayList<AggregationDesc> aggs = new ArrayList<AggregationDesc>(1);
    aggs.add(buildAggregationDesc(ctx, aggregateName, GenericUDAFEvaluator.Mode.PARTIAL1, "value", TypeInfoFactory.getPrimitiveTypeInfo(columnTypes[i])));
    for (i = 0; i < columnTypes.length - 1; ++i) {
        String columnName = String.format("_col%d", i);
        keysDesc.add(buildColumnDesc(ctx, columnName, TypeInfoFactory.getPrimitiveTypeInfo(columnTypes[i])));
    }
    GroupByDesc desc = new GroupByDesc();
    VectorGroupByDesc vectorGroupByDesc = new VectorGroupByDesc();
    desc.setOutputColumnNames(outputColumnNames);
    desc.setAggregators(aggs);
    desc.setKeys(keysDesc);
    vectorGroupByDesc.setProcessingMode(ProcessingMode.HASH);
    CompilationOpContext cCtx = new CompilationOpContext();
    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
    VectorGroupByOperator vgo = (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx, vectorGroupByDesc);
    FakeCaptureVectorToRowOutputOperator out = FakeCaptureVectorToRowOutputOperator.addCaptureOutputChild(cCtx, vgo);
    vgo.initialize(hconf, null);
    out.setOutputInspector(new FakeCaptureVectorToRowOutputOperator.OutputInspector() {

        private int rowIndex;

        private String aggregateName;

        private Map<Object, Object> expected;

        private Set<Object> keys;

        @Override
        public void inspectRow(Object row, int tag) throws HiveException {
            assertTrue(row instanceof Object[]);
            Object[] fields = (Object[]) row;
            assertEquals(columnTypes.length, fields.length);
            ArrayList<Object> keyValue = new ArrayList<Object>(columnTypes.length - 1);
            for (int i = 0; i < columnTypes.length - 1; ++i) {
                Object key = fields[i];
                if (null == key) {
                    keyValue.add(null);
                } else if (key instanceof Text) {
                    Text txKey = (Text) key;
                    keyValue.add(txKey.toString());
                } else if (key instanceof ByteWritable) {
                    ByteWritable bwKey = (ByteWritable) key;
                    keyValue.add(bwKey.get());
                } else if (key instanceof ShortWritable) {
                    ShortWritable swKey = (ShortWritable) key;
                    keyValue.add(swKey.get());
                } else if (key instanceof IntWritable) {
                    IntWritable iwKey = (IntWritable) key;
                    keyValue.add(iwKey.get());
                } else if (key instanceof LongWritable) {
                    LongWritable lwKey = (LongWritable) key;
                    keyValue.add(lwKey.get());
                } else if (key instanceof TimestampWritableV2) {
                    TimestampWritableV2 twKey = (TimestampWritableV2) key;
                    keyValue.add(twKey.getTimestamp());
                } else if (key instanceof DoubleWritable) {
                    DoubleWritable dwKey = (DoubleWritable) key;
                    keyValue.add(dwKey.get());
                } else if (key instanceof FloatWritable) {
                    FloatWritable fwKey = (FloatWritable) key;
                    keyValue.add(fwKey.get());
                } else if (key instanceof BooleanWritable) {
                    BooleanWritable bwKey = (BooleanWritable) key;
                    keyValue.add(bwKey.get());
                } else {
                    Assert.fail(String.format("Not implemented key output type %s: %s", key.getClass().getName(), key));
                }
            }
            String keyAsString = Arrays.deepToString(keyValue.toArray());
            assertTrue(expected.containsKey(keyValue));
            Object expectedValue = expected.get(keyValue);
            Object value = fields[columnTypes.length - 1];
            Validator validator = getValidator(aggregateName);
            validator.validate(keyAsString, expectedValue, new Object[] { value });
            keys.add(keyValue);
        }

        private FakeCaptureVectorToRowOutputOperator.OutputInspector init(String aggregateName, Map<Object, Object> expected, Set<Object> keys) {
            this.aggregateName = aggregateName;
            this.expected = expected;
            this.keys = keys;
            return this;
        }
    }.init(aggregateName, expected, keys));
    for (VectorizedRowBatch unit : data) {
        vgo.process(unit, 0);
    }
    vgo.close(false);
    List<Object> outBatchList = out.getCapturedRows();
    assertNotNull(outBatchList);
    assertEquals(expected.size(), outBatchList.size());
    assertEquals(expected.size(), keys.size());
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) DoubleWritable(org.apache.hadoop.hive.serde2.io.DoubleWritable) ShortWritable(org.apache.hadoop.hive.serde2.io.ShortWritable) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) LongWritable(org.apache.hadoop.io.LongWritable) VectorGroupByDesc(org.apache.hadoop.hive.ql.plan.VectorGroupByDesc) GroupByDesc(org.apache.hadoop.hive.ql.plan.GroupByDesc) ByteWritable(org.apache.hadoop.hive.serde2.io.ByteWritable) IntWritable(org.apache.hadoop.io.IntWritable) HashSet(java.util.HashSet) Text(org.apache.hadoop.io.Text) TimestampWritableV2(org.apache.hadoop.hive.serde2.io.TimestampWritableV2) FloatWritable(org.apache.hadoop.io.FloatWritable) BooleanWritable(org.apache.hadoop.io.BooleanWritable) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) VectorGroupByDesc(org.apache.hadoop.hive.ql.plan.VectorGroupByDesc) AggregationDesc(org.apache.hadoop.hive.ql.plan.AggregationDesc) FakeCaptureVectorToRowOutputOperator(org.apache.hadoop.hive.ql.exec.vector.util.FakeCaptureVectorToRowOutputOperator) Map(java.util.Map) HashMap(java.util.HashMap)
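
Note that the composite keys in the expected map are plain ArrayList<Object> instances holding the key columns in order, matching the keyValue list built in inspectRow; since ArrayList implements element-wise equals and hashCode, expected.containsKey(keyValue) works directly. A sketch with hypothetical keys and counts:

// Hypothetical expected map for a two-key (string, bigint) "count" test.
HashMap<Object, Object> expected = new HashMap<Object, Object>();
expected.put(new ArrayList<Object>(Arrays.asList((Object) "a", 1L)), 2L);
expected.put(new ArrayList<Object>(Arrays.asList((Object) "b", 1L)), 1L);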

Example 24 with AggregationDesc

Use of org.apache.hadoop.hive.ql.plan.AggregationDesc in the Apache Hive project.

From the class TestVectorGroupByOperator, the method buildAggregationDesc:

private static AggregationDesc buildAggregationDesc(VectorizationContext ctx, String aggregate, GenericUDAFEvaluator.Mode mode, String column, TypeInfo typeInfo) {
    TypeInfo[] typeInfos = new TypeInfo[] { typeInfo };
    ArrayList<ExprNodeDesc> params = new ArrayList<ExprNodeDesc>(1);
    ExprNodeDesc inputColumn = buildColumnDesc(ctx, column, typeInfo);
    params.add(inputColumn);
    AggregationDesc agg = new AggregationDesc();
    agg.setGenericUDAFName(aggregate);
    agg.setMode(mode);
    agg.setParameters(params);
    final GenericUDAFEvaluator evaluator;
    try {
        switch(aggregate) {
            case "count":
                evaluator = new GenericUDAFCount.GenericUDAFCountEvaluator();
                break;
            case "min":
                evaluator = new GenericUDAFMin.GenericUDAFMinEvaluator();
                break;
            case "max":
                evaluator = new GenericUDAFMax.GenericUDAFMaxEvaluator();
                break;
            case "sum":
                evaluator = (new GenericUDAFSum()).getEvaluator(typeInfos);
                break;
            case "avg":
                evaluator = (new GenericUDAFAverage()).getEvaluator(typeInfos);
                break;
            case "variance":
            case "var":
            case "var_pop":
                evaluator = new GenericUDAFVariance.GenericUDAFVarianceEvaluator();
                break;
            case "var_samp":
                evaluator = new GenericUDAFVarianceSample.GenericUDAFVarianceSampleEvaluator();
                break;
            case "std":
            case "stddev":
            case "stddev_pop":
                evaluator = new GenericUDAFStd.GenericUDAFStdEvaluator();
                break;
            case "stddev_samp":
                evaluator = new GenericUDAFStdSample.GenericUDAFStdSampleEvaluator();
                break;
            default:
                throw new RuntimeException("Unexpected aggregate " + aggregate);
        }
    } catch (SemanticException e) {
        throw new RuntimeException(e);
    }
    agg.setGenericUDAFEvaluator(evaluator);
    return agg;
}
Also used : GenericUDAFEvaluator(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator) ArrayList(java.util.ArrayList) GenericUDAFCount(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCount) GenericUDAFAverage(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage) GenericUDAFVariance(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVariance) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) GenericUDAFMin(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMin) TypeInfo(org.apache.hadoop.hive.serde2.typeinfo.TypeInfo) GenericUDAFSum(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSum) GenericUDAFStdSample(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFStdSample) GenericUDAFVarianceSample(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVarianceSample) GenericUDAFStd(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFStd) GenericUDAFMax(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMax) AggregationDesc(org.apache.hadoop.hive.ql.plan.AggregationDesc)
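
For instance, a map-side sum over a bigint column named "Value" could be built like this (a sketch; ctx is assumed to be the test's VectorizationContext fixture):

// Sketch: PARTIAL1 sum descriptor over an assumed bigint column "Value".
AggregationDesc sumAgg = buildAggregationDesc(
    ctx, "sum", GenericUDAFEvaluator.Mode.PARTIAL1,
    "Value", TypeInfoFactory.longTypeInfo);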

Example 25 with AggregationDesc

Use of org.apache.hadoop.hive.ql.plan.AggregationDesc in the Apache Hive project.

From the class SemanticAnalyzer, the method genMapGroupByForSemijoin:

private Operator genMapGroupByForSemijoin(QB qb, ArrayList<ASTNode> fields, // the ASTNodes of the join keys "tab.col"
    Operator inputOperatorInfo, GroupByDesc.Mode mode) throws SemanticException {
    RowResolver groupByInputRowResolver = opParseCtx.get(inputOperatorInfo).getRowResolver();
    RowResolver groupByOutputRowResolver = new RowResolver();
    ArrayList<ExprNodeDesc> groupByKeys = new ArrayList<ExprNodeDesc>();
    ArrayList<String> outputColumnNames = new ArrayList<String>();
    ArrayList<AggregationDesc> aggregations = new ArrayList<AggregationDesc>();
    Map<String, ExprNodeDesc> colExprMap = new HashMap<String, ExprNodeDesc>();
    qb.getParseInfo();
    // join keys should only be columns, not expressions
    groupByOutputRowResolver.setIsExprResolver(true);
    for (int i = 0; i < fields.size(); ++i) {
        // get the group by keys to ColumnInfo
        ASTNode colName = fields.get(i);
        ExprNodeDesc grpByExprNode = genExprNodeDesc(colName, groupByInputRowResolver);
        groupByKeys.add(grpByExprNode);
        // generate output column names
        String field = getColumnInternalName(i);
        outputColumnNames.add(field);
        ColumnInfo colInfo2 = new ColumnInfo(field, grpByExprNode.getTypeInfo(), "", false);
        groupByOutputRowResolver.putExpression(colName, colInfo2);
        // establish mapping from the output column to the input column
        colExprMap.put(field, grpByExprNode);
    }
    // Generate group-by operator
    float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY);
    float memoryThreshold = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD);
    Operator op = putOpInsertMap(
        OperatorFactory.getAndMakeChild(
            new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations,
                false, groupByMemoryUsage, memoryThreshold, null, false, -1, false),
            new RowSchema(groupByOutputRowResolver.getColumnInfos()),
            inputOperatorInfo),
        groupByOutputRowResolver);
    op.setColumnExprMap(colExprMap);
    return op;
}
Also used : AbstractMapJoinOperator(org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator) SelectOperator(org.apache.hadoop.hive.ql.exec.SelectOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) GroupByOperator(org.apache.hadoop.hive.ql.exec.GroupByOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) FilterOperator(org.apache.hadoop.hive.ql.exec.FilterOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) AggregationDesc(org.apache.hadoop.hive.ql.plan.AggregationDesc) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) GroupByDesc(org.apache.hadoop.hive.ql.plan.GroupByDesc)

Aggregations

AggregationDesc (org.apache.hadoop.hive.ql.plan.AggregationDesc): 40 usages
ArrayList (java.util.ArrayList): 36 usages
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 36 usages
GroupByDesc (org.apache.hadoop.hive.ql.plan.GroupByDesc): 33 usages
HashMap (java.util.HashMap): 26 usages
GroupByOperator (org.apache.hadoop.hive.ql.exec.GroupByOperator): 25 usages
ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo): 23 usages
RowSchema (org.apache.hadoop.hive.ql.exec.RowSchema): 23 usages
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc): 23 usages
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 21 usages
Operator (org.apache.hadoop.hive.ql.exec.Operator): 19 usages
Mode (org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode): 16 usages
SelectOperator (org.apache.hadoop.hive.ql.exec.SelectOperator): 14 usages
GenericUDAFEvaluator (org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator): 14 usages
Map (java.util.Map): 12 usages
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 12 usages
LinkedHashMap (java.util.LinkedHashMap): 11 usages
AbstractMapJoinOperator (org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator): 11 usages
FileSinkOperator (org.apache.hadoop.hive.ql.exec.FileSinkOperator): 11 usages
FilterOperator (org.apache.hadoop.hive.ql.exec.FilterOperator): 11 usages