Example 1 with FastBitSet

use of javolution.util.FastBitSet in project hive by apache.

the class VectorGroupByOperator method setupGroupingSets.

private void setupGroupingSets() {
    groupingSetsPresent = conf.isGroupingSetsPresent();
    if (!groupingSetsPresent) {
        groupingSets = null;
        groupingSetsPosition = -1;
        groupingSetsDummyVectorExpression = null;
        allGroupingSetsOverrideIsNulls = null;
        return;
    }
    groupingSets = ArrayUtils.toPrimitive(conf.getListGroupingSets().toArray(new Long[0]));
    groupingSetsPosition = conf.getGroupingSetPosition();
    allGroupingSetsOverrideIsNulls = new boolean[groupingSets.length][];
    int pos = 0;
    for (long groupingSet : groupingSets) {
        // Create the mapping corresponding to the grouping set
        // Assume all columns are null, except the dummy column is always non-null.
        boolean[] groupingSetsOverrideIsNull = new boolean[keyExpressions.length];
        Arrays.fill(groupingSetsOverrideIsNull, true);
        groupingSetsOverrideIsNull[groupingSetsPosition] = false;
        // Add keys of this grouping set.
        FastBitSet bitset = GroupByOperator.groupingSet2BitSet(groupingSet, groupingSetsPosition);
        for (int keyPos = bitset.nextClearBit(0); keyPos < groupingSetsPosition; keyPos = bitset.nextClearBit(keyPos + 1)) {
            groupingSetsOverrideIsNull[keyPos] = false;
        }
        allGroupingSetsOverrideIsNulls[pos] = groupingSetsOverrideIsNull;
        pos++;
    }
    // The last key column is the dummy grouping set id.
    // Figure out which (scratch) column was used so we can overwrite the dummy id.
    groupingSetsDummyVectorExpression = (ConstantVectorExpression) keyExpressions[groupingSetsPosition];
}
Also used : FastBitSet(javolution.util.FastBitSet)
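
The null-override masks above depend on how GroupByOperator.groupingSet2BitSet decodes a grouping-set id; its body is not shown here, so the following is only a sketch consistent with the nextClearBit loop above (assumed convention: a clear bit marks a key column that participates in the grouping set, a set bit marks one to null out; the exact bit order is an assumption).

import javolution.util.FastBitSet;

// Sketch only: decode a grouping-set id into a bit mask over the key columns.
// Assumed convention (matching the caller): set bit = column nulled out,
// clear bit = column participates in this grouping set.
public static FastBitSet groupingSet2BitSet(long value, int length) {
    FastBitSet bits = new FastBitSet();
    for (int index = length - 1; index >= 0; index--) {
        if (value % 2 != 0) {
            // low-order bits of the id map to the trailing key columns
            bits.set(index);
        }
        value = value / 2;
    }
    return bits;
}

Under that convention, an id of 1 with two key columns sets only bit 1, so the first key stays live and the second is overridden to null.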

Example 2 with FastBitSet

use of javolution.util.FastBitSet in project hive by apache.

the class GroupByOperator method initializeOp.

@Override
protected void initializeOp(Configuration hconf) throws HiveException {
    super.initializeOp(hconf);
    numRowsInput = 0;
    numRowsHashTbl = 0;
    heartbeatInterval = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVESENDHEARTBEAT);
    countAfterReport = 0;
    ObjectInspector rowInspector = inputObjInspectors[0];
    // init keyFields
    int numKeys = conf.getKeys().size();
    keyFields = new ExprNodeEvaluator[numKeys];
    keyObjectInspectors = new ObjectInspector[numKeys];
    currentKeyObjectInspectors = new ObjectInspector[numKeys];
    for (int i = 0; i < numKeys; i++) {
        keyFields[i] = ExprNodeEvaluatorFactory.get(conf.getKeys().get(i), hconf);
        keyObjectInspectors[i] = keyFields[i].initialize(rowInspector);
        currentKeyObjectInspectors[i] = ObjectInspectorUtils.getStandardObjectInspector(keyObjectInspectors[i], ObjectInspectorCopyOption.WRITABLE);
    }
    // Initialize the constants for the grouping sets, so that they can be re-used for
    // each row
    groupingSetsPresent = conf.isGroupingSetsPresent();
    if (groupingSetsPresent) {
        groupingSets = conf.getListGroupingSets();
        groupingSetsPosition = conf.getGroupingSetPosition();
        newKeysGroupingSets = new LongWritable[groupingSets.size()];
        groupingSetsBitSet = new FastBitSet[groupingSets.size()];
        int pos = 0;
        for (Long groupingSet : groupingSets) {
            // Create the mapping corresponding to the grouping set
            newKeysGroupingSets[pos] = new LongWritable(groupingSet);
            groupingSetsBitSet[pos] = groupingSet2BitSet(groupingSet, groupingSetsPosition);
            pos++;
        }
    }
    // initialize unionExpr for reduce-side
    // reduce KEY has union field as the last field if there are distinct
    // aggregates in group-by.
    List<? extends StructField> sfs = ((StructObjectInspector) rowInspector).getAllStructFieldRefs();
    if (sfs.size() > 0) {
        StructField keyField = sfs.get(0);
        if (keyField.getFieldName().toUpperCase().equals(Utilities.ReduceField.KEY.name())) {
            ObjectInspector keyObjInspector = keyField.getFieldObjectInspector();
            if (keyObjInspector instanceof StructObjectInspector) {
                List<? extends StructField> keysfs = ((StructObjectInspector) keyObjInspector).getAllStructFieldRefs();
                if (keysfs.size() > 0) {
                    // the last field is the union field, if any
                    StructField sf = keysfs.get(keysfs.size() - 1);
                    if (sf.getFieldObjectInspector().getCategory().equals(ObjectInspector.Category.UNION)) {
                        unionExprEval = ExprNodeEvaluatorFactory.get(new ExprNodeColumnDesc(TypeInfoUtils.getTypeInfoFromObjectInspector(sf.getFieldObjectInspector()), keyField.getFieldName() + "." + sf.getFieldName(), null, false), hconf);
                        unionExprEval.initialize(rowInspector);
                    }
                }
            }
        }
    }
    // init aggregationParameterFields
    List<AggregationDesc> aggrs = conf.getAggregators();
    aggregationParameterFields = new ExprNodeEvaluator[aggrs.size()][];
    aggregationParameterObjectInspectors = new ObjectInspector[aggrs.size()][];
    aggregationParameterStandardObjectInspectors = new ObjectInspector[aggrs.size()][];
    aggregationParameterObjects = new Object[aggrs.size()][];
    aggregationIsDistinct = new boolean[aggrs.size()];
    for (int i = 0; i < aggrs.size(); i++) {
        AggregationDesc aggr = aggrs.get(i);
        List<ExprNodeDesc> parameters = aggr.getParameters();
        aggregationParameterFields[i] = new ExprNodeEvaluator[parameters.size()];
        aggregationParameterObjectInspectors[i] = new ObjectInspector[parameters.size()];
        aggregationParameterStandardObjectInspectors[i] = new ObjectInspector[parameters.size()];
        aggregationParameterObjects[i] = new Object[parameters.size()];
        for (int j = 0; j < parameters.size(); j++) {
            aggregationParameterFields[i][j] = ExprNodeEvaluatorFactory.get(parameters.get(j), hconf);
            aggregationParameterObjectInspectors[i][j] = aggregationParameterFields[i][j].initialize(rowInspector);
            if (unionExprEval != null) {
                String[] names = parameters.get(j).getExprString().split("\\.");
                // parameters of the form : KEY.colx:t.coly
                if (Utilities.ReduceField.KEY.name().equals(names[0]) && names.length > 2) {
                    String name = names[names.length - 2];
                    int tag = Integer.parseInt(name.split("\\:")[1]);
                    if (aggr.getDistinct()) {
                        // is distinct
                        distinctKeyAggrs.computeIfAbsent(tag, t -> new HashSet<>()).add(i);
                    } else {
                        nonDistinctKeyAggrs.computeIfAbsent(tag, t -> new HashSet<>()).add(i);
                    }
                } else {
                    // will be KEY._COLx or VALUE._COLx
                    nonDistinctAggrs.add(i);
                }
            } else {
                if (aggr.getDistinct()) {
                    aggregationIsDistinct[i] = true;
                }
            }
            aggregationParameterStandardObjectInspectors[i][j] = ObjectInspectorUtils.getStandardObjectInspector(aggregationParameterObjectInspectors[i][j], ObjectInspectorCopyOption.WRITABLE);
            aggregationParameterObjects[i][j] = null;
        }
        if (parameters.size() == 0) {
            // for ex: count(*)
            nonDistinctAggrs.add(i);
        }
    }
    // init aggregationClasses
    aggregationEvaluators = new GenericUDAFEvaluator[conf.getAggregators().size()];
    for (int i = 0; i < aggregationEvaluators.length; i++) {
        AggregationDesc agg = conf.getAggregators().get(i);
        aggregationEvaluators[i] = agg.getGenericUDAFEvaluator();
    }
    MapredContext context = MapredContext.get();
    if (context != null) {
        for (GenericUDAFEvaluator genericUDAFEvaluator : aggregationEvaluators) {
            context.setup(genericUDAFEvaluator);
        }
    }
    // grouping id should be pruned, which is the last of key columns
    // see ColumnPrunerGroupByProc
    outputKeyLength = conf.pruneGroupingSetId() ? keyFields.length - 1 : keyFields.length;
    // init objectInspectors
    ObjectInspector[] objectInspectors = new ObjectInspector[outputKeyLength + aggregationEvaluators.length];
    for (int i = 0; i < outputKeyLength; i++) {
        objectInspectors[i] = currentKeyObjectInspectors[i];
    }
    for (int i = 0; i < aggregationEvaluators.length; i++) {
        objectInspectors[outputKeyLength + i] = aggregationEvaluators[i].init(conf.getAggregators().get(i).getMode(), aggregationParameterObjectInspectors[i]);
    }
    aggregationsParametersLastInvoke = new Object[conf.getAggregators().size()][];
    if ((conf.getMode() != GroupByDesc.Mode.HASH || conf.getBucketGroup()) && (!groupingSetsPresent)) {
        aggregations = newAggregations();
        hashAggr = false;
    } else {
        hashAggregations = new HashMap<KeyWrapper, AggregationBuffer[]>(256);
        aggregations = newAggregations();
        hashAggr = true;
        keyPositionsSize = new ArrayList<Integer>();
        aggrPositions = new List[aggregations.length];
        groupbyMapAggrInterval = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL);
        // compare every groupbyMapAggrInterval rows
        numRowsCompareHashAggr = groupbyMapAggrInterval;
        minReductionHashAggr = conf.getMinReductionHashAggr();
    }
    List<String> fieldNames = new ArrayList<String>(conf.getOutputColumnNames());
    outputObjInspector = ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, Arrays.asList(objectInspectors));
    KeyWrapperFactory keyWrapperFactory = new KeyWrapperFactory(keyFields, keyObjectInspectors, currentKeyObjectInspectors);
    newKeys = keyWrapperFactory.getKeyWrapper();
    isTez = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez");
    isLlap = LlapDaemonInfo.INSTANCE.isLlap();
    numExecutors = isLlap ? LlapDaemonInfo.INSTANCE.getNumExecutors() : 1;
    firstRow = true;
    // The size of a hash table entry is not known up front, so estimate the
    // maximum number of entries from the available memory.
    if (hashAggr) {
        computeMaxEntriesHashAggr();
    }
    memoryMXBean = ManagementFactory.getMemoryMXBean();
    maxMemory = isTez ? getConf().getMaxMemoryAvailable() : memoryMXBean.getHeapMemoryUsage().getMax();
    memoryThreshold = this.getConf().getMemoryThreshold();
    LOG.info("isTez: {} isLlap: {} numExecutors: {} maxMemory: {}", isTez, isLlap, numExecutors, maxMemory);
}
Also used : Arrays(java.util.Arrays) LazyString(org.apache.hadoop.hive.serde2.lazy.LazyString) Text(org.apache.hadoop.io.Text) LongWritable(org.apache.hadoop.io.LongWritable) MemoryMXBean(java.lang.management.MemoryMXBean) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) ByteArrayRef(org.apache.hadoop.hive.serde2.lazy.ByteArrayRef) LazyBinary(org.apache.hadoop.hive.serde2.lazy.LazyBinary) ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) PrimitiveTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo) LlapDaemonInfo(org.apache.hadoop.hive.llap.LlapDaemonInfo) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) Timestamp(java.sql.Timestamp) Set(java.util.Set) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) AggregationDesc(org.apache.hadoop.hive.ql.plan.AggregationDesc) List(java.util.List) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) UnionObject(org.apache.hadoop.hive.serde2.objectinspector.UnionObject) GenericUDAFEvaluator(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) StructField(org.apache.hadoop.hive.serde2.objectinspector.StructField) OperatorType(org.apache.hadoop.hive.ql.plan.api.OperatorType) TypeInfoUtils(org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils) LazyPrimitive(org.apache.hadoop.hive.serde2.lazy.LazyPrimitive) PrimitiveCategory(org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory) Mode(org.apache.hadoop.hive.ql.plan.GroupByDesc.Mode) HashMap(java.util.HashMap) ObjectInspectorUtils(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ObjectInspectorCopyOption(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption) FastBitSet(javolution.util.FastBitSet) LazyStringObjectInspector(org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyStringObjectInspector) BytesWritable(org.apache.hadoop.io.BytesWritable) ManagementFactory(java.lang.management.ManagementFactory) ExprNodeColumnDesc(org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc) Iterator(java.util.Iterator) TimestampTZ(org.apache.hadoop.hive.common.type.TimestampTZ) GroupByDesc(org.apache.hadoop.hive.ql.plan.GroupByDesc) AggregationBuffer(org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.AggregationBuffer) HiveConf(org.apache.hadoop.hive.conf.HiveConf) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) Field(java.lang.reflect.Field) OpParseContext(org.apache.hadoop.hive.ql.parse.OpParseContext) TypeInfo(org.apache.hadoop.hive.serde2.typeinfo.TypeInfo) JobConf(org.apache.hadoop.mapred.JobConf) ObjectInspectorFactory(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory) LazyBinaryObjectInspector(org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyBinaryObjectInspector) ExprNodeDescUtils(org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils)
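
The bitsets built in initializeOp are reused on every input row: for each grouping set, the row's key array is copied with all positions nulled, only the columns whose bits are clear are copied back, and the grouping-set id is written into the dummy key slot. A minimal sketch of that per-row pattern; groupingSetsBitSet, groupingSetsPosition, and newKeysGroupingSets come from the code above, while newKeysArray, maskedKeys, and pos are illustrative names, not Hive's actual per-row code.

// Per-row sketch: apply one prebuilt bitset to mask the key columns.
Object[] maskedKeys = new Object[newKeysArray.length]; // starts all-null
FastBitSet bitset = groupingSetsBitSet[pos];
for (int keyPos = bitset.nextClearBit(0); keyPos < groupingSetsPosition;
        keyPos = bitset.nextClearBit(keyPos + 1)) {
    // clear bit: this key column is part of the grouping set, keep its value
    maskedKeys[keyPos] = newKeysArray[keyPos];
}
// the last key column carries the grouping-set id for this combination
maskedKeys[groupingSetsPosition] = newKeysGroupingSets[pos];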

Example 3 with FastBitSet

use of javolution.util.FastBitSet in project hive by apache.

the class TestFMSketchSerialization method deserialize.

private FastBitSet[] deserialize(String s, int numBitVectors) {
    FastBitSet[] b = new FastBitSet[numBitVectors];
    for (int j = 0; j < numBitVectors; j++) {
        b[j] = new FastBitSet(FMSketch.BIT_VECTOR_SIZE);
        b[j].clear();
    }
    int vectorIndex = 0;
    /*
     * Parse input string to obtain the indexes that are set in the bitvector.
     * When a toString() is called on a FastBitSet object to serialize it, the
     * serialization adds { and } to the beginning and end of the return String.
     * Skip "{", "}", ",", " " in the input string.
     */
    for (int i = 1; i < s.length() - 1; ) {
        char c = s.charAt(i);
        i = i + 1;
        // Move on to the next bit vector
        if (c == '}') {
            vectorIndex = vectorIndex + 1;
        }
        // Encountered a numeric value; Extract out the entire number
        if (c >= '0' && c <= '9') {
            String t = new String();
            t = t + c;
            c = s.charAt(i);
            i = i + 1;
            while (c != ',' && c != '}') {
                t = t + c;
                c = s.charAt(i);
                i = i + 1;
            }
            int bitIndex = Integer.parseInt(t);
            assert (bitIndex >= 0);
            assert (vectorIndex < numBitVectors);
            b[vectorIndex].set(bitIndex);
            if (c == '}') {
                vectorIndex = vectorIndex + 1;
            }
        }
    }
    return b;
}
Also used : FastBitSet(javolution.util.FastBitSet)
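
The character-by-character scanner above makes '}' do double duty: it closes a bit vector and terminates a number. If the serialized form is guaranteed to be numBitVectors brace-delimited groups such as "{1, 5}{0, 7}", the same parse can be written more compactly with split. A sketch under that assumption; deserializeCompact is a hypothetical name, not part of the test class.

import javolution.util.FastBitSet;

// Alternative sketch of the same parse, assuming s holds exactly
// numBitVectors brace-delimited groups like "{1, 5}{0, 7}".
private FastBitSet[] deserializeCompact(String s, int numBitVectors) {
    FastBitSet[] b = new FastBitSet[numBitVectors];
    // after splitting on '}', each group still starts with '{'
    String[] groups = s.split("\\}");
    for (int v = 0; v < numBitVectors; v++) {
        b[v] = new FastBitSet(FMSketch.BIT_VECTOR_SIZE);
        String body = groups[v].replace("{", "").trim();
        if (!body.isEmpty()) {
            for (String idx : body.split(",")) {
                b[v].set(Integer.parseInt(idx.trim())); // record each set bit
            }
        }
    }
    return b;
}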

Example 4 with FastBitSet

use of javolution.util.FastBitSet in project hive by apache.

the class NumDistinctValueEstimator method deserialize.

/* Deserializes the serialized string form back into an array of FastBitSet
   * and returns it.
   */
private FastBitSet[] deserialize(String s, int numBitVectors) {
    FastBitSet[] b = new FastBitSet[numBitVectors];
    for (int j = 0; j < numBitVectors; j++) {
        b[j] = new FastBitSet(BIT_VECTOR_SIZE);
        b[j].clear();
    }
    int vectorIndex = 0;
    /* Parse input string to obtain the indexes that are set in the bitvector.
     * When a toString() is called on a FastBitSet object to serialize it, the serialization
     * adds { and } to the beginning and end of the return String.
     * Skip "{", "}", ",", " " in the input string.
     */
    for (int i = 1; i < s.length() - 1; ) {
        char c = s.charAt(i);
        i = i + 1;
        // Move on to the next bit vector
        if (c == '}') {
            vectorIndex = vectorIndex + 1;
        }
        // Encountered a numeric value; Extract out the entire number
        if (c >= '0' && c <= '9') {
            String t = new String();
            t = t + c;
            c = s.charAt(i);
            i = i + 1;
            while (c != ',' && c != '}') {
                t = t + c;
                c = s.charAt(i);
                i = i + 1;
            }
            int bitIndex = Integer.parseInt(t);
            assert (bitIndex >= 0);
            assert (vectorIndex < numBitVectors);
            b[vectorIndex].set(bitIndex);
            if (c == '}') {
                vectorIndex = vectorIndex + 1;
            }
        }
    }
    return b;
}
Also used : FastBitSet(javolution.util.FastBitSet)
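
As the comment in the method notes, FastBitSet.toString() brackets the set indices in '{' and '}', so concatenating each vector's toString() output should yield exactly the format this method parses. A minimal round-trip sketch under that assumption, reusing deserialize and BIT_VECTOR_SIZE from the surrounding class:

// Round-trip sketch: serialize two bit vectors via toString(), then parse
// the concatenation back with deserialize() above.
FastBitSet[] vectors = new FastBitSet[2];
vectors[0] = new FastBitSet(BIT_VECTOR_SIZE);
vectors[1] = new FastBitSet(BIT_VECTOR_SIZE);
vectors[0].set(3);
vectors[0].set(17);
vectors[1].set(8);
StringBuilder sb = new StringBuilder();
for (FastBitSet v : vectors) {
    sb.append(v.toString()); // e.g. "{3, 17}" then "{8}"
}
FastBitSet[] restored = deserialize(sb.toString(), 2);
// restored[0] has bits 3 and 17 set; restored[1] has bit 8 set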

Aggregations

FastBitSet (javolution.util.FastBitSet)7 HiveException (org.apache.hadoop.hive.ql.metadata.HiveException)2 LazyBinaryObjectInspector (org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyBinaryObjectInspector)2 LazyStringObjectInspector (org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyStringObjectInspector)2 ObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector)2 ManagementFactory (java.lang.management.ManagementFactory)1 MemoryMXBean (java.lang.management.MemoryMXBean)1 Field (java.lang.reflect.Field)1 Timestamp (java.sql.Timestamp)1 ArrayList (java.util.ArrayList)1 Arrays (java.util.Arrays)1 HashMap (java.util.HashMap)1 HashSet (java.util.HashSet)1 Iterator (java.util.Iterator)1 List (java.util.List)1 Map (java.util.Map)1 Set (java.util.Set)1 Configuration (org.apache.hadoop.conf.Configuration)1 TimestampTZ (org.apache.hadoop.hive.common.type.TimestampTZ)1 HiveConf (org.apache.hadoop.hive.conf.HiveConf)1