
Example 6 with Statistics

Use of org.apache.hadoop.hive.ql.plan.Statistics in project hive by apache.

The class ConvertJoinMapJoin, method checkShuffleSizeForLargeTable.

/* Returns true if it passes the test, false otherwise. */
private boolean checkShuffleSizeForLargeTable(JoinOperator joinOp, int position, OptimizeTezProcContext context) {
    long max = HiveConf.getLongVar(context.parseContext.getConf(), HiveConf.ConfVars.HIVECONVERTJOINMAXSHUFFLESIZE);
    if (max < 1) {
        // Max is disabled, we can safely return false
        return false;
    }
    // Evaluate
    ReduceSinkOperator rsOp = (ReduceSinkOperator) joinOp.getParentOperators().get(position);
    Statistics inputStats = rsOp.getStatistics();
    long inputSize = inputStats.getDataSize();
    LOG.debug("Estimated size for input {}: {}; Max size for DPHJ conversion: {}", position, inputSize, max);
    if (inputSize > max) {
        LOG.debug("Size of input is greater than the max; " + "we do not convert to DPHJ");
        return false;
    }
    return true;
}
Also used : ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator), Statistics (org.apache.hadoop.hive.ql.plan.Statistics), ColStatistics (org.apache.hadoop.hive.ql.plan.ColStatistics)
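
The guard at the top of this method follows a common Hive convention: a size threshold read from HiveConf is treated as disabled when it is configured below 1. A minimal sketch of that convention follows; the helper name is hypothetical and not part of Hive.

// Sketch only: values below 1 disable the corresponding check.
// "isThresholdEnabled" is a made-up helper name for illustration.
private static boolean isThresholdEnabled(HiveConf conf, HiveConf.ConfVars var) {
    return HiveConf.getLongVar(conf, var) >= 1;
}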

Example 7 with Statistics

Use of org.apache.hadoop.hive.ql.plan.Statistics in project hive by apache.

The class ConvertJoinMapJoin, method getMapJoinConversionPos.

/**
 * Obtain big table position for join.
 *
 * @param joinOp join operator
 * @param context optimization context
 * @param buckets bucket count to consider for Bucket Map Join conversion, or reducer count
 * to consider for Dynamic Hash Join conversion
 * @param skipJoinTypeChecks whether to skip join type checking
 * @param maxSize size threshold for Map Join conversion
 * @param checkMapJoinThresholds whether to check thresholds to convert to Map Join
 * @return big table position, or -1 if it cannot be determined
 * @throws SemanticException
 */
public int getMapJoinConversionPos(JoinOperator joinOp, OptimizeTezProcContext context, int buckets, boolean skipJoinTypeChecks, long maxSize, boolean checkMapJoinThresholds) throws SemanticException {
    if (!skipJoinTypeChecks) {
        /*
       * HIVE-9038: Join tests fail in tez when we have more than 1 join on the same key and there is
       * an outer join down the join tree that requires filterTag. We disable this conversion to map
       * join here now. We need to emulate the behavior of HashTableSinkOperator as in MR or create a
       * new operation to be able to support this. This seems like a corner case enough to special
       * case this for now.
       */
        if (joinOp.getConf().getConds().length > 1) {
            if (hasOuterJoin(joinOp)) {
                return -1;
            }
        }
    }
    Set<Integer> bigTableCandidateSet = MapJoinProcessor.getBigTableCandidates(joinOp.getConf().getConds());
    int bigTablePosition = -1;
    // big input cumulative row count
    long bigInputCumulativeCardinality = -1L;
    // stats of the big input
    Statistics bigInputStat = null;
    // foundInputNotFittingInMemory means we've encountered an input that's bigger than
    // the max. That input is either the big table or we cannot convert.
    boolean foundInputNotFittingInMemory = false;
    // total size of the inputs
    long totalSize = 0;
    // convert to DPHJ
    boolean convertDPHJ = false;
    for (int pos = 0; pos < joinOp.getParentOperators().size(); pos++) {
        Operator<? extends OperatorDesc> parentOp = joinOp.getParentOperators().get(pos);
        Statistics currInputStat = parentOp.getStatistics();
        if (currInputStat == null) {
            LOG.warn("Couldn't get statistics from: " + parentOp);
            return -1;
        }
        long inputSize = currInputStat.getDataSize();
        boolean currentInputNotFittingInMemory = false;
        if ((bigInputStat == null) || (inputSize > bigInputStat.getDataSize())) {
            if (foundInputNotFittingInMemory) {
                // Cannot convert to map join; we've already chosen a big table based
                // on size and there's another one that's bigger.
                return -1;
            }
            if (inputSize / buckets > maxSize) {
                if (!bigTableCandidateSet.contains(pos)) {
                    // Can't use the current input as the big table because it's too
                    // big for the map side.
                    return -1;
                }
                currentInputNotFittingInMemory = true;
                foundInputNotFittingInMemory = true;
            }
        }
        long currentInputCumulativeCardinality;
        if (foundInputNotFittingInMemory) {
            currentInputCumulativeCardinality = -1L;
        } else {
            Long cardinality = computeCumulativeCardinality(parentOp);
            if (cardinality == null) {
                // We could not get stats, we cannot convert
                return -1;
            }
            currentInputCumulativeCardinality = cardinality;
        }
        // This input is the big table if it is contained in the big candidates set, and either:
        // 1) we have not chosen a big table yet, or
        // 2) it has been chosen as the big table above, or
        // 3) the cumulative cardinality for this input is higher, or
        // 4) the cumulative cardinality is equal, but the size is bigger.
        boolean selectedBigTable = bigTableCandidateSet.contains(pos) && (bigInputStat == null || currentInputNotFittingInMemory || (!foundInputNotFittingInMemory && (currentInputCumulativeCardinality > bigInputCumulativeCardinality || (currentInputCumulativeCardinality == bigInputCumulativeCardinality && inputSize > bigInputStat.getDataSize()))));
        if (bigInputStat != null && selectedBigTable) {
            // We are replacing the current big table with a new one, thus
            // we need to count the current one as a map table then.
            totalSize += bigInputStat.getDataSize();
            // If the number of distinct key entries exceeds the HashTable threshold,
            // consider converting to dynamically partitioned hash join instead
            if (checkMapJoinThresholds && !checkNumberOfEntriesForHashTable(joinOp, bigTablePosition, context)) {
                convertDPHJ = true;
            }
        } else if (!selectedBigTable) {
            // This is not the first table and we are not using it as big table,
            // in fact, we're adding this table as a map table
            totalSize += inputSize;
            // If the number of distinct key entries exceeds the HashTable threshold,
            // consider converting to dynamically partitioned hash join instead
            if (checkMapJoinThresholds && !checkNumberOfEntriesForHashTable(joinOp, pos, context)) {
                convertDPHJ = true;
            }
        }
        if (totalSize / buckets > maxSize) {
            // The sum of the small table sizes per bucket exceeds the limit; hence cannot convert.
            return -1;
        }
        if (selectedBigTable) {
            bigTablePosition = pos;
            bigInputCumulativeCardinality = currentInputCumulativeCardinality;
            bigInputStat = currInputStat;
        }
    }
    // Check if size of data to shuffle (larger table) is less than given max size
    if (checkMapJoinThresholds && convertDPHJ && checkShuffleSizeForLargeTable(joinOp, bigTablePosition, context)) {
        LOG.debug("Conditions to convert to MapJoin are not met");
        return -1;
    }
    // We store the total memory that this MapJoin is going to use,
    // which is calculated as totalSize/buckets, with totalSize
    // equal to sum of small tables size.
    joinOp.getConf().setInMemoryDataSize(totalSize / buckets);
    return bigTablePosition;
}
Also used : Statistics (org.apache.hadoop.hive.ql.plan.Statistics), ColStatistics (org.apache.hadoop.hive.ql.plan.ColStatistics)
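
Example 7 relies on computeCumulativeCardinality, which is not shown on this page. As a rough sketch only (the actual Hive implementation may differ in detail), cumulative cardinality can be pictured as the sum of the estimated row counts of an operator and everything upstream of it, with null signalling missing statistics:

// Rough sketch of a cumulative-cardinality walk; the real
// ConvertJoinMapJoin.computeCumulativeCardinality may differ.
private static Long computeCumulativeCardinalitySketch(Operator<? extends OperatorDesc> op) {
    Statistics stats = op.getStatistics();
    if (stats == null) {
        // Without statistics the caller cannot rank inputs, so propagate "unknown".
        return null;
    }
    long cumulative = stats.getNumRows();
    List<Operator<? extends OperatorDesc>> parents = op.getParentOperators();
    if (parents != null) {
        for (Operator<? extends OperatorDesc> parent : parents) {
            Long parentCardinality = computeCumulativeCardinalitySketch(parent);
            if (parentCardinality == null) {
                return null;
            }
            cumulative += parentCardinality;
        }
    }
    return cumulative;
}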

Example 8 with Statistics

Use of org.apache.hadoop.hive.ql.plan.Statistics in project hive by apache.

The class ConvertJoinMapJoin, method checkNumberOfEntriesForHashTable.

/* Returns true if it passes the test, false otherwise. */
private boolean checkNumberOfEntriesForHashTable(JoinOperator joinOp, int position, OptimizeTezProcContext context) {
    long max = HiveConf.getLongVar(context.parseContext.getConf(), HiveConf.ConfVars.HIVECONVERTJOINMAXENTRIESHASHTABLE);
    if (max < 1) {
        // Max is disabled, we can safely return true
        return true;
    }
    // Calculate number of different entries and evaluate
    ReduceSinkOperator rsOp = (ReduceSinkOperator) joinOp.getParentOperators().get(position);
    List<String> keys = StatsUtils.getQualifedReducerKeyNames(rsOp.getConf().getOutputKeyColumnNames());
    Statistics inputStats = rsOp.getStatistics();
    List<ColStatistics> columnStats = new ArrayList<>();
    for (String key : keys) {
        ColStatistics cs = inputStats.getColumnStatisticsFromColName(key);
        if (cs == null) {
            LOG.debug("Couldn't get statistics for: {}", key);
            return true;
        }
        columnStats.add(cs);
    }
    long numRows = inputStats.getNumRows();
    long estimation = estimateNDV(numRows, columnStats);
    LOG.debug("Estimated NDV for input {}: {}; Max NDV for MapJoin conversion: {}", position, estimation, max);
    if (estimation > max) {
        // Estimation larger than max
        LOG.debug("Number of different entries for HashTable is greater than the max; " + "we do not convert to MapJoin");
        return false;
    }
    // We can proceed with the conversion
    return true;
}
Also used : ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator), ColStatistics (org.apache.hadoop.hive.ql.plan.ColStatistics), ArrayList (java.util.ArrayList), Statistics (org.apache.hadoop.hive.ql.plan.Statistics)
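
estimateNDV is also not shown here. A simplified way to picture it, assumed for illustration rather than taken from Hive, is the product of the per-column distinct-value counts of the join key, capped at the input's row count:

// Simplified NDV estimate for a composite key: product of per-column NDVs,
// capped at the row count. Illustration only; Hive's estimateNDV may be more elaborate.
private static long estimateNDVSketch(long numRows, List<ColStatistics> columnStats) {
    long ndv = 1;
    for (ColStatistics cs : columnStats) {
        long colNdv = cs.getCountDistint();
        if (colNdv <= 0) {
            // Unknown NDV for a key column: conservatively assume every row is distinct.
            return numRows;
        }
        ndv *= colNdv;
        if (ndv < 0 || ndv >= numRows) {
            // Overflow, or already above the row count: a hash table cannot have more entries than rows.
            return numRows;
        }
    }
    return ndv;
}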

Example 9 with Statistics

Use of org.apache.hadoop.hive.ql.plan.Statistics in project hive by apache.

The class ReduceSinkMapJoinProc, method processReduceSinkToHashJoin.

public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, MapJoinOperator mapJoinOp, GenTezProcContext context) throws SemanticException {
    // remove the tag for in-memory side of mapjoin
    parentRS.getConf().setSkipTag(true);
    parentRS.setSkipTag(true);
    // Mark this small table as being processed
    if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
        context.mapJoinToUnprocessedSmallTableReduceSinks.get(mapJoinOp).remove(parentRS);
    }
    List<BaseWork> mapJoinWork = null;
    /*
     *  if there was a pre-existing work generated for the big-table mapjoin side,
     *  we need to hook the work generated for the RS (associated with the RS-MJ pattern)
     *  with the pre-existing work.
     *
     *  Otherwise, we record that the mapjoin op needs to be linked
     *  to the RS work (associated with the RS-MJ pattern).
     *
     */
    mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
    BaseWork parentWork = getMapJoinParentWork(context, parentRS);
    // set the link between mapjoin and parent vertex
    int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
    if (pos == -1) {
        throw new SemanticException("Cannot find position of parent in mapjoin");
    }
    MapJoinDesc joinConf = mapJoinOp.getConf();
    long keyCount = Long.MAX_VALUE, rowCount = Long.MAX_VALUE, bucketCount = 1;
    long tableSize = Long.MAX_VALUE;
    Statistics stats = parentRS.getStatistics();
    if (stats != null) {
        keyCount = rowCount = stats.getNumRows();
        if (keyCount <= 0) {
            keyCount = rowCount = Long.MAX_VALUE;
        }
        tableSize = stats.getDataSize();
        ArrayList<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
        if (keyCols != null && !keyCols.isEmpty()) {
            // See if we can arrive at a smaller number using distinct stats from key columns.
            long maxKeyCount = 1;
            String prefix = Utilities.ReduceField.KEY.toString();
            for (String keyCol : keyCols) {
                ExprNodeDesc realCol = parentRS.getColumnExprMap().get(prefix + "." + keyCol);
                ColStatistics cs = StatsUtils.getColStatisticsFromExpression(context.conf, stats, realCol);
                if (cs == null || cs.getCountDistint() <= 0) {
                    maxKeyCount = Long.MAX_VALUE;
                    break;
                }
                maxKeyCount *= cs.getCountDistint();
                if (maxKeyCount >= keyCount) {
                    break;
                }
            }
            keyCount = Math.min(maxKeyCount, keyCount);
        }
        if (joinConf.isBucketMapJoin()) {
            OpTraits opTraits = mapJoinOp.getOpTraits();
            bucketCount = (opTraits == null) ? -1 : opTraits.getNumBuckets();
            if (bucketCount > 0) {
                // We cannot obtain a better estimate without CustomPartitionVertex providing it
                // to us somehow; in which case using statistics would be completely unnecessary.
                keyCount /= bucketCount;
                tableSize /= bucketCount;
            }
        } else if (joinConf.isDynamicPartitionHashJoin()) {
            // For dynamic partitioned hash join, assuming table is split evenly among the reduce tasks.
            bucketCount = parentRS.getConf().getNumReducers();
            keyCount /= bucketCount;
            tableSize /= bucketCount;
        }
    }
    if (keyCount == 0) {
        keyCount = 1;
    }
    if (tableSize == 0) {
        tableSize = 1;
    }
    LOG.info("Mapjoin " + mapJoinOp + "(bucket map join = " + joinConf.isBucketMapJoin() + "), pos: " + pos + " --> " + parentWork.getName() + " (" + keyCount + " keys estimated from " + rowCount + " rows, " + bucketCount + " buckets)");
    joinConf.getParentToInput().put(pos, parentWork.getName());
    if (keyCount != Long.MAX_VALUE) {
        joinConf.getParentKeyCounts().put(pos, keyCount);
    }
    joinConf.getParentDataSizes().put(pos, tableSize);
    int numBuckets = -1;
    EdgeType edgeType = EdgeType.BROADCAST_EDGE;
    if (joinConf.isBucketMapJoin()) {
        numBuckets = (Integer) joinConf.getBigTableBucketNumMapping().values().toArray()[0];
        /*
       * Here, we can be in one of 4 states.
       *
       * 1. If map join work is null implies that we have not yet traversed the big table side. We
       * just need to see if we can find a reduce sink operator in the big table side. This would
       * imply a reduce side operation.
       *
       * 2. If we don't find a reduce sink in step 1, it has to be the case that it is a map side operation.
       *
       * 3. If we have already created a work item for the big table side, we need to see if we can
       * find a table scan operator in the big table side. This would imply a map side operation.
       *
       * 4. If we don't find a table scan operator, it has to be a reduce side operation.
       */
        if (mapJoinWork == null) {
            Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), ReduceSinkOperator.class);
            if (rootOp == null) {
                // likely we found a table scan operator
                edgeType = EdgeType.CUSTOM_EDGE;
            } else {
                // we have found a reduce sink
                edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
            }
        } else {
            Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), TableScanOperator.class);
            if (rootOp != null) {
                // likely we found a table scan operator
                edgeType = EdgeType.CUSTOM_EDGE;
            } else {
                // we have found a reduce sink
                edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
            }
        }
    } else if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
        if (parentRS.getConf().isForwarding()) {
            edgeType = EdgeType.ONE_TO_ONE_EDGE;
        } else {
            edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
        }
    }
    if (edgeType == EdgeType.CUSTOM_EDGE) {
        // disable auto parallelism for bucket map joins
        parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
    }
    TezEdgeProperty edgeProp = new TezEdgeProperty(null, edgeType, numBuckets);
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            // link the work with the work associated with the reduce sink that triggered this rule
            TezWork tezWork = context.currentTask.getWork();
            LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
            tezWork.connect(parentWork, myWork, edgeProp);
            if (edgeType == EdgeType.CUSTOM_EDGE) {
                tezWork.setVertexType(myWork, VertexType.INITIALIZED_EDGES);
            }
            ReduceSinkOperator r = null;
            if (context.connectedReduceSinks.contains(parentRS)) {
                LOG.debug("Cloning reduce sink " + parentRS + " for multi-child broadcast edge");
                // we've already set this one up. Need to clone for the next work.
                r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(parentRS.getCompilationOpContext(), (ReduceSinkDesc) parentRS.getConf().clone(), new RowSchema(parentRS.getSchema()), parentRS.getParentOperators());
                context.clonedReduceSinks.add(r);
            } else {
                r = parentRS;
            }
            // remember the output name of the reduce sink
            r.getConf().setOutputName(myWork.getName());
            context.connectedReduceSinks.add(r);
        }
    }
    // remember in case we need to connect additional work later
    Map<BaseWork, TezEdgeProperty> linkWorkMap = null;
    if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
        linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
    } else {
        linkWorkMap = new HashMap<BaseWork, TezEdgeProperty>();
    }
    linkWorkMap.put(parentWork, edgeProp);
    context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
    List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
    if (reduceSinks == null) {
        reduceSinks = new ArrayList<ReduceSinkOperator>();
    }
    reduceSinks.add(parentRS);
    context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
    // create the dummy operators
    List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
    // Create a new operator, HashTableDummyOperator, which shares the table desc
    HashTableDummyDesc desc = new HashTableDummyDesc();
    @SuppressWarnings("unchecked") HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(parentRS.getCompilationOpContext(), desc);
    TableDesc tbl;
    // need to create the correct table descriptor for key/value
    RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
    tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
    dummyOp.getConf().setTbl(tbl);
    Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
    List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
    StringBuilder keyOrder = new StringBuilder();
    StringBuilder keyNullOrder = new StringBuilder();
    for (ExprNodeDesc k : keyCols) {
        keyOrder.append("+");
        keyNullOrder.append("a");
    }
    TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), keyNullOrder.toString());
    mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
    // let the dummy op be the parent of mapjoin op
    mapJoinOp.replaceParent(parentRS, dummyOp);
    List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
    dummyChildren.add(mapJoinOp);
    dummyOp.setChildOperators(dummyChildren);
    dummyOperators.add(dummyOp);
    // cut the operator tree so as to not retain connections from the parent RS downstream
    List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
    int childIndex = childOperators.indexOf(mapJoinOp);
    childOperators.remove(childIndex);
    // The work needs to know about the dummy operators; they have to be separately
    // initialized at task startup
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            LOG.debug("adding dummy op to work " + myWork.getName() + " from MJ work: " + dummyOp);
            myWork.addDummyOp(dummyOp);
        }
    }
    if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
        for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
            dummyOperators.add(op);
        }
    }
    context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
    return true;
}
Also used : ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator), MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator), TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator), Operator (org.apache.hadoop.hive.ql.exec.Operator), HashTableDummyOperator (org.apache.hadoop.hive.ql.exec.HashTableDummyOperator), OpTraits (org.apache.hadoop.hive.ql.plan.OpTraits), TezEdgeProperty (org.apache.hadoop.hive.ql.plan.TezEdgeProperty), ArrayList (java.util.ArrayList), ColStatistics (org.apache.hadoop.hive.ql.plan.ColStatistics), List (java.util.List), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), BaseWork (org.apache.hadoop.hive.ql.plan.BaseWork), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException), HashTableDummyDesc (org.apache.hadoop.hive.ql.plan.HashTableDummyDesc), RowSchema (org.apache.hadoop.hive.ql.exec.RowSchema), MapJoinDesc (org.apache.hadoop.hive.ql.plan.MapJoinDesc), Statistics (org.apache.hadoop.hive.ql.plan.Statistics), EdgeType (org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType), TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc), OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc), TezWork (org.apache.hadoop.hive.ql.plan.TezWork)
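
The branching on joinConf.isBucketMapJoin() and isDynamicPartitionHashJoin() above ultimately just selects a Tez edge type. As a readability aid only (this helper does not exist in Hive, and the parameter names summarize the checks performed in the method), the decision can be restated as:

// Restatement of the edge-type decision in processReduceSinkToHashJoin above.
// Not a Hive API; parameter names are hypothetical summaries of the checks performed there.
private static EdgeType chooseEdgeType(boolean bucketMapJoin, boolean bigTableSideIsMapWork,
        boolean dynamicPartitionHashJoin, boolean parentRsIsForwarding) {
    if (bucketMapJoin) {
        // Big table produced by map work -> CUSTOM_EDGE; by reduce work -> CUSTOM_SIMPLE_EDGE.
        return bigTableSideIsMapWork ? EdgeType.CUSTOM_EDGE : EdgeType.CUSTOM_SIMPLE_EDGE;
    }
    if (dynamicPartitionHashJoin) {
        // A forwarding reduce sink keeps a one-to-one mapping between producer and consumer tasks.
        return parentRsIsForwarding ? EdgeType.ONE_TO_ONE_EDGE : EdgeType.CUSTOM_SIMPLE_EDGE;
    }
    // Default: broadcast the small table to every task of the map join vertex.
    return EdgeType.BROADCAST_EDGE;
}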

Example 10 with Statistics

Use of org.apache.hadoop.hive.ql.plan.Statistics in project hive by apache.

The class RelOptHiveTable, method updateColStats.

private void updateColStats(Set<Integer> projIndxLst, boolean allowNullColumnForMissingStats) {
    List<String> nonPartColNamesThatRqrStats = new ArrayList<String>();
    List<Integer> nonPartColIndxsThatRqrStats = new ArrayList<Integer>();
    List<String> partColNamesThatRqrStats = new ArrayList<String>();
    List<Integer> partColIndxsThatRqrStats = new ArrayList<Integer>();
    Set<String> colNamesFailedStats = new HashSet<String>();
    // 1. Separate required columns into Non Partition and Partition Cols
    ColumnInfo tmp;
    for (Integer pi : projIndxLst) {
        if (hiveColStatsMap.get(pi) == null) {
            if ((tmp = hiveNonPartitionColsMap.get(pi)) != null) {
                nonPartColNamesThatRqrStats.add(tmp.getInternalName());
                nonPartColIndxsThatRqrStats.add(pi);
            } else if ((tmp = hivePartitionColsMap.get(pi)) != null) {
                partColNamesThatRqrStats.add(tmp.getInternalName());
                partColIndxsThatRqrStats.add(pi);
            } else {
                noColsMissingStats.getAndIncrement();
                String logMsg = "Unable to find Column Index: " + pi + ", in " + hiveTblMetadata.getCompleteName();
                LOG.error(logMsg);
                throw new RuntimeException(logMsg);
            }
        }
    }
    if (null == partitionList) {
        // We could be here either because it's an unpartitioned table or because
        // there are no pruning predicates on a partitioned table.
        computePartitionList(hiveConf, null, new HashSet<Integer>());
    }
    ColumnStatsList colStatsCached = colStatsCache.get(partitionList.getKey());
    if (colStatsCached == null) {
        colStatsCached = new ColumnStatsList();
        colStatsCache.put(partitionList.getKey(), colStatsCached);
    }
    // 2. Obtain Col Stats for Non Partition Cols
    if (nonPartColNamesThatRqrStats.size() > 0) {
        List<ColStatistics> hiveColStats = new ArrayList<ColStatistics>();
        if (!hiveTblMetadata.isPartitioned()) {
            // 2.1 Handle the case for unpartitioned table.
            try {
                Statistics stats = StatsUtils.collectStatistics(hiveConf, null, hiveTblMetadata, hiveNonPartitionCols, nonPartColNamesThatRqrStats, colStatsCached, nonPartColNamesThatRqrStats, true);
                rowCount = stats.getNumRows();
                for (String c : nonPartColNamesThatRqrStats) {
                    ColStatistics cs = stats.getColumnStatisticsFromColName(c);
                    if (cs != null) {
                        hiveColStats.add(cs);
                    }
                }
                colStatsCached.updateState(stats.getColumnStatsState());
                // 2.1.1 Record column names that we needed stats for but couldn't obtain
                if (hiveColStats.isEmpty()) {
                    colNamesFailedStats.addAll(nonPartColNamesThatRqrStats);
                } else if (hiveColStats.size() != nonPartColNamesThatRqrStats.size()) {
                    Set<String> setOfFiledCols = new HashSet<String>(nonPartColNamesThatRqrStats);
                    Set<String> setOfObtainedColStats = new HashSet<String>();
                    for (ColStatistics cs : hiveColStats) {
                        setOfObtainedColStats.add(cs.getColumnName());
                    }
                    setOfFiledCols.removeAll(setOfObtainedColStats);
                    colNamesFailedStats.addAll(setOfFiledCols);
                } else {
                    // Column stats in hiveColStats might not be in the same order as the columns in
                    // nonPartColNamesThatRqrStats. reorder hiveColStats so we can build hiveColStatsMap
                    // using nonPartColIndxsThatRqrStats as below
                    Map<String, ColStatistics> columnStatsMap = new HashMap<String, ColStatistics>(hiveColStats.size());
                    for (ColStatistics cs : hiveColStats) {
                        columnStatsMap.put(cs.getColumnName(), cs);
                        // If the stats were merely estimated rather than collected, treat them as unavailable
                        if (cs.isEstimated()) {
                            colNamesFailedStats.add(cs.getColumnName());
                        }
                    }
                    hiveColStats.clear();
                    for (String colName : nonPartColNamesThatRqrStats) {
                        hiveColStats.add(columnStatsMap.get(colName));
                    }
                }
            } catch (HiveException e) {
                String logMsg = "Collecting stats for table: " + hiveTblMetadata.getTableName() + " failed.";
                LOG.error(logMsg, e);
                throw new RuntimeException(logMsg, e);
            }
        } else {
            // 2.2 Obtain col stats for partitioned table.
            try {
                if (partitionList.getNotDeniedPartns().isEmpty()) {
                    // no need to make a metastore call
                    rowCount = 0;
                    hiveColStats = new ArrayList<ColStatistics>();
                    for (int i = 0; i < nonPartColNamesThatRqrStats.size(); i++) {
                        // add empty stats object for each column
                        hiveColStats.add(new ColStatistics(nonPartColNamesThatRqrStats.get(i), hiveNonPartitionColsMap.get(nonPartColIndxsThatRqrStats.get(i)).getTypeName()));
                    }
                    colNamesFailedStats.clear();
                    colStatsCached.updateState(State.COMPLETE);
                } else {
                    Statistics stats = StatsUtils.collectStatistics(hiveConf, partitionList, hiveTblMetadata, hiveNonPartitionCols, nonPartColNamesThatRqrStats, colStatsCached, nonPartColNamesThatRqrStats, true);
                    rowCount = stats.getNumRows();
                    hiveColStats = new ArrayList<ColStatistics>();
                    for (String c : nonPartColNamesThatRqrStats) {
                        ColStatistics cs = stats.getColumnStatisticsFromColName(c);
                        if (cs != null) {
                            hiveColStats.add(cs);
                            if (cs.isEstimated()) {
                                colNamesFailedStats.add(c);
                            }
                        } else {
                            colNamesFailedStats.add(c);
                        }
                    }
                    colStatsCached.updateState(stats.getColumnStatsState());
                }
            } catch (HiveException e) {
                String logMsg = "Collecting stats failed.";
                LOG.error(logMsg, e);
                throw new RuntimeException(logMsg, e);
            }
        }
        if (hiveColStats != null && hiveColStats.size() == nonPartColNamesThatRqrStats.size()) {
            for (int i = 0; i < hiveColStats.size(); i++) {
                // the columns in nonPartColIndxsThatRqrStats/nonPartColNamesThatRqrStats/hiveColStats
                // are in same order
                hiveColStatsMap.put(nonPartColIndxsThatRqrStats.get(i), hiveColStats.get(i));
                colStatsCached.put(hiveColStats.get(i).getColumnName(), hiveColStats.get(i));
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Stats for column " + hiveColStats.get(i).getColumnName() + " in table " + hiveTblMetadata.getTableName() + " stored in cache");
                    LOG.debug(hiveColStats.get(i).toString());
                }
            }
        }
    }
    // 3. Obtain Stats for Partition Cols
    if (colNamesFailedStats.isEmpty() && !partColNamesThatRqrStats.isEmpty()) {
        ColStatistics cStats = null;
        for (int i = 0; i < partColNamesThatRqrStats.size(); i++) {
            cStats = StatsUtils.getColStatsForPartCol(hivePartitionColsMap.get(partColIndxsThatRqrStats.get(i)), new PartitionIterable(partitionList.getNotDeniedPartns()), hiveConf);
            hiveColStatsMap.put(partColIndxsThatRqrStats.get(i), cStats);
            colStatsCached.put(cStats.getColumnName(), cStats);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Stats for column " + cStats.getColumnName() + " in table " + hiveTblMetadata.getTableName() + " stored in cache");
                LOG.debug(cStats.toString());
            }
        }
    }
    // 4. Warn user if we could not get stats for required columns
    if (!colNamesFailedStats.isEmpty()) {
        String logMsg = "No Stats for " + hiveTblMetadata.getCompleteName() + ", Columns: " + getColNamesForLogging(colNamesFailedStats);
        noColsMissingStats.getAndAdd(colNamesFailedStats.size());
        if (allowNullColumnForMissingStats) {
            LOG.warn(logMsg);
            HiveConf conf = SessionState.getSessionConf();
            if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_SHOW_WARNINGS)) {
                LogHelper console = SessionState.getConsole();
                console.printInfo(logMsg);
            }
        } else {
            LOG.error(logMsg);
            throw new RuntimeException(logMsg);
        }
    }
}
Also used : ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet), Set (java.util.Set), HashSet (java.util.HashSet), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), LogHelper (org.apache.hadoop.hive.ql.session.SessionState.LogHelper), ArrayList (java.util.ArrayList), ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo), Statistics (org.apache.hadoop.hive.ql.plan.Statistics), ColStatistics (org.apache.hadoop.hive.ql.plan.ColStatistics), UniqueConstraint (org.apache.hadoop.hive.ql.metadata.UniqueConstraint), RelReferentialConstraint (org.apache.calcite.rel.RelReferentialConstraint), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), PartitionIterable (org.apache.hadoop.hive.ql.metadata.PartitionIterable), HiveConf (org.apache.hadoop.hive.conf.HiveConf), ColumnStatsList (org.apache.hadoop.hive.ql.parse.ColumnStatsList), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), HashMap (java.util.HashMap)
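
Whichever path produced it, the Statistics object is consumed through the same handful of getters seen throughout these examples. A minimal usage sketch follows; the helper method and its log message are illustrative only.

// Sketch: reading the parts of a Statistics object that the examples above depend on.
private static void logBasicStats(Statistics stats, String colName) {
    long rows = stats.getNumRows();     // estimated row count
    long bytes = stats.getDataSize();   // estimated data size in bytes
    ColStatistics cs = stats.getColumnStatisticsFromColName(colName);
    long ndv = (cs == null) ? -1 : cs.getCountDistint();
    LOG.debug("rows={}, dataSize={}, ndv({})={}", rows, bytes, colName, ndv);
}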

Aggregations

Statistics (org.apache.hadoop.hive.ql.plan.Statistics): 15
ColStatistics (org.apache.hadoop.hive.ql.plan.ColStatistics): 13
ArrayList (java.util.ArrayList): 5
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 4
AnnotateWithStatistics (org.apache.hadoop.hive.ql.optimizer.stats.annotation.AnnotateWithStatistics): 4
List (java.util.List): 3
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 3
HashMap (java.util.HashMap): 2
AggrStats (org.apache.hadoop.hive.metastore.api.AggrStats): 2
FilterOperator (org.apache.hadoop.hive.ql.exec.FilterOperator): 2
MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator): 2
Operator (org.apache.hadoop.hive.ql.exec.Operator): 2
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 2
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 2
ColumnStatsList (org.apache.hadoop.hive.ql.parse.ColumnStatsList): 2
PrunedPartitionList (org.apache.hadoop.hive.ql.parse.PrunedPartitionList): 2
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc): 2
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 2
OperatorStats (org.apache.hadoop.hive.ql.stats.OperatorStats): 2
ImmutableMap (com.google.common.collect.ImmutableMap): 1