Use of org.apache.hadoop.hive.ql.plan.Statistics in project hive by apache.
Class ConvertJoinMapJoin, method getMapJoinConversion.
/**
* Obtain the big table position for a join.
*
* @param joinOp join operator
* @param context optimization context
* @param buckets bucket count considered for Bucket Map Join conversion, or reducer count
*        considered for Dynamic Partitioned Hash Join conversion
* @param skipJoinTypeChecks whether to skip join type checking
* @param maxSize size threshold for Map Join conversion
* @param checkMapJoinThresholds whether to check the thresholds for converting to Map Join
* @return a MapJoinConversion describing the chosen big table position, or null if the
*         conversion cannot be performed
* @throws SemanticException
*/
public MapJoinConversion getMapJoinConversion(JoinOperator joinOp, OptimizeTezProcContext context, int buckets, boolean skipJoinTypeChecks, long maxSize, boolean checkMapJoinThresholds) throws SemanticException {
JoinDesc joinDesc = joinOp.getConf();
JoinCondDesc[] conds = joinDesc.getConds();
if (!skipJoinTypeChecks) {
/*
* HIVE-9038: Join tests fail in tez when we have more than 1 join on the same key and there is
* an outer join down the join tree that requires filterTag. We disable this conversion to map
* join here now. We need to emulate the behavior of HashTableSinkOperator as in MR or create a
* new operation to be able to support this. This seems enough of a corner case to
* special-case it for now.
*/
if (conds.length > 1) {
if (hasOuterJoin(joinOp)) {
return null;
}
}
}
// Assume FULL OUTER support is disabled until the checks below say otherwise.
boolean isFullOuterEnabledForDynamicPartitionHashJoin = false;
boolean isFullOuterEnabledForMapJoin = false;
boolean isFullOuterJoin = MapJoinProcessor.precheckFullOuter(context.conf, joinOp);
if (isFullOuterJoin) {
boolean isFullOuterEnabled = MapJoinProcessor.isFullOuterMapEnabled(context.conf, joinOp);
if (isFullOuterEnabled) {
// FUTURE: Currently, we only support DPHJ.
isFullOuterEnabledForDynamicPartitionHashJoin = MapJoinProcessor.isFullOuterEnabledForDynamicPartitionHashJoin(context.conf, joinOp);
}
}
Set<Integer> bigTableCandidateSet = MapJoinProcessor.getBigTableCandidates(conds, /* isSupportFullOuter */ true);
if (bigTableCandidateSet.isEmpty()) {
return null;
}
int bigTablePosition = -1;
// big input cumulative row count
long bigInputCumulativeCardinality = -1L;
// stats of the big input
Statistics bigInputStat = null;
// foundInputNotFittingInMemory means we've encountered an input that's bigger than the
// max. That input is either the big table or we cannot convert.
boolean foundInputNotFittingInMemory = false;
// total size of the inputs
long totalSize = 0;
// convert to DPHJ
boolean convertDPHJ = false;
for (int pos = 0; pos < joinOp.getParentOperators().size(); pos++) {
Operator<? extends OperatorDesc> parentOp = joinOp.getParentOperators().get(pos);
Statistics currInputStat = parentOp.getStatistics();
if (currInputStat == null) {
LOG.warn("Couldn't get statistics from: " + parentOp);
return null;
}
long inputSize = computeOnlineDataSize(currInputStat);
LOG.info("Join input#{}; onlineDataSize: {}; Statistics: {}", pos, inputSize, currInputStat);
boolean currentInputNotFittingInMemory = false;
if ((bigInputStat == null) || (inputSize > computeOnlineDataSize(bigInputStat))) {
if (foundInputNotFittingInMemory) {
// cannot convert to map join: we've already chosen a big table based on size, and this input is even bigger.
return null;
}
if (inputSize / buckets > maxSize) {
if (!bigTableCandidateSet.contains(pos)) {
// this input cannot be the big table, yet it is too big for the map (hash) side; cannot convert.
return null;
}
currentInputNotFittingInMemory = true;
foundInputNotFittingInMemory = true;
}
}
long currentInputCumulativeCardinality;
if (foundInputNotFittingInMemory) {
currentInputCumulativeCardinality = -1L;
} else {
Long cardinality = computeCumulativeCardinality(parentOp);
if (cardinality == null) {
// We could not get stats, we cannot convert
return null;
}
currentInputCumulativeCardinality = cardinality;
}
// This input is the big table if it is contained in the big candidates set, and either:
// 1) we have not chosen a big table yet, or
// 2) it has been chosen as the big table above, or
// 3) the cumulative cardinality for this input is higher, or
// 4) the cumulative cardinality is equal, but the size is bigger.
boolean selectedBigTable = bigTableCandidateSet.contains(pos)
    && (bigInputStat == null || currentInputNotFittingInMemory
        || (!foundInputNotFittingInMemory
            && (currentInputCumulativeCardinality > bigInputCumulativeCardinality
                || (currentInputCumulativeCardinality == bigInputCumulativeCardinality
                    && inputSize > computeOnlineDataSize(bigInputStat)))));
if (bigInputStat != null && selectedBigTable) {
// We are replacing the current big table with a new one, thus
// we need to count the current one as a map table then.
totalSize += computeOnlineDataSize(bigInputStat);
// if the number of distinct entries exceeds the hash table threshold, consider converting to a dynamic partitioned hash join instead
if (checkMapJoinThresholds && !checkNumberOfEntriesForHashTable(joinOp, bigTablePosition, context)) {
convertDPHJ = true;
}
} else if (!selectedBigTable) {
// We are not using this input as the big table;
// count it as a hash-side (map) table instead.
totalSize += inputSize;
// if the number of distinct entries exceeds the hash table threshold, consider converting to a dynamic partitioned hash join instead
if (checkMapJoinThresholds && !checkNumberOfEntriesForHashTable(joinOp, pos, context)) {
convertDPHJ = true;
}
}
if (totalSize / buckets > maxSize) {
// the combined size of the hash-side tables exceeds the configured limit, hence cannot convert.
return null;
}
if (selectedBigTable) {
bigTablePosition = pos;
bigInputCumulativeCardinality = currentInputCumulativeCardinality;
bigInputStat = currInputStat;
}
}
if (bigTablePosition == -1) {
LOG.debug("No big table selected, no MapJoin");
return null;
}
// Check if size of data to shuffle (larger table) is less than given max size
if (checkMapJoinThresholds && convertDPHJ && checkShuffleSizeForLargeTable(joinOp, bigTablePosition, context)) {
LOG.debug("Conditions to convert to MapJoin are not met");
return null;
}
// only allow cross product in map joins if build side is 'small'
boolean cartesianProductEdgeEnabled = HiveConf.getBoolVar(context.conf, HiveConf.ConfVars.TEZ_CARTESIAN_PRODUCT_EDGE_ENABLED);
if (cartesianProductEdgeEnabled && !hasOuterJoin(joinOp) && isCrossProduct(joinOp)) {
for (int i = 0; i < joinOp.getParentOperators().size(); i++) {
if (i != bigTablePosition) {
Statistics parentStats = joinOp.getParentOperators().get(i).getStatistics();
if (parentStats.getNumRows() > HiveConf.getIntVar(context.conf, HiveConf.ConfVars.XPRODSMALLTABLEROWSTHRESHOLD)) {
// a small-side input is estimated to produce more than the threshold rows, so we disable the map join for this cross product.
return null;
}
}
}
}
// We store the total memory that this MapJoin is going to use,
// which is calculated as totalSize/buckets, with totalSize
// equal to sum of small tables size.
joinOp.getConf().setInMemoryDataSize(totalSize / buckets);
return new MapJoinConversion(bigTablePosition, isFullOuterJoin, isFullOuterEnabledForDynamicPartitionHashJoin, isFullOuterEnabledForMapJoin);
}
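The core of the loop above is the big-table selection heuristic: among the candidate positions, prefer the input with the highest cumulative cardinality (using online data size as a tie-breaker), while keeping the running sum of hash-side sizes under maxSize per bucket. The following minimal sketch restates that heuristic in isolation; InputStat, onlineDataSize and cumulativeCardinality are illustrative stand-ins rather than Hive's Statistics API, and the not-fitting-in-memory and DPHJ paths are omitted.

// Minimal standalone sketch of the big-table selection loop above, under simplified assumptions.
import java.util.List;
import java.util.Set;

final class BigTableChooser {

  record InputStat(long onlineDataSize, long cumulativeCardinality) {}

  /** Returns the chosen big-table position, or -1 if the join cannot become a map join. */
  static int chooseBigTable(List<InputStat> inputs, Set<Integer> bigTableCandidates,
                            int buckets, long maxSize) {
    int bigPos = -1;
    long bigCardinality = -1L;
    long bigSize = -1L;
    long totalSmallSize = 0L;

    for (int pos = 0; pos < inputs.size(); pos++) {
      InputStat cur = inputs.get(pos);
      boolean selected = bigTableCandidates.contains(pos)
          && (bigPos == -1
              || cur.cumulativeCardinality() > bigCardinality
              || (cur.cumulativeCardinality() == bigCardinality
                  && cur.onlineDataSize() > bigSize));
      if (selected) {
        if (bigPos != -1) {
          // demote the previous big-table pick to the hash (small) side
          totalSmallSize += bigSize;
        }
        bigPos = pos;
        bigCardinality = cur.cumulativeCardinality();
        bigSize = cur.onlineDataSize();
      } else {
        totalSmallSize += cur.onlineDataSize();
      }
      if (totalSmallSize / buckets > maxSize) {
        // the hash-side tables no longer fit in a single task's memory budget
        return -1;
      }
    }
    return bigPos;
  }
}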
Use of org.apache.hadoop.hive.ql.plan.Statistics in project hive by apache.
Class ConvertJoinMapJoin, method checkNumberOfEntriesForHashTable.
/* Returns true if it passes the test, false otherwise. */
private boolean checkNumberOfEntriesForHashTable(JoinOperator joinOp, int position, OptimizeTezProcContext context) {
long max = HiveConf.getLongVar(context.parseContext.getConf(), HiveConf.ConfVars.HIVECONVERTJOINMAXENTRIESHASHTABLE);
if (max < 1) {
// Max is disabled, we can safely return true
return true;
}
// Calculate number of different entries and evaluate
ReduceSinkOperator rsOp = (ReduceSinkOperator) joinOp.getParentOperators().get(position);
List<String> keys = StatsUtils.getQualifedReducerKeyNames(rsOp.getConf().getOutputKeyColumnNames());
Statistics inputStats = rsOp.getStatistics();
List<ColStatistics> columnStats = new ArrayList<>();
for (String key : keys) {
ColStatistics cs = inputStats.getColumnStatisticsFromColName(key);
if (cs == null) {
return true;
}
columnStats.add(cs);
}
long numRows = inputStats.getNumRows();
long estimation = estimateNDV(numRows, columnStats);
LOG.debug("Estimated NDV for input {}: {}; Max NDV for MapJoin conversion: {}", position, estimation, max);
if (estimation > max) {
// Estimation larger than max
LOG.debug("Number of different entries for HashTable is greater than the max; " + "we do not convert to MapJoin");
return false;
}
// We can proceed with the conversion
return true;
}
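The decision above hinges on estimateNDV, which estimates the number of distinct composite keys from the per-column statistics. A hedged sketch of that kind of estimate, assuming a simple product-of-NDVs model capped at the row count (Hive's actual implementation may differ in detail):

// Hedged sketch: multiply the distinct-value counts of the key columns and cap at numRows.
import java.util.List;

final class NdvSketch {
  static long estimateNdv(long numRows, List<Long> perKeyColumnNdv) {
    long ndv = 1L;
    for (long colNdv : perKeyColumnNdv) {
      if (colNdv <= 0) {
        return numRows;                // unknown column stats: fall back to the row count
      }
      ndv *= colNdv;
      if (ndv >= numRows || ndv < 0) { // cap at numRows; the second test guards against overflow
        return numRows;
      }
    }
    return ndv;
  }
}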
Use of org.apache.hadoop.hive.ql.plan.Statistics in project hive by apache.
Class TezCompiler, method getBloomFilterSelectivity.
private static double getBloomFilterSelectivity(SelectOperator sel, ExprNodeDesc selExpr, Statistics filStats, ExprNodeDesc tsExpr) {
Statistics selStats = sel.getStatistics();
assert selStats != null;
assert filStats != null;
// For cardinality values use numRows as default, try to use ColStats if available
long selKeyCardinality = selStats.getNumRows();
long tsKeyCardinality = filStats.getNumRows();
long keyDomainCardinality = selKeyCardinality + tsKeyCardinality;
ExprNodeColumnDesc selCol = ExprNodeDescUtils.getColumnExpr(selExpr);
ExprNodeColumnDesc tsCol = ExprNodeDescUtils.getColumnExpr(tsExpr);
if (selCol != null && tsCol != null) {
// Check if there are column stats available for these columns
ColStatistics selColStat = selStats.getColumnStatisticsFromColName(selCol.getColumn());
ColStatistics filColStat = filStats.getColumnStatisticsFromColName(tsCol.getColumn());
if (canUseNDV(selColStat)) {
selKeyCardinality = selColStat.getCountDistint();
}
// Get colstats for the original table column for selCol if possible, this would have
// more accurate information about the original NDV of the column before any filtering.
ColStatistics selColSourceStat = null;
if (selColStat != null) {
ExprNodeDescUtils.ColumnOrigin selColSource = ExprNodeDescUtils.findColumnOrigin(selCol, sel);
if (selColSource != null && selColSource.op.getStatistics() != null) {
selColSourceStat = selColSource.op.getStatistics().getColumnStatisticsFromColName(selColSource.col.getColumn());
}
}
long domainCardinalityFromColStats = getCombinedKeyDomainCardinality(selColStat, selColSourceStat, filColStat);
if (domainCardinalityFromColStats >= 0) {
keyDomainCardinality = domainCardinalityFromColStats;
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("BloomFilter selectivity for " + selCol + " to " + tsCol + ", selKeyCardinality=" + selKeyCardinality + ", tsKeyCardinality=" + tsKeyCardinality + ", keyDomainCardinality=" + keyDomainCardinality);
}
// Selectivity: key cardinality of semijoin / domain cardinality
return selKeyCardinality / (double) keyDomainCardinality;
}
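The returned value is simply the SELECT-side key cardinality divided by the key domain cardinality, the latter refined from column statistics when available. A worked example with purely illustrative numbers:

// Illustrative numbers only, not taken from any real plan.
final class BloomSelectivityExample {
  public static void main(String[] args) {
    long selKeyCardinality = 1_000L;      // distinct semijoin keys on the SELECT side
    long keyDomainCardinality = 50_000L;  // distinct keys in the combined key domain
    double selectivity = selKeyCardinality / (double) keyDomainCardinality;  // 0.02
    // A semijoin bloom filter built from the SELECT side is then expected to pass
    // roughly 2% of the probe-side rows (ignoring bloom-filter false positives).
    System.out.println("selectivity = " + selectivity);
  }
}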
Use of org.apache.hadoop.hive.ql.plan.Statistics in project hive by apache.
Class ReduceSinkMapJoinProc, method processReduceSinkToHashJoin.
public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, MapJoinOperator mapJoinOp, GenTezProcContext context) throws SemanticException {
// remove the tag for in-memory side of mapjoin
parentRS.getConf().setSkipTag(true);
parentRS.setSkipTag(true);
// Mark this small table as being processed
if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
context.mapJoinToUnprocessedSmallTableReduceSinks.get(mapJoinOp).remove(parentRS);
}
List<BaseWork> mapJoinWork = null;
/*
* if there was a pre-existing work generated for the big-table mapjoin side,
* we need to hook the work generated for the RS (associated with the RS-MJ pattern)
* with the pre-existing work.
*
* Otherwise, we need to record that the mapjoin op should be linked to the RS work
* (associated with the RS-MJ pattern).
*
*/
mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
BaseWork parentWork = getMapJoinParentWork(context, parentRS);
// set the link between mapjoin and parent vertex
int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
if (pos == -1) {
throw new SemanticException("Cannot find position of parent in mapjoin");
}
MapJoinDesc joinConf = mapJoinOp.getConf();
long keyCount = Long.MAX_VALUE, rowCount = Long.MAX_VALUE, bucketCount = 1;
long tableSize = Long.MAX_VALUE;
Statistics stats = parentRS.getStatistics();
if (stats != null) {
keyCount = rowCount = stats.getNumRows();
if (keyCount <= 0) {
keyCount = rowCount = Long.MAX_VALUE;
}
tableSize = stats.getDataSize();
List<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
if (keyCols != null && !keyCols.isEmpty()) {
// See if we can arrive at a smaller number using distinct stats from key columns.
long maxKeyCount = 1;
String prefix = Utilities.ReduceField.KEY.toString();
for (String keyCol : keyCols) {
ExprNodeDesc realCol = parentRS.getColumnExprMap().get(prefix + "." + keyCol);
ColStatistics cs = StatsUtils.getColStatisticsFromExpression(context.conf, stats, realCol);
if (cs == null || cs.getCountDistint() <= 0) {
maxKeyCount = Long.MAX_VALUE;
break;
}
maxKeyCount *= cs.getCountDistint();
if (maxKeyCount >= keyCount) {
break;
}
}
keyCount = Math.min(maxKeyCount, keyCount);
}
if (joinConf.isBucketMapJoin()) {
OpTraits opTraits = mapJoinOp.getOpTraits();
bucketCount = (opTraits == null) ? -1 : opTraits.getNumBuckets();
if (bucketCount > 0) {
// We cannot obtain a better estimate without CustomPartitionVertex providing it
// to us somehow; in which case using statistics would be completely unnecessary.
keyCount /= bucketCount;
tableSize /= bucketCount;
}
} else if (joinConf.isDynamicPartitionHashJoin()) {
// For dynamic partitioned hash join, assuming table is split evenly among the reduce tasks.
bucketCount = parentRS.getConf().getNumReducers();
keyCount /= bucketCount;
tableSize /= bucketCount;
}
}
if (keyCount == 0) {
keyCount = 1;
}
if (tableSize == 0) {
tableSize = 1;
}
LOG.info("Mapjoin " + mapJoinOp + "(bucket map join = " + joinConf.isBucketMapJoin() + "), pos: " + pos + " --> " + parentWork.getName() + " (" + keyCount + " keys estimated from " + rowCount + " rows, " + bucketCount + " buckets)");
joinConf.getParentToInput().put(pos, parentWork.getName());
if (keyCount != Long.MAX_VALUE) {
joinConf.getParentKeyCounts().put(pos, keyCount);
}
joinConf.getParentDataSizes().put(pos, tableSize);
int numBuckets = -1;
EdgeType edgeType = EdgeType.BROADCAST_EDGE;
if (joinConf.isBucketMapJoin()) {
numBuckets = (Integer) joinConf.getBigTableBucketNumMapping().values().toArray()[0];
/*
* Here, we can be in one of 4 states.
*
* 1. If map join work is null implies that we have not yet traversed the big table side. We
* just need to see if we can find a reduce sink operator in the big table side. This would
* imply a reduce side operation.
*
* 2. If we don't find a reducesink in 1 it has to be the case that it is a map side operation.
*
* 3. If we have already created a work item for the big table side, we need to see if we can
* find a table scan operator in the big table side. This would imply a map side operation.
*
* 4. If we don't find a table scan operator, it has to be a reduce side operation.
*/
if (mapJoinWork == null) {
Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), ReduceSinkOperator.class);
if (rootOp == null) {
// likely we found a table scan operator
edgeType = EdgeType.CUSTOM_EDGE;
} else {
// we have found a reduce sink
edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
}
} else {
Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), TableScanOperator.class);
if (rootOp != null) {
// likely we found a table scan operator
edgeType = EdgeType.CUSTOM_EDGE;
} else {
// we have found a reduce sink
edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
}
}
} else if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
if (parentRS.getConf().isForwarding()) {
edgeType = EdgeType.ONE_TO_ONE_EDGE;
} else {
edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
}
}
if (edgeType == EdgeType.CUSTOM_EDGE) {
// disable auto parallelism for bucket map joins
parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
}
TezEdgeProperty edgeProp = new TezEdgeProperty(null, edgeType, numBuckets);
if (mapJoinWork != null) {
for (BaseWork myWork : mapJoinWork) {
// link the work with the work associated with the reduce sink that triggered this rule
TezWork tezWork = context.currentTask.getWork();
LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
tezWork.connect(parentWork, myWork, edgeProp);
if (edgeType == EdgeType.CUSTOM_EDGE) {
tezWork.setVertexType(myWork, VertexType.INITIALIZED_EDGES);
}
ReduceSinkOperator r = null;
if (context.connectedReduceSinks.contains(parentRS)) {
LOG.debug("Cloning reduce sink " + parentRS + " for multi-child broadcast edge");
// we've already set this one up. Need to clone for the next work.
r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(parentRS.getCompilationOpContext(), (ReduceSinkDesc) parentRS.getConf().clone(), new RowSchema(parentRS.getSchema()), parentRS.getParentOperators());
context.clonedReduceSinks.add(r);
} else {
r = parentRS;
}
// remember the output name of the reduce sink
r.getConf().setOutputName(myWork.getName());
context.connectedReduceSinks.add(r);
}
}
// remember in case we need to connect additional work later
Map<BaseWork, TezEdgeProperty> linkWorkMap = null;
if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
} else {
linkWorkMap = new HashMap<BaseWork, TezEdgeProperty>();
}
linkWorkMap.put(parentWork, edgeProp);
context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
if (reduceSinks == null) {
reduceSinks = new ArrayList<ReduceSinkOperator>();
}
reduceSinks.add(parentRS);
context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
// create the dummy operators
List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
// create a new operator: HashTableDummyOperator, which shares the table desc
HashTableDummyDesc desc = new HashTableDummyDesc();
HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(parentRS.getCompilationOpContext(), desc);
TableDesc tbl;
// need to create the correct table descriptor for key/value
RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
dummyOp.getConf().setTbl(tbl);
Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
StringBuilder keyOrder = new StringBuilder();
StringBuilder keyNullOrder = new StringBuilder();
for (ExprNodeDesc k : keyCols) {
keyOrder.append("+");
keyNullOrder.append(NullOrdering.defaultNullOrder(context.conf).getSign());
}
TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), keyNullOrder.toString());
mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
// let the dummy op be the parent of mapjoin op
mapJoinOp.replaceParent(parentRS, dummyOp);
List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
dummyChildren.add(mapJoinOp);
dummyOp.setChildOperators(dummyChildren);
dummyOperators.add(dummyOp);
// cut the operator tree so as to not retain connections from the parent RS downstream
List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
int childIndex = childOperators.indexOf(mapJoinOp);
childOperators.remove(childIndex);
// the map-join work needs to know about the dummy operators so they can be initialized at task startup
if (mapJoinWork != null) {
for (BaseWork myWork : mapJoinWork) {
LOG.debug("adding dummy op to work " + myWork.getName() + " from MJ work: " + dummyOp);
myWork.addDummyOp(dummyOp);
}
}
if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
dummyOperators.add(op);
}
}
context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
return true;
}
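The edge-type choice above reduces to a handful of cases. The condensed restatement below makes them explicit; the enum mirrors the EdgeType names used above, while the boolean parameters stand in for the plan inspections (finding a TableScan or ReduceSink upstream of the big table, and the forwarding flag on the parent ReduceSink) and are not part of Hive's API.

// Condensed restatement of the edge-type choice; the booleans stand in for the plan checks done above.
final class EdgeChoice {

  enum Edge { BROADCAST_EDGE, CUSTOM_EDGE, CUSTOM_SIMPLE_EDGE, ONE_TO_ONE_EDGE }

  static Edge pickEdge(boolean bucketMapJoin, boolean bigTableIsMapSide,
                       boolean dynamicPartitionHashJoin, boolean parentRsForwards) {
    if (bucketMapJoin) {
      // map-side big table -> bucketed custom edge; reduce-side big table -> custom simple edge
      return bigTableIsMapSide ? Edge.CUSTOM_EDGE : Edge.CUSTOM_SIMPLE_EDGE;
    }
    if (dynamicPartitionHashJoin) {
      // a forwarding parent ReduceSink allows a one-to-one edge, otherwise custom simple
      return parentRsForwards ? Edge.ONE_TO_ONE_EDGE : Edge.CUSTOM_SIMPLE_EDGE;
    }
    // default: broadcast the small table to every task of the big-table vertex
    return Edge.BROADCAST_EDGE;
  }
}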
Use of org.apache.hadoop.hive.ql.plan.Statistics in project hive by apache.
Class SetHashGroupByMinReduction, method process.
@SuppressWarnings("unchecked")
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext, Object... nodeOutputs) throws SemanticException {
GroupByOperator groupByOperator = (GroupByOperator) nd;
GroupByDesc desc = groupByOperator.getConf();
if (desc.getMode() != Mode.HASH || groupByOperator.getStatistics().getBasicStatsState() != State.COMPLETE || groupByOperator.getStatistics().getColumnStatsState() != State.COMPLETE) {
return null;
}
// compute product of distinct values of grouping columns
List<ColStatistics> colStats = new ArrayList<>();
for (int i = 0; i < desc.getKeys().size(); i++) {
ColumnInfo ci = groupByOperator.getSchema().getSignature().get(i);
colStats.add(groupByOperator.getStatistics().getColumnStatisticsFromColName(ci.getInternalName()));
}
Statistics parentStats = groupByOperator.getParentOperators().get(0).getStatistics();
long ndvProduct = StatsUtils.computeNDVGroupingColumns(colStats, parentStats, true);
// if ndvProduct is 0 then column stats state must be partial and we are missing column statistics for at least one grouping column
if (ndvProduct == 0) {
return null;
}
long numRows = parentStats.getNumRows();
if (ndvProduct > numRows) {
ndvProduct = numRows;
}
// change the min reduction for hash group by
float defaultMinReductionHashAggrFactor = desc.getMinReductionHashAggr();
float defaultMinReductionHashAggrFactorLowerBound = desc.getMinReductionHashAggrLowerBound();
float minReductionHashAggrFactor = 1f - ((float) ndvProduct / numRows);
if (minReductionHashAggrFactor < defaultMinReductionHashAggrFactorLowerBound) {
minReductionHashAggrFactor = defaultMinReductionHashAggrFactorLowerBound;
}
if (minReductionHashAggrFactor < defaultMinReductionHashAggrFactor) {
desc.setMinReductionHashAggr(minReductionHashAggrFactor);
if (LOG.isDebugEnabled()) {
LOG.debug("Minimum reduction for hash group by operator {} set to {}", groupByOperator, minReductionHashAggrFactor);
}
}
return null;
}
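The new factor is derived directly from the statistics: with ndvProduct distinct grouping-key combinations expected out of numRows input rows, the expected reduction is 1 - ndvProduct / numRows, clamped to the configured lower bound and applied only when it is below the current default. A worked example with illustrative numbers (the configured defaults here are hypothetical):

// Illustrative numbers only; the configured defaults vary by deployment.
final class MinReductionExample {
  public static void main(String[] args) {
    long numRows = 1_000_000L;   // rows reaching the hash GROUP BY (from parent stats)
    long ndvProduct = 900_000L;  // estimated distinct grouping-key combinations
    float lowerBound = 0.05f;    // hypothetical lower-bound config value
    float currentDefault = 0.5f; // hypothetical default min-reduction config value

    float minReduction = 1f - ((float) ndvProduct / numRows);  // ~0.1: only ~10% reduction expected
    if (minReduction < lowerBound) {
      minReduction = lowerBound;                               // unchanged in this example
    }
    if (minReduction < currentDefault) {
      // 0.1 < 0.5: the operator's hash-mode threshold is relaxed to match what the
      // statistics predict rather than keeping the stricter configured default.
      System.out.println("new min reduction = " + minReduction);
    }
  }
}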