Use of org.apache.hadoop.hive.ql.plan.ColStatistics in project hive by apache.
The class FilterSelectivityEstimator, method getMaxNulls.
/**
 * Given a RexCall and a TableScan, find the maximum number of nulls.
 * Currently it simply picks the column with the highest null count.
 *
 * TODO: improve this
 *
 * @param call
 * @param t
 * @return
 */
private long getMaxNulls(RexCall call, HiveTableScan t) {
  long tmpNoNulls = 0;
  long maxNoNulls = 0;
  Set<Integer> iRefSet = HiveCalciteUtil.getInputRefs(call);
  List<ColStatistics> colStats = t.getColStat(new ArrayList<Integer>(iRefSet));
  for (ColStatistics cs : colStats) {
    tmpNoNulls = cs.getNumNulls();
    if (tmpNoNulls > maxNoNulls) {
      maxNoNulls = tmpNoNulls;
    }
  }
  return maxNoNulls;
}
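A null count like this typically feeds a selectivity estimate. As a minimal sketch (the mq and tableScan names are illustrative assumptions, not taken from the snippet above), an IS NOT NULL predicate could be estimated as:

// Hypothetical caller: estimate IS NOT NULL selectivity from the null count.
double totalRows = mq.getRowCount(tableScan);    // row count from Calcite metadata
double maxNulls = getMaxNulls(call, tableScan);  // worst-case null count from above
// Fraction of rows expected to survive the IS NOT NULL filter.
double selectivity = (totalRows - maxNulls) / Math.max(totalRows, 1);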
Use of org.apache.hadoop.hive.ql.plan.ColStatistics in project hive by apache.
The class StatsUtils, method inferForeignKey.
/**
 * Infer a foreign key relationship from the given column statistics.
 * @param csPK - column statistics of the primary key
 * @param csFK - column statistics of the potential foreign key
 * @return true if the potential foreign key's value range lies within the
 *         primary key's range, false otherwise
 */
public static boolean inferForeignKey(ColStatistics csPK, ColStatistics csFK) {
  if (csPK != null && csFK != null) {
    if (csPK.isPrimaryKey()) {
      if (csPK.getRange() != null && csFK.getRange() != null) {
        ColStatistics.Range pkRange = csPK.getRange();
        ColStatistics.Range fkRange = csFK.getRange();
        return isWithin(fkRange, pkRange);
      }
    }
  }
  return false;
}
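The isWithin helper is not shown in this snippet; a minimal sketch of such a containment check, assuming ColStatistics.Range exposes numeric minValue/maxValue fields, might look like:

// Sketch (assumed shape of the helper, not the verbatim Hive implementation):
// true if range1 is fully contained in range2.
static boolean isWithin(ColStatistics.Range range1, ColStatistics.Range range2) {
  if (range1.minValue == null || range1.maxValue == null
      || range2.minValue == null || range2.maxValue == null) {
    return false; // incomplete statistics: cannot prove containment
  }
  return range1.minValue.doubleValue() >= range2.minValue.doubleValue()
      && range1.maxValue.doubleValue() <= range2.maxValue.doubleValue();
}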
Use of org.apache.hadoop.hive.ql.plan.ColStatistics in project hive by apache.
The class ConvertJoinMapJoin, method checkNumberOfEntriesForHashTable.
/* Returns true if it passes the test, false otherwise. */
private boolean checkNumberOfEntriesForHashTable(JoinOperator joinOp, int position, OptimizeTezProcContext context) {
  long max = HiveConf.getLongVar(context.parseContext.getConf(), HiveConf.ConfVars.HIVECONVERTJOINMAXENTRIESHASHTABLE);
  if (max < 1) {
    // Max is disabled, we can safely return true
    return true;
  }
  // Calculate the number of distinct entries and evaluate
  ReduceSinkOperator rsOp = (ReduceSinkOperator) joinOp.getParentOperators().get(position);
  List<String> keys = StatsUtils.getQualifedReducerKeyNames(rsOp.getConf().getOutputKeyColumnNames());
  Statistics inputStats = rsOp.getStatistics();
  List<ColStatistics> columnStats = new ArrayList<>();
  for (String key : keys) {
    ColStatistics cs = inputStats.getColumnStatisticsFromColName(key);
    if (cs == null) {
      LOG.debug("Couldn't get statistics for: {}", key);
      return true;
    }
    columnStats.add(cs);
  }
  long numRows = inputStats.getNumRows();
  long estimation = estimateNDV(numRows, columnStats);
  LOG.debug("Estimated NDV for input {}: {}; Max NDV for MapJoin conversion: {}", position, estimation, max);
  if (estimation > max) {
    // Estimation larger than max
    LOG.debug("Number of different entries for HashTable is greater than the max; not converting to MapJoin");
    return false;
  }
  // We can proceed with the conversion
  return true;
}
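The threshold is read from HiveConf, and the check above shows that any value below 1 disables the gate entirely. A minimal sketch (assuming the ConfVars constant maps to the hive.auto.convert.join.hashtable.max.entries property; verify against your Hive version):

// Disable the hash-table entry-count gate for mapjoin conversion.
HiveConf conf = new HiveConf();
conf.setLongVar(HiveConf.ConfVars.HIVECONVERTJOINMAXENTRIESHASHTABLE, -1L);
// With max < 1, checkNumberOfEntriesForHashTable(...) returns true unconditionally.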
Use of org.apache.hadoop.hive.ql.plan.ColStatistics in project hive by apache.
The class ConvertJoinMapJoin, method estimateNDV.
private static long estimateNDV(long numRows, List<ColStatistics> columnStats) {
  // If there is a single column, return its number of distinct values
  if (columnStats.size() == 1) {
    return columnStats.get(0).getCountDistint();
  }
  // The expected number of distinct values when choosing p values
  // with replacement from n integers is n * (1 - ((n - 1) / n) ^ p).
  //
  // If we have several uniformly distributed attributes A1 ... Am
  // with N1 ... Nm distinct values, they behave as one uniformly
  // distributed attribute with N1 * ... * Nm distinct values.
  long n = 1L;
  for (ColStatistics cs : columnStats) {
    final long ndv = cs.getCountDistint();
    if (ndv > 1) {
      n = StatsUtils.safeMult(n, ndv);
    }
  }
  final double nn = (double) n;
  final double a = (nn - 1d) / nn;
  if (a == 1d) {
    // (nn - 1) / nn rounds to exactly 1 when nn is large, which would make
    // the estimate collapse to 0; fall back to numRows instead.
    return numRows;
  }
  final double v = nn * (1d - Math.pow(a, numRows));
  // Cap at numRows: numeric error in the power function can cause the
  // estimate to go a few % over.
  return Math.min(Math.round(v), numRows);
}
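As a concrete check of the formula (numbers illustrative, not from the snippet): choosing p = 500 rows with replacement from n = 1,000 joint distinct values gives 1000 * (1 - (999/1000)^500) ≈ 1000 * (1 - 0.606) ≈ 394 expected distinct entries, correctly below both n and numRows:

// Illustrative check of the expected-NDV formula with made-up numbers.
double n = 1000d;   // joint NDV across the key columns
double p = 500d;    // number of rows
double expectedNdv = n * (1d - Math.pow((n - 1d) / n, p));
// expectedNdv ≈ 393.6: about 394 of the 1,000 values are expected to be hit.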
Use of org.apache.hadoop.hive.ql.plan.ColStatistics in project hive by apache.
The class ReduceSinkMapJoinProc, method processReduceSinkToHashJoin.
public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, MapJoinOperator mapJoinOp, GenTezProcContext context) throws SemanticException {
  // remove the tag for the in-memory side of the mapjoin
  parentRS.getConf().setSkipTag(true);
  parentRS.setSkipTag(true);
  // Mark this small table as being processed
  if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
    context.mapJoinToUnprocessedSmallTableReduceSinks.get(mapJoinOp).remove(parentRS);
  }
  List<BaseWork> mapJoinWork = null;
  /*
   * If there was pre-existing work generated for the big-table mapjoin side,
   * we need to hook the work generated for the RS (associated with the RS-MJ pattern)
   * into the pre-existing work.
   *
   * Otherwise, we need to associate the mapjoin op with the RS work
   * (associated with the RS-MJ pattern).
   */
  mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
  BaseWork parentWork = getMapJoinParentWork(context, parentRS);
  // set the link between mapjoin and parent vertex
  int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
  if (pos == -1) {
    throw new SemanticException("Cannot find position of parent in mapjoin");
  }
  MapJoinDesc joinConf = mapJoinOp.getConf();
  long keyCount = Long.MAX_VALUE, rowCount = Long.MAX_VALUE, bucketCount = 1;
  long tableSize = Long.MAX_VALUE;
  Statistics stats = parentRS.getStatistics();
  if (stats != null) {
    keyCount = rowCount = stats.getNumRows();
    if (keyCount <= 0) {
      keyCount = rowCount = Long.MAX_VALUE;
    }
    tableSize = stats.getDataSize();
    ArrayList<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
    if (keyCols != null && !keyCols.isEmpty()) {
      // See if we can arrive at a smaller number using distinct stats from the key columns.
      long maxKeyCount = 1;
      String prefix = Utilities.ReduceField.KEY.toString();
      for (String keyCol : keyCols) {
        ExprNodeDesc realCol = parentRS.getColumnExprMap().get(prefix + "." + keyCol);
        ColStatistics cs = StatsUtils.getColStatisticsFromExpression(context.conf, stats, realCol);
        if (cs == null || cs.getCountDistint() <= 0) {
          maxKeyCount = Long.MAX_VALUE;
          break;
        }
        maxKeyCount *= cs.getCountDistint();
        if (maxKeyCount >= keyCount) {
          break;
        }
      }
      keyCount = Math.min(maxKeyCount, keyCount);
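      // Worked example (illustrative numbers): two key columns with NDVs of
      // 1,000 and 50 give maxKeyCount = 50,000; if the stats report 80,000
      // rows, keyCount drops from 80,000 to 50,000 here.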
    }
    if (joinConf.isBucketMapJoin()) {
      OpTraits opTraits = mapJoinOp.getOpTraits();
      bucketCount = (opTraits == null) ? -1 : opTraits.getNumBuckets();
      if (bucketCount > 0) {
        // We cannot obtain a better estimate without CustomPartitionVertex providing it
        // to us somehow; in which case using statistics would be completely unnecessary.
        keyCount /= bucketCount;
        tableSize /= bucketCount;
      }
    } else if (joinConf.isDynamicPartitionHashJoin()) {
      // For dynamic partitioned hash join, assume the table is split evenly among the reduce tasks.
      bucketCount = parentRS.getConf().getNumReducers();
      keyCount /= bucketCount;
      tableSize /= bucketCount;
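      // Illustrative: with 4 reducers, an 800,000-key small table contributes
      // roughly 200,000 keys (and a quarter of its bytes) to each task.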
    }
  }
  if (keyCount == 0) {
    keyCount = 1;
  }
  if (tableSize == 0) {
    tableSize = 1;
  }
  LOG.info("Mapjoin " + mapJoinOp + " (bucket map join = " + joinConf.isBucketMapJoin() + "), pos: " + pos + " --> " + parentWork.getName() + " (" + keyCount + " keys estimated from " + rowCount + " rows, " + bucketCount + " buckets)");
  joinConf.getParentToInput().put(pos, parentWork.getName());
  if (keyCount != Long.MAX_VALUE) {
    joinConf.getParentKeyCounts().put(pos, keyCount);
  }
  joinConf.getParentDataSizes().put(pos, tableSize);
  int numBuckets = -1;
  EdgeType edgeType = EdgeType.BROADCAST_EDGE;
  if (joinConf.isBucketMapJoin()) {
    numBuckets = (Integer) joinConf.getBigTableBucketNumMapping().values().toArray()[0];
    /*
     * Here, we can be in one of 4 states.
     *
     * 1. If map join work is null, we have not yet traversed the big table side. We
     * just need to see if we can find a reduce sink operator in the big table side. This would
     * imply a reduce side operation.
     *
     * 2. If we don't find a reduce sink in 1, it has to be a map side operation.
     *
     * 3. If we have already created a work item for the big table side, we need to see if we can
     * find a table scan operator in the big table side. This would imply a map side operation.
     *
     * 4. If we don't find a table scan operator, it has to be a reduce side operation.
     */
    if (mapJoinWork == null) {
      Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), ReduceSinkOperator.class);
      if (rootOp == null) {
        // no reduce sink found; likely a table scan: map-side operation
        edgeType = EdgeType.CUSTOM_EDGE;
      } else {
        // we have found a reduce sink: reduce-side operation
        edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
      }
    } else {
      Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), TableScanOperator.class);
      if (rootOp != null) {
        // we found a table scan operator: map-side operation
        edgeType = EdgeType.CUSTOM_EDGE;
      } else {
        // no table scan found: reduce-side operation
        edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
      }
    }
  } else if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
    edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
  }
  if (edgeType == EdgeType.CUSTOM_EDGE) {
    // disable auto parallelism for bucket map joins
    parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
  }
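  // Hedged note (not from this snippet): in Hive-on-Tez, BROADCAST_EDGE ships
  // the full small-table output to every downstream task, CUSTOM_EDGE is the
  // bucket-mapjoin edge routed via CustomPartitionVertex, and
  // CUSTOM_SIMPLE_EDGE is a partitioned-but-unsorted shuffle; verify the exact
  // semantics against your Hive/Tez version.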
  TezEdgeProperty edgeProp = new TezEdgeProperty(null, edgeType, numBuckets);
  if (mapJoinWork != null) {
    for (BaseWork myWork : mapJoinWork) {
      // link the work with the work associated with the reduce sink that triggered this rule
      TezWork tezWork = context.currentTask.getWork();
      LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
      tezWork.connect(parentWork, myWork, edgeProp);
      if (edgeType == EdgeType.CUSTOM_EDGE) {
        tezWork.setVertexType(myWork, VertexType.INITIALIZED_EDGES);
      }
      ReduceSinkOperator r = null;
      if (context.connectedReduceSinks.contains(parentRS)) {
        LOG.debug("Cloning reduce sink for multi-child broadcast edge");
        // we've already set this one up. Need to clone for the next work.
        r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(parentRS.getCompilationOpContext(), (ReduceSinkDesc) parentRS.getConf().clone(), new RowSchema(parentRS.getSchema()), parentRS.getParentOperators());
        context.clonedReduceSinks.add(r);
      } else {
        r = parentRS;
      }
      // remember the output name of the reduce sink
      r.getConf().setOutputName(myWork.getName());
      context.connectedReduceSinks.add(r);
    }
  }
  // remember in case we need to connect additional work later
  Map<BaseWork, TezEdgeProperty> linkWorkMap = null;
  if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
    linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
  } else {
    linkWorkMap = new HashMap<BaseWork, TezEdgeProperty>();
  }
  linkWorkMap.put(parentWork, edgeProp);
  context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
  List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
  if (reduceSinks == null) {
    reduceSinks = new ArrayList<ReduceSinkOperator>();
  }
  reduceSinks.add(parentRS);
  context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
  // create the dummy operators
  List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
  // create a new HashTableDummyOperator, which shares the table desc
  HashTableDummyDesc desc = new HashTableDummyDesc();
  @SuppressWarnings("unchecked")
  HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(parentRS.getCompilationOpContext(), desc);
  TableDesc tbl;
  // need to create the correct table descriptor for key/value
  RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
  tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
  dummyOp.getConf().setTbl(tbl);
  Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
  List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
  StringBuilder keyOrder = new StringBuilder();
  StringBuilder keyNullOrder = new StringBuilder();
  for (ExprNodeDesc k : keyCols) {
    keyOrder.append("+");     // ascending sort order for each key column
    keyNullOrder.append("a"); // nulls sort first for each key column
  }
  TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), keyNullOrder.toString());
  mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
  // let the dummy op be the parent of the mapjoin op
  mapJoinOp.replaceParent(parentRS, dummyOp);
  List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
  dummyChildren.add(mapJoinOp);
  dummyOp.setChildOperators(dummyChildren);
  dummyOperators.add(dummyOp);
  // cut the operator tree so as to not retain connections from the parent RS downstream
  List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
  int childIndex = childOperators.indexOf(mapJoinOp);
  childOperators.remove(childIndex);
  // the work needs to know about the dummy operators so they can be initialized
  // at task startup
  if (mapJoinWork != null) {
    for (BaseWork myWork : mapJoinWork) {
      LOG.debug("adding dummy op to work " + myWork.getName() + " from MJ work: " + dummyOp);
      myWork.addDummyOp(dummyOp);
    }
  }
  if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
    for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
      dummyOperators.add(op);
    }
  }
  context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
  return true;
}