Use of org.apache.hadoop.hive.ql.optimizer.physical.MetadataOnlyOptimizer.WalkerCtx in project hive by apache.
The class NullScanTaskDispatcher, method dispatch. For each MapWork of the task, dispatch walks the operator tree with a rule dispatcher, accumulates in a shared WalkerCtx the table scans that can be answered from metadata alone, and hands any hits to processTableScans; sketches of the WalkerCtx side follow the listing.
@Override
public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs)
    throws SemanticException {
  Task<?> task = (Task<?>) nd;

  // create the context for walking operators
  ParseContext parseContext = physicalContext.getParseContext();
  WalkerCtx walkerCtx = new WalkerCtx();

  // Sort the map works by name so the rewrite is deterministic
  List<MapWork> mapWorks = new ArrayList<MapWork>(task.getMapWork());
  Collections.sort(mapWorks, new Comparator<MapWork>() {
    @Override
    public int compare(MapWork o1, MapWork o2) {
      return o1.getName().compareTo(o2.getName());
    }
  });

  for (MapWork mapWork : mapWorks) {
    LOG.debug("Looking at: {}", mapWork.getName());
    Collection<Operator<? extends OperatorDesc>> topOperators =
        mapWork.getAliasToWork().values();
    if (topOperators.isEmpty()) {
      LOG.debug("No top operators");
      return null;
    }

    LOG.debug("Looking for table scans where optimization is applicable");

    // The dispatcher fires the processor corresponding to the closest
    // matching rule and passes the context along
    SemanticDispatcher disp = new DefaultRuleDispatcher(null, rules, walkerCtx);
    SemanticGraphWalker ogw = new PreOrderOnceWalker(disp);

    // Create a list of topOp nodes
    ArrayList<Node> topNodes = new ArrayList<>();
    // Get the top Nodes for this task
    Collection<TableScanOperator> topOps = parseContext.getTopOps().values();
    for (Operator<? extends OperatorDesc> workOperator : topOperators) {
      if (topOps.contains(workOperator)) {
        topNodes.add(workOperator);
      }
    }

    Operator<? extends OperatorDesc> reducer = task.getReducer(mapWork);
    if (reducer != null) {
      topNodes.add(reducer);
    }

    ogw.startWalking(topNodes, null);

    int scanTableSize = walkerCtx.getMetadataOnlyTableScans().size();
    LOG.debug("Found {} null table scans", scanTableSize);
    if (scanTableSize > 0) {
      processTableScans(mapWork, walkerCtx.getMetadataOnlyTableScans());
    }
  }
  return null;
}
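For context, WalkerCtx is the NodeProcessorCtx instance that the walk above threads through every fired rule; dispatch only ever reads getMetadataOnlyTableScans() from it. Below is a minimal sketch of the contract that usage implies, assuming a two-phase candidate/confirmed design; the class name WalkerCtxSketch and the mutators markCandidate/confirmCandidates are illustrative assumptions, not Hive's actual API.

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;

class WalkerCtxSketch implements NodeProcessorCtx {
  // Scans a rule has flagged as possibly metadata-only (hypothetical phase).
  private final Set<TableScanOperator> candidates = new HashSet<>();
  // Scans confirmed to need no real input rows.
  private final Set<TableScanOperator> confirmed = new HashSet<>();

  // Hypothetical mutator: a rule proposes a scan as metadata-only.
  void markCandidate(TableScanOperator ts) {
    candidates.add(ts);
  }

  // Hypothetical mutator: a later rule promotes all pending candidates.
  void confirmCandidates() {
    confirmed.addAll(candidates);
    candidates.clear();
  }

  // The one accessor dispatch() actually relies on in the listing above.
  Set<TableScanOperator> getMetadataOnlyTableScans() {
    return confirmed;
  }
}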
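The rules map handed to DefaultRuleDispatcher pairs a pattern over the operator stack with a SemanticNodeProcessor that mutates the shared context. A hedged sketch of how such a map could be assembled against the sketch above; the single "TS%" rule and the processor body are illustrative assumptions, not the optimizer's real rule set.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Stack;

import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.parse.SemanticException;

class NullScanRulesSketch {
  // Build a rule map that fires on every TableScanOperator reached by the
  // walk and records it as a candidate in the shared WalkerCtxSketch.
  static Map<SemanticRule, SemanticNodeProcessor> buildRules() {
    Map<SemanticRule, SemanticNodeProcessor> rules = new LinkedHashMap<>();
    rules.put(
        new RuleRegExp("TS", TableScanOperator.getOperatorName() + "%"),
        new SemanticNodeProcessor() {
          @Override
          public Object process(Node nd, Stack<Node> stack,
              NodeProcessorCtx procCtx, Object... nodeOutputs)
              throws SemanticException {
            ((WalkerCtxSketch) procCtx).markCandidate((TableScanOperator) nd);
            return null;
          }
        });
    return rules;
  }
}

Wired as new DefaultRuleDispatcher(null, NullScanRulesSketch.buildRules(), walkerCtx) inside a PreOrderOnceWalker, this reproduces the shape of the walk in dispatch above.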