Use of org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor in project hive by apache.
The class SortedMergeBucketMapJoinOptimizer, method getCheckCandidateJoin:
// check if the join operator encountered is a candidate for being converted
// to a sort-merge join
private SemanticNodeProcessor getCheckCandidateJoin() {
  return new SemanticNodeProcessor() {

    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      SortBucketJoinProcCtx smbJoinContext = (SortBucketJoinProcCtx) procCtx;
      JoinOperator joinOperator = (JoinOperator) nd;
      int size = stack.size();
      if (!(stack.get(size - 1) instanceof JoinOperator) ||
          !(stack.get(size - 2) instanceof ReduceSinkOperator)) {
        smbJoinContext.getRejectedJoinOps().add(joinOperator);
        return null;
      }
      // If any operator on the stack does not support auto-conversion, this join should
      // not be converted.
      for (int pos = size - 3; pos >= 0; pos--) {
        Operator<? extends OperatorDesc> op = (Operator<? extends OperatorDesc>) stack.get(pos);
        if (!op.supportAutomaticSortMergeJoin()) {
          smbJoinContext.getRejectedJoinOps().add(joinOperator);
          return null;
        }
      }
      return null;
    }
  };
}
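The walk stack consulted above is maintained by the graph walker: it holds the full operator path from a root operator down to the current node, with nd itself on top (which is why stack.get(size - 1) is the JoinOperator). As a minimal illustration, here is a hypothetical processor (not from the Hive codebase) that only logs that path, relying on nothing beyond the Node interface's getName():

// Hypothetical sketch: log the operator path the walker maintains.
// stack.get(0) is a root operator (typically a TableScanOperator);
// stack.get(stack.size() - 1) is the node being processed, nd.
private SemanticNodeProcessor getPathLoggingProc() {
  return new SemanticNodeProcessor() {

    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      StringBuilder path = new StringBuilder();
      for (Node n : stack) {
        if (path.length() > 0) {
          path.append(" -> ");
        }
        path.append(n.getName());
      }
      System.out.println("operator path: " + path);
      return null;
    }
  };
}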
Use of org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor in project hive by apache.
The class SortedMergeBucketMapJoinOptimizer, method transform:
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  HiveConf conf = pctx.getConf();
  SortBucketJoinProcCtx smbJoinContext = new SortBucketJoinProcCtx(conf);
  // Get a list of joins which cannot be converted to a sort-merge join.
  // Only select and filter operators are currently allowed between the table scan
  // and the join; more operators can be added - the method supportAutomaticSortMergeJoin
  // dictates which operators are allowed.
  getListOfRejectedJoins(pctx, smbJoinContext);
  Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  // go through all map joins and find all that have bucket map join enabled
  opRules.put(new RuleRegExp("R1", MapJoinOperator.getOperatorName() + "%"),
      getSortedMergeBucketMapjoinProc(pctx));
  // if automatic conversion is enabled, check whether each remaining join can be
  // converted to a sort-merge join
  if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN)) {
    opRules.put(new RuleRegExp("R2", "JOIN%"), getSortedMergeJoinProc(pctx));
  }
  SemanticDispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, smbJoinContext);
  SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
  // Create a list of top operator nodes
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pctx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return pctx;
}
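Note that this dispatcher, unlike the ones below, receives a non-null default processor, which the dispatcher invokes for every node that no registered rule matches. The real getDefaultProc() is not part of this excerpt; a plausible sketch of such a default is simply a no-op:

// Hypothetical sketch of a no-op default processor: it runs for every operator
// that neither R1 nor R2 claims, so the walk continues without special handling.
private SemanticNodeProcessor getDefaultProc() {
  return new SemanticNodeProcessor() {

    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      return null;
    }
  };
}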
Use of org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor in project hive by apache.
The class SimpleFetchAggregation, method transform:
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  if (pctx.getFetchTask() != null || !pctx.getQueryProperties().isQuery() ||
      pctx.getQueryProperties().isAnalyzeRewrite() || pctx.getQueryProperties().isCTAS() ||
      pctx.getLoadFileWork().size() > 1 || !pctx.getLoadTableWork().isEmpty()) {
    return pctx;
  }
  String GBY = GroupByOperator.getOperatorName() + "%";
  String RS = ReduceSinkOperator.getOperatorName() + "%";
  String SEL = SelectOperator.getOperatorName() + "%";
  String FS = FileSinkOperator.getOperatorName() + "%";
  Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  opRules.put(new RuleRegExp("R1", GBY + RS + GBY + SEL + FS), new SingleGBYProcessor(pctx));
  opRules.put(new RuleRegExp("R2", GBY + RS + GBY + FS), new SingleGBYProcessor(pctx));
  SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
  SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pctx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return pctx;
}
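The R1 pattern is a plain concatenation of operator-name tokens, so it fires only when the walker's current path ends in the chain GBY -> RS -> GBY -> SEL -> FS; the matched operators then sit at fixed offsets from the top of the stack. A hypothetical sketch of how a processor body like SingleGBYProcessor's can pick them off (the real implementation is not shown in this excerpt, and the offsets below assume the R1 chain, not R2):

// Hypothetical sketch: with rule GBY%RS%GBY%SEL%FS%, nd is the FileSinkOperator
// and its ancestors sit at fixed offsets on the walk stack.
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
    Object... nodeOutputs) throws SemanticException {
  FileSinkOperator fs = (FileSinkOperator) nd;
  SelectOperator sel = (SelectOperator) stack.get(stack.size() - 2);
  GroupByOperator gby = (GroupByOperator) stack.get(stack.size() - 3);
  // examine gby's aggregation here to decide whether a simple fetch task suffices
  return null;
}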
Use of org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor in project hive by apache.
The class SortedDynPartitionTimeGranularityOptimizer, method transform:
@Override
public ParseContext transform(ParseContext pCtx) throws SemanticException {
  // create a walker which walks the tree in a DFS manner while maintaining the
  // operator stack. The dispatcher generates the plan from the operator tree
  Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  String FS = FileSinkOperator.getOperatorName() + "%";
  opRules.put(new RuleRegExp("Sorted Dynamic Partition Time Granularity", FS),
      getSortDynPartProc(pCtx));
  SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
  SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pCtx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return pCtx;
}
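Here both the default processor and the dispatcher context are null: operators other than file sinks are simply skipped, and the processor returned by getSortDynPartProc(pCtx) presumably closes over the ParseContext rather than reading a NodeProcessorCtx. A hypothetical sketch of that capture pattern (the real getSortDynPartProc is not shown in this excerpt):

// Hypothetical sketch: the processor captures the ParseContext directly,
// which is why the dispatcher can be constructed with a null context.
private SemanticNodeProcessor getSortDynPartProcSketch(final ParseContext pCtx) {
  return new SemanticNodeProcessor() {

    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      // the "FS%" rule guarantees this cast
      FileSinkOperator fsOp = (FileSinkOperator) nd;
      // pCtx (captured above) would supply the configuration and metadata
      // needed to rewrite the plan around fsOp
      return null;
    }
  };
}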
Use of org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor in project hive by apache.
The class TablePropertyEnrichmentOptimizer, method transform:
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  LOG.info("TablePropertyEnrichmentOptimizer::transform().");
  Map<SemanticRule, SemanticNodeProcessor> opRules = Maps.newLinkedHashMap();
  opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%"), new Processor());
  WalkerCtx context = new WalkerCtx(pctx.getConf());
  SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, context);
  List<Node> topNodes = Lists.newArrayList();
  topNodes.addAll(pctx.getTopOps().values());
  SemanticGraphWalker walker = new PreOrderWalker(disp);
  walker.startWalking(topNodes, null);
  LOG.info("TablePropertyEnrichmentOptimizer::transform() complete!");
  return pctx;
}
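This is the only example above that uses PreOrderWalker rather than DefaultGraphWalker: PreOrderWalker dispatches a node before descending into its children (so a node reachable through several parents in the operator DAG can be visited more than once), whereas DefaultGraphWalker dispatches a node only after all of its parents have been dispatched. Since both implement SemanticGraphWalker, switching traversal order against the same dispatcher is a one-line change:

// Both walkers drive the same dispatcher; only the visit order differs.
SemanticGraphWalker preOrder = new PreOrderWalker(disp);     // parents before children
SemanticGraphWalker deferred = new DefaultGraphWalker(disp); // a node waits for all its parents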