Use of org.apache.hadoop.hive.ql.plan.ForwardDesc in project hive by apache.
The class SemanticAnalyzer, method genGroupByPlan1ReduceMultiGBY:
@SuppressWarnings({ "nls" })
private Operator genGroupByPlan1ReduceMultiGBY(List<String> dests, QB qb, Operator input,
    Map<String, Operator> aliasToOpInfo) throws SemanticException {
  QBParseInfo parseInfo = qb.getParseInfo();
  ExprNodeDesc previous = null;
  Operator selectInput = input;
  // In order to facilitate partition pruning, OR the where clauses together and put them at the
  // top of the operator tree; this could also reduce the amount of data going to the reducer.
  List<ExprNodeDesc.ExprNodeDescEqualityWrapper> whereExpressions =
      new ArrayList<ExprNodeDesc.ExprNodeDescEqualityWrapper>();
  for (String dest : dests) {
    Pair<List<ASTNode>, List<Long>> grpByExprsGroupingSets =
        getGroupByGroupingSetsForClause(parseInfo, dest);
    List<Long> groupingSets = grpByExprsGroupingSets.getRight();
    if (!groupingSets.isEmpty()) {
      throw new SemanticException(ErrorMsg.HIVE_GROUPING_SETS_AGGR_NOMAPAGGR_MULTIGBY.getMsg());
    }
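    // Gather this destination's WHERE predicate so it can be OR-ed into a common
    // pre-shuffle filter.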
    ASTNode whereExpr = parseInfo.getWhrForClause(dest);
    if (whereExpr != null) {
      OpParseContext inputCtx = opParseCtx.get(input);
      RowResolver inputRR = inputCtx.getRowResolver();
      ExprNodeDesc current = genExprNodeDesc((ASTNode) whereExpr.getChild(0), inputRR);
      // Check the list of where expressions already added so they aren't duplicated.
      ExprNodeDesc.ExprNodeDescEqualityWrapper currentWrapped =
          new ExprNodeDesc.ExprNodeDescEqualityWrapper(current);
      if (!whereExpressions.contains(currentWrapped)) {
        whereExpressions.add(currentWrapped);
      } else {
        continue;
      }
      if (previous == null) {
        // If this is the first expression
        previous = current;
        continue;
      }
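      // OR the current predicate with the disjunction accumulated so far.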
      GenericUDFOPOr or = new GenericUDFOPOr();
      List<ExprNodeDesc> expressions = new ArrayList<ExprNodeDesc>(2);
      expressions.add(current);
      expressions.add(previous);
      previous = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, or, expressions);
    } else {
      // If any destination does not have a where clause, there can be no common filter.
      previous = null;
      break;
    }
  }
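  // previous now holds the OR of all the (distinct) WHERE predicates, or null if
  // some destination had no WHERE clause.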
  if (previous != null) {
    OpParseContext inputCtx = opParseCtx.get(input);
    RowResolver inputRR = inputCtx.getRowResolver();
    FilterDesc orFilterDesc = new FilterDesc(previous, false);
    orFilterDesc.setGenerated(true);
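    // The generated flag marks this filter as compiler-produced rather than user-written.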
    selectInput = putOpInsertMap(OperatorFactory.getAndMakeChild(orFilterDesc,
        new RowSchema(inputRR.getColumnInfos()), input), inputRR);
  }
  // Insert a select operator here; it is used by the ColumnPruner to reduce
  // the data to shuffle.
  Operator select = genSelectAllDesc(selectInput);
  // Generate ReduceSinkOperator
  ReduceSinkOperator reduceSinkOperatorInfo =
      genCommonGroupByPlanReduceSinkOperator(qb, dests, select);
  // It is assumed throughout the code that a reducer has a single child; add a
  // ForwardOperator so that we can add multiple filter/group by operators as children.
  RowResolver reduceSinkOperatorInfoRR = opParseCtx.get(reduceSinkOperatorInfo).getRowResolver();
  Operator forwardOp = putOpInsertMap(OperatorFactory.getAndMakeChild(new ForwardDesc(),
      new RowSchema(reduceSinkOperatorInfoRR.getColumnInfos()), reduceSinkOperatorInfo),
      reduceSinkOperatorInfoRR);
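  // The ForwardOperator simply passes rows through; each destination's
  // filter/group-by subtree built below attaches to it as a separate child.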
  Operator curr = forwardOp;
  for (String dest : dests) {
    curr = forwardOp;
    if (parseInfo.getWhrForClause(dest) != null) {
      ASTNode whereExpr = qb.getParseInfo().getWhrForClause(dest);
      curr = genFilterPlan((ASTNode) whereExpr.getChild(0), qb, forwardOp, aliasToOpInfo,
          false, true);
    }
    // Generate GroupbyOperator
    Operator groupByOperatorInfo = genGroupByPlanGroupByOperator(parseInfo, dest, curr,
        reduceSinkOperatorInfo, GroupByDesc.Mode.COMPLETE, null);
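    // Mode.COMPLETE: the reducer performs the entire aggregation in one group-by,
    // with no map-side partial aggregation.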
    // TODO: should we pass curr instead of null?
    curr = genPostGroupByBodyPlan(groupByOperatorInfo, dest, qb, aliasToOpInfo, null);
  }
  return curr;
}
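For context, the first loop above folds the per-destination WHERE predicates into a single disjunction that is evaluated once before the shuffle. Below is a minimal, standalone sketch of that folding, assuming the Hive ql and serde classes are on the classpath; the class name OrFoldSketch, the src/key identifiers, and the constants 10 and 100 are invented for illustration and are not part of SemanticAnalyzer.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class OrFoldSketch {
  public static void main(String[] args) {
    // Hypothetical multi-insert query of the shape this method compiles:
    //   FROM src
    //   INSERT OVERWRITE TABLE dest1 SELECT key, count(1) WHERE key < 10  GROUP BY key
    //   INSERT OVERWRITE TABLE dest2 SELECT key, count(1) WHERE key > 100 GROUP BY key
    ExprNodeDesc key = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "key", "src", false);
    // key < 10 (illustrative predicate for the first destination)
    ExprNodeDesc lt10 = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
        new GenericUDFOPLessThan(), Arrays.asList(key, new ExprNodeConstantDesc(10)));
    // key > 100 (illustrative predicate for the second destination)
    ExprNodeDesc gt100 = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
        new GenericUDFOPGreaterThan(), Arrays.asList(key, new ExprNodeConstantDesc(100)));

    // Fold the predicates into one disjunction, mirroring the loop in
    // genGroupByPlan1ReduceMultiGBY.
    ExprNodeDesc previous = null;
    for (ExprNodeDesc current : Arrays.asList(lt10, gt100)) {
      if (previous == null) {
        previous = current;
        continue;
      }
      List<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>(2);
      children.add(current);
      children.add(previous);
      previous = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
          new GenericUDFOPOr(), children);
    }
    // Expected to print something like: ((key > 100) or (key < 10))
    System.out.println(previous.getExprString());
  }
}

The resulting expression is what the method wraps in a generated FilterDesc above the input, so rows matching none of the destinations' predicates are dropped before the ReduceSinkOperator shuffles them.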