Use of org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc in project phoenix by apache.
The processingInOperator method of the IndexPredicateAnalyzer class.
private void processingInOperator(ExprNodeGenericFuncDesc expr, List<IndexSearchCondition> searchConditions, boolean isNot, Object... nodeOutputs) {
    ExprNodeColumnDesc columnDesc = null;
    String[] fields = null;
    if (LOG.isTraceEnabled()) {
        LOG.trace("Processing In Operator. nodeOutputs : " + Lists.newArrayList(nodeOutputs));
    }
    if (nodeOutputs[0] instanceof ExprNodeFieldDesc) {
        // rowKey field
        ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) nodeOutputs[0];
        fields = ExprNodeDescUtils.extractFields(fieldDesc);
        ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair((ExprNodeDesc) nodeOutputs[0], (ExprNodeDesc) nodeOutputs[1]);
        if (extracted == null) {
            // added for Tez: no column/constant pair could be extracted, so leave
            // this predicate to be evaluated as a residual
            return;
        }
        if (LOG.isTraceEnabled()) {
            LOG.trace("nodeOutputs[0] : " + nodeOutputs[0] + ", nodeOutputs[1] : " + nodeOutputs[1] + " => " + Lists.newArrayList(extracted));
        }
        columnDesc = (ExprNodeColumnDesc) extracted[0];
    } else if (nodeOutputs[0] instanceof ExprNodeGenericFuncDesc) {
        // a function (e.g. a cast) wraps the column: take its column child
        columnDesc = (ExprNodeColumnDesc) ((ExprNodeGenericFuncDesc) nodeOutputs[0]).getChildren().get(0);
    } else {
        columnDesc = (ExprNodeColumnDesc) nodeOutputs[0];
    }
    String udfName = expr.getGenericUDF().getUdfName();
    ExprNodeConstantDesc[] inConstantDescs = new ExprNodeConstantDesc[nodeOutputs.length - 1];
    for (int i = 0, limit = inConstantDescs.length; i < limit; i++) {
        if (!(nodeOutputs[i + 1] instanceof ExprNodeConstantDesc)) {
            // added for Tez: a non-constant in the IN list cannot be pushed down
            return;
        }
        inConstantDescs[i] = (ExprNodeConstantDesc) nodeOutputs[i + 1];
    }
    searchConditions.add(new IndexSearchCondition(columnDesc, udfName, inConstantDescs, expr, fields, isNot));
}
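For orientation, here is a minimal sketch, not taken from the Phoenix source, of the operand array processingInOperator consumes for a predicate such as id IN (1, 2, 3); the table alias t and the column name id are illustrative assumptions.

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class InOperandsSketch {
    // nodeOutputs layout for  id IN (1, 2, 3):
    // index 0 is the column (or rowKey field) reference, indexes 1..n are the
    // IN-list values. Any non-constant among them triggers the early return above.
    static ExprNodeDesc[] inOperands() {
        return new ExprNodeDesc[] {
            new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "id", "t", false),
            new ExprNodeConstantDesc(1),
            new ExprNodeConstantDesc(2),
            new ExprNodeConstantDesc(3)
        };
    }
}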
Use of org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc in project phoenix by apache.
The analyzeExpr method of the IndexPredicateAnalyzer class.
private ExprNodeDesc analyzeExpr(ExprNodeGenericFuncDesc expr, List<IndexSearchCondition> searchConditions, Object... nodeOutputs) throws SemanticException {
    if (FunctionRegistry.isOpAnd(expr)) {
        assert (nodeOutputs.length == 2);
        ExprNodeDesc residual1 = (ExprNodeDesc) nodeOutputs[0];
        ExprNodeDesc residual2 = (ExprNodeDesc) nodeOutputs[1];
        if (residual1 == null) {
            return residual2;
        }
        if (residual2 == null) {
            return residual1;
        }
        List<ExprNodeDesc> residuals = new ArrayList<ExprNodeDesc>();
        residuals.add(residual1);
        residuals.add(residual2);
        return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, FunctionRegistry.getGenericUDFForAnd(), residuals);
    }
    GenericUDF genericUDF = expr.getGenericUDF();
    if (!(genericUDF instanceof GenericUDFBaseCompare)) {
        // 2015-10-22 Added by JeongMin Ju : Processing Between/In Operator
        if (genericUDF instanceof GenericUDFBetween) {
            // For NOT BETWEEN, the first element of nodeOutputs is true; otherwise it is false.
            processingBetweenOperator(expr, searchConditions, nodeOutputs);
            return expr;
        } else if (genericUDF instanceof GenericUDFIn) {
            processingInOperator(expr, searchConditions, false, nodeOutputs);
            return expr;
        } else if (genericUDF instanceof GenericUDFOPNot && ((ExprNodeGenericFuncDesc) expr.getChildren().get(0)).getGenericUDF() instanceof GenericUDFIn) {
            // For NOT IN, the IN operator appears as the child of the NOT operator.
            processingInOperator((ExprNodeGenericFuncDesc) expr.getChildren().get(0), searchConditions, true, ((ExprNodeGenericFuncDesc) nodeOutputs[0]).getChildren().toArray());
            return expr;
        } else if (genericUDF instanceof GenericUDFOPNull) {
            processingNullOperator(expr, searchConditions, nodeOutputs);
            return expr;
        } else if (genericUDF instanceof GenericUDFOPNotNull) {
            processingNotNullOperator(expr, searchConditions, nodeOutputs);
            return expr;
        } else {
            return expr;
        }
    }
    ExprNodeDesc expr1 = (ExprNodeDesc) nodeOutputs[0];
    ExprNodeDesc expr2 = (ExprNodeDesc) nodeOutputs[1];
    // We may need to peel off the GenericUDFBridge that is added by CBO or the user.
    if (expr1.getTypeInfo().equals(expr2.getTypeInfo())) {
        expr1 = getColumnExpr(expr1);
        expr2 = getColumnExpr(expr2);
    }
    ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair(expr1, expr2);
    if (extracted == null || (extracted.length > 2 && !acceptsFields)) {
        return expr;
    }
    ExprNodeColumnDesc columnDesc;
    ExprNodeConstantDesc constantDesc;
    if (extracted[0] instanceof ExprNodeConstantDesc) {
        genericUDF = genericUDF.flip();
        columnDesc = (ExprNodeColumnDesc) extracted[1];
        constantDesc = (ExprNodeConstantDesc) extracted[0];
    } else {
        columnDesc = (ExprNodeColumnDesc) extracted[0];
        constantDesc = (ExprNodeConstantDesc) extracted[1];
    }
    Set<String> allowed = columnToUDFs.get(columnDesc.getColumn());
    if (allowed == null) {
        return expr;
    }
    String udfName = genericUDF.getUdfName();
    if (!allowed.contains(udfName)) {
        return expr;
    }
    String[] fields = null;
    if (extracted.length > 2) {
        ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) extracted[2];
        if (!isValidField(fieldDesc)) {
            return expr;
        }
        fields = ExprNodeDescUtils.extractFields(fieldDesc);
    }
    // We also need to update the expr so that the index query can be generated.
    // Note that Hive does not support UDFToDouble etc. in the query text.
    List<ExprNodeDesc> list = new ArrayList<ExprNodeDesc>();
    list.add(expr1);
    list.add(expr2);
    expr = new ExprNodeGenericFuncDesc(expr.getTypeInfo(), expr.getGenericUDF(), list);
    searchConditions.add(new IndexSearchCondition(columnDesc, udfName, constantDesc, expr, fields));
    // remove it from the residual predicate
    return fields == null ? null : expr;
}
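The decomposition contract is easiest to see from the caller's side. Below is a sketch that follows the entry points of Hive's org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer (allowColumnName, addComparisonOp, analyzePredicate); the Phoenix copy excerpted above follows the same pattern, though its exact registration calls may differ, and the column and operator names here are assumptions.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer;
import org.apache.hadoop.hive.ql.index.IndexSearchCondition;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;

public class DecomposeSketch {
    static ExprNodeDesc decompose(ExprNodeDesc predicate) {
        IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
        // Only comparisons on registered columns against registered operators
        // are pushed down; everything else stays in the residual.
        analyzer.allowColumnName("id");
        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual");
        List<IndexSearchCondition> conditions = new ArrayList<>();
        // analyzePredicate walks the expression tree; each comparison node ends
        // up in analyzeExpr, and pushable ones are appended to 'conditions'.
        ExprNodeDesc residual = analyzer.analyzePredicate(predicate, conditions);
        return residual; // null when the entire predicate was pushed down
    }
}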
Use of org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc in project phoenix by apache.
The processingBetweenOperator method of the IndexPredicateAnalyzer class.
private void processingBetweenOperator(ExprNodeGenericFuncDesc expr, List<IndexSearchCondition> searchConditions, Object... nodeOutputs) {
    ExprNodeColumnDesc columnDesc = null;
    String[] fields = null;
    if (nodeOutputs[1] instanceof ExprNodeFieldDesc) {
        // rowKey field
        ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) nodeOutputs[1];
        fields = ExprNodeDescUtils.extractFields(fieldDesc);
        ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair((ExprNodeDesc) nodeOutputs[1], (ExprNodeDesc) nodeOutputs[2]);
        columnDesc = (ExprNodeColumnDesc) extracted[0];
    } else if (nodeOutputs[1] instanceof ExprNodeGenericFuncDesc) {
        // a function (e.g. a cast) wraps the column: take its column child
        columnDesc = (ExprNodeColumnDesc) ((ExprNodeGenericFuncDesc) nodeOutputs[1]).getChildren().get(0);
    } else {
        columnDesc = (ExprNodeColumnDesc) nodeOutputs[1];
    }
    String udfName = expr.getGenericUDF().getUdfName();
    ExprNodeConstantDesc[] betweenConstants = new ExprNodeConstantDesc[] { (ExprNodeConstantDesc) nodeOutputs[2], (ExprNodeConstantDesc) nodeOutputs[3] };
    boolean isNot = (Boolean) ((ExprNodeConstantDesc) nodeOutputs[0]).getValue();
    searchConditions.add(new IndexSearchCondition(columnDesc, udfName, betweenConstants, expr, fields, isNot));
}
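As with the IN case, a minimal sketch, not from the source, of the operand layout this method assumes for id BETWEEN 1 AND 10: Hive's GenericUDFBetween carries the NOT flag as its first argument, so index 0 is a boolean constant, index 1 the column, and indexes 2 and 3 the bounds. Column and alias names are illustrative assumptions.

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class BetweenOperandsSketch {
    // nodeOutputs layout for  id BETWEEN 1 AND 10  (NOT flag = false).
    static ExprNodeDesc[] betweenOperands() {
        return new ExprNodeDesc[] {
            new ExprNodeConstantDesc(false), // nodeOutputs[0]: NOT flag
            new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "id", "t", false),
            new ExprNodeConstantDesc(1),     // nodeOutputs[2]: lower bound
            new ExprNodeConstantDesc(10)     // nodeOutputs[3]: upper bound
        };
    }
}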
Use of org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc in project flink by apache.
The genAllExprNodeDesc method of the HiveParserSemanticAnalyzer class.
/**
 * Generates all of the expression node descriptors for the expression and children of it passed
 * in the arguments. This function uses the row resolver and the metadata information that are
 * passed as arguments to resolve the column names to internal names.
 */
@SuppressWarnings("nls")
public Map<HiveParserASTNode, ExprNodeDesc> genAllExprNodeDesc(HiveParserASTNode expr, HiveParserRowResolver input, HiveParserTypeCheckCtx tcCtx) throws SemanticException {
    // Create the walker and the rules dispatcher.
    tcCtx.setUnparseTranslator(unparseTranslator);
    Map<HiveParserASTNode, ExprNodeDesc> nodeOutputs = HiveParserTypeCheckProcFactory.genExprNode(expr, tcCtx);
    ExprNodeDesc desc = nodeOutputs.get(expr);
    if (desc == null) {
        String errMsg = tcCtx.getError();
        if (errMsg == null) {
            errMsg = "Error in parsing ";
        }
        throw new SemanticException(errMsg);
    }
    if (desc instanceof HiveParserExprNodeColumnListDesc) {
        throw new SemanticException("TOK_ALLCOLREF is not supported in current context");
    }
    if (!unparseTranslator.isEnabled()) {
        // Not creating a view, so no need to track view expansions.
        return nodeOutputs;
    }
    Map<ExprNodeDesc, String> nodeToText = new HashMap<>();
    List<HiveParserASTNode> fieldDescList = new ArrayList<>();
    for (Map.Entry<HiveParserASTNode, ExprNodeDesc> entry : nodeOutputs.entrySet()) {
        if (!(entry.getValue() instanceof ExprNodeColumnDesc)) {
            // ExprNodeFieldDesc nodes (e.g. references into a struct<>) also need
            // translation; collect them for the second pass below.
            if (entry.getValue() instanceof ExprNodeFieldDesc) {
                fieldDescList.add(entry.getKey());
            }
            continue;
        }
        HiveParserASTNode node = entry.getKey();
        ExprNodeColumnDesc columnDesc = (ExprNodeColumnDesc) entry.getValue();
        if ((columnDesc.getTabAlias() == null) || (columnDesc.getTabAlias().length() == 0)) {
            // These aren't real column references; they are internal expressions
            // used in the representation of aggregation.
            continue;
        }
        String[] tmp = input.reverseLookup(columnDesc.getColumn());
        // in the subquery case, tmp may come from the outer query block
        if (tmp[0] != null && columnDesc.getTabAlias() != null && !tmp[0].equals(columnDesc.getTabAlias()) && tcCtx.getOuterRR() != null) {
            tmp = tcCtx.getOuterRR().reverseLookup(columnDesc.getColumn());
        }
        StringBuilder replacementText = new StringBuilder();
        replacementText.append(HiveUtils.unparseIdentifier(tmp[0], conf));
        replacementText.append(".");
        replacementText.append(HiveUtils.unparseIdentifier(tmp[1], conf));
        nodeToText.put(columnDesc, replacementText.toString());
        unparseTranslator.addTranslation(node, replacementText.toString());
    }
    for (HiveParserASTNode node : fieldDescList) {
        Map<HiveParserASTNode, String> map = translateFieldDesc(node);
        for (Entry<HiveParserASTNode, String> entry : map.entrySet()) {
            unparseTranslator.addTranslation(entry.getKey(), entry.getValue());
        }
    }
    return nodeOutputs;
}
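To make the view-expansion step concrete, here is a small sketch of the replacement-text construction with a hypothetical lookup result; HiveUtils.unparseIdentifier quotes an identifier according to the configured quoting policy, and the alias/column values are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.metadata.HiveUtils;

public class UnparseSketch {
    static String replacementFor(Configuration conf) {
        // Hypothetical result of reverseLookup: the internal column name
        // resolved to table alias "emp" and column "age".
        String[] tmp = {"emp", "age"};
        StringBuilder replacementText = new StringBuilder();
        replacementText.append(HiveUtils.unparseIdentifier(tmp[0], conf));
        replacementText.append(".");
        replacementText.append(HiveUtils.unparseIdentifier(tmp[1], conf));
        return replacementText.toString(); // e.g. `emp`.`age`
    }
}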
Use of org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc in project hive by apache.
The removeNonPartCols method of the PartitionPruner class.
/**
 * See compactExpr. Some subexpressions are replaced with nulls for the pruner, but
 * virtual columns are not removed there (ExprNodeColumnDesc cannot tell them apart from
 * partition columns), so we do it here.
 * The expression is only used to prune by partition name, so we have no business with VCs.
 * @param expr original partition pruning expression.
 * @param partCols list of partition columns for the table.
 * @param referred partition columns referred to by expr.
 * @return partition pruning expression that only contains partition columns from the list.
 */
private static ExprNodeDesc removeNonPartCols(ExprNodeDesc expr, List<String> partCols, Set<String> referred) {
    if (expr instanceof ExprNodeFieldDesc) {
        // A reference into a list or struct field can never be a partition column;
        // replace it with a null constant.
        return new ExprNodeConstantDesc(expr.getTypeInfo(), null);
    } else if (expr instanceof ExprNodeColumnDesc) {
        String column = ((ExprNodeColumnDesc) expr).getColumn();
        if (!partCols.contains(column)) {
            // Column doesn't appear to be a partition column for the table.
            return new ExprNodeConstantDesc(expr.getTypeInfo(), null);
        }
        referred.add(column);
    } else if (expr instanceof ExprNodeGenericFuncDesc) {
        List<ExprNodeDesc> children = expr.getChildren();
        for (int i = 0; i < children.size(); ++i) {
            ExprNodeDesc other = removeNonPartCols(children.get(i), partCols, referred);
            if (ExprNodeDescUtils.isNullConstant(other)) {
                if (FunctionRegistry.isOpAnd(expr)) {
                    // partcol=... AND nonpartcol=... is replaced with partcol=... AND TRUE,
                    // which will be folded to partcol=...
                    // This cannot be done for OR.
                    Preconditions.checkArgument(expr.getTypeInfo().accept(TypeInfoFactory.booleanTypeInfo));
                    other = new ExprNodeConstantDesc(expr.getTypeInfo(), true);
                } else {
                    // A null child anywhere else could propagate through the parent
                    // and cause overaggressive pruning, missing data (incorrect result),
                    // so null out the whole subtree instead.
                    return new ExprNodeConstantDesc(expr.getTypeInfo(), null);
                }
            }
            children.set(i, other);
        }
    }
    return expr;
}
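The folding rule is worth a worked example. The toy below mirrors the behavior on a minimal expression model rather than Hive's classes (removeNonPartCols is private); leaves stand for comparisons like ds = '2024-01-01', Java null plays the role of the null constant, and the AND-with-TRUE case is folded directly instead of being left for constant folding. Column names are assumptions.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class PrunePartColsToy {
    // Leaf: a comparison on a single column. Node: AND/OR over two children.
    static abstract class Expr {}
    static final class Col extends Expr {
        final String name;
        Col(String name) { this.name = name; }
        @Override public String toString() { return name; }
    }
    static final class Op extends Expr {
        final String op; final Expr left, right;
        Op(String op, Expr left, Expr right) { this.op = op; this.left = left; this.right = right; }
        @Override public String toString() { return "(" + left + " " + op + " " + right + ")"; }
    }

    // Mirrors removeNonPartCols' folding rule.
    static Expr prune(Expr e, Set<String> partCols, Set<String> referred) {
        if (e instanceof Col) {
            Col c = (Col) e;
            if (!partCols.contains(c.name)) {
                return null; // not a partition column: null it out
            }
            referred.add(c.name);
            return e;
        }
        Op o = (Op) e;
        Expr l = prune(o.left, partCols, referred);
        Expr r = prune(o.right, partCols, referred);
        if (l == null || r == null) {
            if (o.op.equals("AND")) {
                // AND tolerates losing one side: partcol AND TRUE folds to partcol.
                return l == null ? r : l;
            }
            return null; // under OR the null poisons the whole subtree
        }
        return new Op(o.op, l, r);
    }

    public static void main(String[] args) {
        Set<String> partCols = new HashSet<>(Arrays.asList("ds"));
        System.out.println(prune(new Op("AND", new Col("ds"), new Col("uid")), partCols, new HashSet<>())); // ds
        System.out.println(prune(new Op("OR", new Col("ds"), new Col("uid")), partCols, new HashSet<>()));  // null
    }
}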