Use of org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc in project hive by apache.
The class TestConvertAstToSearchArg, method getFuncDesc.
private ExprNodeGenericFuncDesc getFuncDesc(String xmlSerialized) {
  byte[] bytes;
  try {
    bytes = xmlSerialized.getBytes("UTF-8");
  } catch (UnsupportedEncodingException ex) {
    throw new RuntimeException("UTF-8 support required", ex);
  }
  ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
  XMLDecoder decoder = new XMLDecoder(bais, null, null);
  try {
    return (ExprNodeGenericFuncDesc) decoder.readObject();
  } finally {
    decoder.close();
  }
}
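The helper simply rehydrates an ExprNodeGenericFuncDesc from its java.beans XML form. The standalone sketch below shows the same XMLEncoder/XMLDecoder round trip using only JDK classes and a java.util.Date payload; it illustrates the mechanism, not the actual Hive test fixture.

import java.beans.XMLDecoder;
import java.beans.XMLEncoder;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Date;

public class XmlDecoderRoundTrip {
  public static void main(String[] args) {
    // Encode a simple bean to the java.beans XML form that getFuncDesc() consumes.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    XMLEncoder encoder = new XMLEncoder(out);
    encoder.writeObject(new Date(0L));
    encoder.close();
    String xml = new String(out.toByteArray(), StandardCharsets.UTF_8);

    // Decode it back, mirroring the try/finally pattern in getFuncDesc().
    XMLDecoder decoder = new XMLDecoder(
        new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)), null, null);
    try {
      Date decoded = (Date) decoder.readObject();
      System.out.println("decoded = " + decoded);
    } finally {
      decoder.close();
    }
  }
}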
Use of org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc in project phoenix by apache.
The class IndexPredicateAnalyzer, method processingInOperator.
private void processingInOperator(ExprNodeGenericFuncDesc expr,
    List<IndexSearchCondition> searchConditions, boolean isNot, Object... nodeOutputs) {
  ExprNodeColumnDesc columnDesc = null;
  String[] fields = null;
  if (LOG.isTraceEnabled()) {
    LOG.trace("Processing In Operator. nodeOutputs : " + Lists.newArrayList(nodeOutputs));
  }
  if (nodeOutputs[0] instanceof ExprNodeFieldDesc) {
    // rowKey field
    ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) nodeOutputs[0];
    fields = ExprNodeDescUtils.extractFields(fieldDesc);
    ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair(
        (ExprNodeDesc) nodeOutputs[0], (ExprNodeDesc) nodeOutputs[1]);
    if (extracted == null) {
      // adding for tez
      return;
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace("nodeOutputs[0] : " + nodeOutputs[0] + ", nodeOutputs[1] : " + nodeOutputs[1]
          + " => " + Lists.newArrayList(extracted));
    }
    columnDesc = (ExprNodeColumnDesc) extracted[0];
  } else if (nodeOutputs[0] instanceof ExprNodeGenericFuncDesc) {
    columnDesc = (ExprNodeColumnDesc) ((ExprNodeGenericFuncDesc) nodeOutputs[0]).getChildren().get(0);
  } else {
    columnDesc = (ExprNodeColumnDesc) nodeOutputs[0];
  }
  String udfName = expr.getGenericUDF().getUdfName();
  ExprNodeConstantDesc[] inConstantDescs = new ExprNodeConstantDesc[nodeOutputs.length - 1];
  for (int i = 0, limit = inConstantDescs.length; i < limit; i++) {
    if (!(nodeOutputs[i + 1] instanceof ExprNodeConstantDesc)) {
      // adding for tez
      return;
    }
    inConstantDescs[i] = (ExprNodeConstantDesc) nodeOutputs[i + 1];
  }
  searchConditions.add(new IndexSearchCondition(columnDesc, udfName, inConstantDescs, expr, fields, isNot));
}
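As a rough illustration of the inputs this method expects, the sketch below builds the descriptors for a predicate like key IN (1, 2). The column name, table alias, and the assumption that the walker presents the column first and the IN-list constants afterwards are illustrative, not taken from the Phoenix source.

import java.util.Arrays;

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class InPredicateShape {
  public static void main(String[] args) {
    // Column and constants for the hypothetical predicate "key IN (1, 2)".
    ExprNodeColumnDesc keyCol =
        new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "key", "t", false);
    ExprNodeConstantDesc one = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 1);
    ExprNodeConstantDesc two = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 2);

    // The IN expression itself, i.e. what processingInOperator() receives as "expr".
    ExprNodeGenericFuncDesc inExpr = new ExprNodeGenericFuncDesc(
        TypeInfoFactory.booleanTypeInfo, new GenericUDFIn(),
        Arrays.<ExprNodeDesc>asList(keyCol, one, two));

    // Assumed shape of the walker's nodeOutputs: the column first, then the IN-list constants.
    Object[] nodeOutputs = new Object[] { keyCol, one, two };
    System.out.println(inExpr.getChildren().size() + " children; nodeOutputs = "
        + Arrays.toString(nodeOutputs));
  }
}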
Use of org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc in project phoenix by apache.
The class IndexPredicateAnalyzer, method getColumnExpr.
// Check if ExprNodeColumnDesc is wrapped in expr.
// If so, peel off. Otherwise return itself.
private ExprNodeDesc getColumnExpr(ExprNodeDesc expr) {
  if (expr instanceof ExprNodeColumnDesc) {
    return expr;
  }
  ExprNodeGenericFuncDesc funcDesc = null;
  if (expr instanceof ExprNodeGenericFuncDesc) {
    funcDesc = (ExprNodeGenericFuncDesc) expr;
  }
  if (null == funcDesc) {
    return expr;
  }
  GenericUDF udf = funcDesc.getGenericUDF();
  // Check if it is a simple cast expression.
  if ((udf instanceof GenericUDFBridge || udf instanceof GenericUDFToBinary
      || udf instanceof GenericUDFToChar || udf instanceof GenericUDFToVarchar
      || udf instanceof GenericUDFToDecimal || udf instanceof GenericUDFToDate
      || udf instanceof GenericUDFToUnixTimeStamp || udf instanceof GenericUDFToUtcTimestamp)
      && funcDesc.getChildren().size() == 1
      && funcDesc.getChildren().get(0) instanceof ExprNodeColumnDesc) {
    return expr.getChildren().get(0);
  }
  return expr;
}
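The peel only happens when the wrapper is one of the listed cast UDFs with a single column child. Below is a minimal sketch of that shape; the column name and the choice of GenericUDFBridge as the wrapper are illustrative assumptions, not taken from the Phoenix tests.

import java.util.Collections;

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class CastPeelShape {
  public static void main(String[] args) {
    // A column wrapped by a bridge UDF, e.g. an implicit cast inserted by the planner.
    ExprNodeColumnDesc col =
        new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "name", "t", false);
    ExprNodeGenericFuncDesc wrapped = new ExprNodeGenericFuncDesc(
        TypeInfoFactory.doubleTypeInfo, new GenericUDFBridge(),
        Collections.<ExprNodeDesc>singletonList(col));

    // getColumnExpr(wrapped) would return the single column child;
    // for any other shape it returns the expression unchanged.
    System.out.println("child = " + wrapped.getChildren().get(0).getExprString());
  }
}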
Use of org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc in project phoenix by apache.
The class IndexPredicateAnalyzer, method analyzeExpr.
private ExprNodeDesc analyzeExpr(ExprNodeGenericFuncDesc expr,
    List<IndexSearchCondition> searchConditions, Object... nodeOutputs) throws SemanticException {
  if (FunctionRegistry.isOpAnd(expr)) {
    assert (nodeOutputs.length == 2);
    ExprNodeDesc residual1 = (ExprNodeDesc) nodeOutputs[0];
    ExprNodeDesc residual2 = (ExprNodeDesc) nodeOutputs[1];
    if (residual1 == null) {
      return residual2;
    }
    if (residual2 == null) {
      return residual1;
    }
    List<ExprNodeDesc> residuals = new ArrayList<ExprNodeDesc>();
    residuals.add(residual1);
    residuals.add(residual2);
    return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
        FunctionRegistry.getGenericUDFForAnd(), residuals);
  }
  GenericUDF genericUDF = expr.getGenericUDF();
  if (!(genericUDF instanceof GenericUDFBaseCompare)) {
    // 2015-10-22 Added by JeongMin Ju: handling of BETWEEN/IN operators.
    if (genericUDF instanceof GenericUDFBetween) {
      // For NOT BETWEEN, the first element of nodeOutputs is true; otherwise it is false.
      processingBetweenOperator(expr, searchConditions, nodeOutputs);
      return expr;
    } else if (genericUDF instanceof GenericUDFIn) {
      processingInOperator(expr, searchConditions, false, nodeOutputs);
      return expr;
    } else if (genericUDF instanceof GenericUDFOPNot
        && ((ExprNodeGenericFuncDesc) expr.getChildren().get(0)).getGenericUDF() instanceof GenericUDFIn) {
      // For NOT IN, the IN operator appears as a child of the NOT operator.
      processingInOperator((ExprNodeGenericFuncDesc) expr.getChildren().get(0), searchConditions,
          true, ((ExprNodeGenericFuncDesc) nodeOutputs[0]).getChildren().toArray());
      return expr;
    } else if (genericUDF instanceof GenericUDFOPNull) {
      processingNullOperator(expr, searchConditions, nodeOutputs);
      return expr;
    } else if (genericUDF instanceof GenericUDFOPNotNull) {
      processingNotNullOperator(expr, searchConditions, nodeOutputs);
      return expr;
    } else {
      return expr;
    }
  }
  ExprNodeDesc expr1 = (ExprNodeDesc) nodeOutputs[0];
  ExprNodeDesc expr2 = (ExprNodeDesc) nodeOutputs[1];
  // Peel off any cast wrapper (e.g. a GenericUDFBridge added by CBO or the user)
  // so both sides expose the underlying column and constant.
  if (expr1.getTypeInfo().equals(expr2.getTypeInfo())) {
    expr1 = getColumnExpr(expr1);
    expr2 = getColumnExpr(expr2);
  }
  ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair(expr1, expr2);
  if (extracted == null || (extracted.length > 2 && !acceptsFields)) {
    return expr;
  }
  ExprNodeColumnDesc columnDesc;
  ExprNodeConstantDesc constantDesc;
  if (extracted[0] instanceof ExprNodeConstantDesc) {
    genericUDF = genericUDF.flip();
    columnDesc = (ExprNodeColumnDesc) extracted[1];
    constantDesc = (ExprNodeConstantDesc) extracted[0];
  } else {
    columnDesc = (ExprNodeColumnDesc) extracted[0];
    constantDesc = (ExprNodeConstantDesc) extracted[1];
  }
  Set<String> allowed = columnToUDFs.get(columnDesc.getColumn());
  if (allowed == null) {
    return expr;
  }
  String udfName = genericUDF.getUdfName();
  if (!allowed.contains(udfName)) {
    return expr;
  }
  String[] fields = null;
  if (extracted.length > 2) {
    ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) extracted[2];
    if (!isValidField(fieldDesc)) {
      return expr;
    }
    fields = ExprNodeDescUtils.extractFields(fieldDesc);
  }
  // We also need to update the expr so that the index query can be generated.
  // Note that Hive does not support UDFToDouble etc. in the query text.
  List<ExprNodeDesc> list = new ArrayList<ExprNodeDesc>();
  list.add(expr1);
  list.add(expr2);
  expr = new ExprNodeGenericFuncDesc(expr.getTypeInfo(), expr.getGenericUDF(), list);
  searchConditions.add(new IndexSearchCondition(columnDesc, udfName, constantDesc, expr, fields));
  // Drop the predicate from the residual (return null) unless it referenced struct fields.
  return fields == null ? null : expr;
}
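For orientation, the sketch below shows how an IndexPredicateAnalyzer is typically driven by a storage handler: register the comparison operators and columns that can be pushed down, then hand it the filter expression and collect the search conditions plus the residual. It uses Hive's own org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer, which the Phoenix variant mirrors; the column name, types, and predicate are illustrative assumptions only.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer;
import org.apache.hadoop.hive.ql.index.IndexSearchCondition;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class AnalyzePredicateSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative predicate: key = 1
    ExprNodeDesc keyCol = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "key", "t", false);
    ExprNodeDesc one = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 1);
    ExprNodeGenericFuncDesc predicate = new ExprNodeGenericFuncDesc(
        TypeInfoFactory.booleanTypeInfo, new GenericUDFOPEqual(), Arrays.asList(keyCol, one));

    // Declare what can be pushed down: equality comparisons on the "key" column.
    IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
    analyzer.addComparisonOp(GenericUDFOPEqual.class.getName());
    analyzer.allowColumnName("key");

    // Decompose the predicate; whatever cannot be pushed down comes back as the residual.
    List<IndexSearchCondition> conditions = new ArrayList<IndexSearchCondition>();
    ExprNodeDesc residual = analyzer.analyzePredicate(predicate, conditions);
    System.out.println("pushed: " + conditions + ", residual: " + residual);
  }
}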
Use of org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc in project phoenix by apache.
The class IndexPredicateAnalyzer, method processingBetweenOperator.
private void processingBetweenOperator(ExprNodeGenericFuncDesc expr,
    List<IndexSearchCondition> searchConditions, Object... nodeOutputs) {
  ExprNodeColumnDesc columnDesc = null;
  String[] fields = null;
  if (nodeOutputs[1] instanceof ExprNodeFieldDesc) {
    // rowKey field
    ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) nodeOutputs[1];
    fields = ExprNodeDescUtils.extractFields(fieldDesc);
    ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair(
        (ExprNodeDesc) nodeOutputs[1], (ExprNodeDesc) nodeOutputs[2]);
    columnDesc = (ExprNodeColumnDesc) extracted[0];
  } else if (nodeOutputs[1] instanceof ExprNodeGenericFuncDesc) {
    columnDesc = (ExprNodeColumnDesc) ((ExprNodeGenericFuncDesc) nodeOutputs[1]).getChildren().get(0);
  } else {
    columnDesc = (ExprNodeColumnDesc) nodeOutputs[1];
  }
  String udfName = expr.getGenericUDF().getUdfName();
  ExprNodeConstantDesc[] betweenConstants = new ExprNodeConstantDesc[] {
      (ExprNodeConstantDesc) nodeOutputs[2], (ExprNodeConstantDesc) nodeOutputs[3] };
  boolean isNot = (Boolean) ((ExprNodeConstantDesc) nodeOutputs[0]).getValue();
  searchConditions.add(new IndexSearchCondition(columnDesc, udfName, betweenConstants, expr, fields, isNot));
}
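By analogy with the IN sketch above, here is a hedged illustration of the inputs this method expects for a predicate like key BETWEEN 1 AND 10: the first walker output carries the NOT flag as a boolean constant, the second the column, and the last two the bounds. The names and types are illustrative assumptions.

import java.util.Arrays;

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class BetweenPredicateShape {
  public static void main(String[] args) {
    // Assumed walker output for "key BETWEEN 1 AND 10":
    // index 0 is the NOT flag, index 1 the column, indexes 2 and 3 the bounds.
    Object[] nodeOutputs = new Object[] {
        new ExprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, Boolean.FALSE),
        new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "key", "t", false),
        new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 1),
        new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 10)
    };
    System.out.println(Arrays.toString(nodeOutputs));
  }
}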