Use of org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc in project hive by apache.
The class IndexPredicateAnalyzer, method analyzeExpr.
private ExprNodeDesc analyzeExpr(ExprNodeGenericFuncDesc expr, List<IndexSearchCondition> searchConditions, Object... nodeOutputs) throws SemanticException {
  if (FunctionRegistry.isOpAnd(expr)) {
    assert (nodeOutputs.length >= 2);
    // The children of an AND have already been analyzed; whatever could not be
    // converted into a search condition is collected here as the residual.
    List<ExprNodeDesc> residuals = new ArrayList<ExprNodeDesc>();
    for (Object residual : nodeOutputs) {
      if (null != residual) {
        residuals.add((ExprNodeDesc) residual);
      }
    }
    if (residuals.size() == 0) {
      return null;
    } else if (residuals.size() == 1) {
      return residuals.get(0);
    } else {
      return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
          FunctionRegistry.getGenericUDFForAnd(), residuals);
    }
  }
  GenericUDF genericUDF = expr.getGenericUDF();
  if (!(genericUDF instanceof GenericUDFBaseCompare)) {
    return expr;
  }
  ExprNodeDesc expr1 = (ExprNodeDesc) nodeOutputs[0];
  ExprNodeDesc expr2 = (ExprNodeDesc) nodeOutputs[1];
  // We may need to peel off the GenericUDFBridge that is added by CBO or the user.
  if (expr1.getTypeInfo().equals(expr2.getTypeInfo())) {
    expr1 = getColumnExpr(expr1);
    expr2 = getColumnExpr(expr2);
  }
  ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair(expr1, expr2);
  if (extracted == null || (extracted.length > 2 && !acceptsFields)) {
    return expr;
  }
  ExprNodeColumnDesc columnDesc;
  ExprNodeConstantDesc constantDesc;
  if (extracted[0] instanceof ExprNodeConstantDesc) {
    // Normalize "constant op column" to "column op constant" by flipping the operator.
    genericUDF = genericUDF.flip();
    columnDesc = (ExprNodeColumnDesc) extracted[1];
    constantDesc = (ExprNodeConstantDesc) extracted[0];
  } else {
    columnDesc = (ExprNodeColumnDesc) extracted[0];
    constantDesc = (ExprNodeConstantDesc) extracted[1];
  }
  Set<String> allowed = columnToUDFs.get(columnDesc.getColumn());
  if (allowed == null) {
    return expr;
  }
  String udfName = genericUDF.getUdfName();
  if (!allowed.contains(udfName)) {
    return expr;
  }
  String[] fields = null;
  if (extracted.length > 2) {
    ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) extracted[2];
    if (!isValidField(fieldDesc)) {
      return expr;
    }
    fields = ExprNodeDescUtils.extractFields(fieldDesc);
  }
  // We also need to update the expr so that the index query can be generated.
  // Note that Hive does not support UDFToDouble etc. in the query text.
  List<ExprNodeDesc> list = new ArrayList<ExprNodeDesc>();
  list.add(expr1);
  list.add(expr2);
  ExprNodeGenericFuncDesc indexExpr = new ExprNodeGenericFuncDesc(expr.getTypeInfo(), expr.getGenericUDF(), list);
  searchConditions.add(new IndexSearchCondition(columnDesc, udfName, constantDesc, indexExpr, expr, fields));
  // The expression has been converted into a search condition, so drop it from
  // the residual predicate; field accesses are additionally kept in the residual.
  return fields == null ? null : expr;
}
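The getColumnExpr helper called above is not part of this snippet. As a minimal sketch, assuming it only needs to unwrap a one-argument cast (a GenericUDFBridge inserted by CBO or by the user) wrapped around a column reference, it could look like the following; Hive's actual helper may recognize additional cast UDFs:

private static ExprNodeDesc getColumnExpr(ExprNodeDesc expr) {
  // Already a bare column reference: nothing to peel off.
  if (expr instanceof ExprNodeColumnDesc) {
    return expr;
  }
  if (expr instanceof ExprNodeGenericFuncDesc) {
    ExprNodeGenericFuncDesc funcDesc = (ExprNodeGenericFuncDesc) expr;
    // Unwrap a single-argument bridged cast applied directly to a column.
    if (funcDesc.getGenericUDF() instanceof GenericUDFBridge
        && funcDesc.getChildren().size() == 1
        && funcDesc.getChildren().get(0) instanceof ExprNodeColumnDesc) {
      return funcDesc.getChildren().get(0);
    }
  }
  // Anything else is left untouched and will fail the compare-pair extraction.
  return expr;
}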
Use of org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc in project hive by apache.
The class ExprNodeConverter, method visitFieldAccess.
/**
 * TODO: Handle 1) cast, 2) windowing agg call.
 */
@Override
public ExprNodeDesc visitFieldAccess(RexFieldAccess fieldAccess) {
  // Handles expressions like struct(key,value).key.
  // Follows the same rules as TypeCheckProcFactory::getXpathOrFuncExprNodeDesc(),
  // which is the equivalent code path when such an expression is parsed from the AST.
  ExprNodeDesc parent = fieldAccess.getReferenceExpr().accept(this);
  String child = fieldAccess.getField().getName();
  TypeInfo parentType = parent.getTypeInfo();
  // Allow accessing a field of list-element structs directly from a list.
  boolean isList = (parentType.getCategory() == ObjectInspector.Category.LIST);
  if (isList) {
    parentType = ((ListTypeInfo) parentType).getListElementTypeInfo();
  }
  TypeInfo t = ((StructTypeInfo) parentType).getStructFieldTypeInfo(child);
  return new ExprNodeFieldDesc(t, parent, child, isList);
}
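To make the construction concrete, here is a hand-built equivalent of what visitFieldAccess returns for an expression like s.key, where s is a column of type struct<key:int,value:string>. The column name "s" and table alias "t" are made up for this example:

// Build the struct type struct<key:int,value:string>.
TypeInfo structType = TypeInfoFactory.getStructTypeInfo(
    Arrays.asList("key", "value"),
    Arrays.asList(TypeInfoFactory.intTypeInfo, TypeInfoFactory.stringTypeInfo));
// The parent expression: the column s itself (hypothetical column/table names).
ExprNodeDesc parent = new ExprNodeColumnDesc(structType, "s", "t", false);
// The field access s.key: field type, parent expression, field name, and
// isList = false because the parent is a struct, not a list of structs.
ExprNodeDesc fieldAccess = new ExprNodeFieldDesc(TypeInfoFactory.intTypeInfo, parent, "key", false);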
Use of org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc in project hive by apache.
The class TestColumnPrunerProcCtx, method testGetSelectNestedColPathsFromChildren1.
// Test select root.col1.a from root:struct<col1:struct<a:boolean,b:double>,col2:double>
@Test
public void testGetSelectNestedColPathsFromChildren1() {
  ColumnPrunerProcCtx ctx = new ColumnPrunerProcCtx(null);
  ExprNodeDesc colDesc = new ExprNodeColumnDesc(col3Type, "root", "test", false);
  ExprNodeDesc col1 = new ExprNodeFieldDesc(col1Type, colDesc, "col1", false);
  ExprNodeDesc fieldDesc = new ExprNodeFieldDesc(TypeInfoFactory.booleanTypeInfo, col1, "a", false);
  final List<FieldNode> paths = Arrays.asList(new FieldNode("_col0"));
  SelectOperator selectOperator = buildSelectOperator(Arrays.asList(fieldDesc), paths);
  List<FieldNode> groups = ctx.getSelectColsFromChildren(selectOperator, paths);
  compareTestResults(groups, "root.col1.a");
}
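These tests rely on col1Type, col2Type, and col3Type fixtures defined elsewhere in the test class. A plausible reconstruction, matching the schema in the test comment (root: struct<col1:struct<a:boolean,b:double>, col2:double>), would be:

// col1: struct<a:boolean,b:double>
TypeInfo col1Type = TypeInfoFactory.getStructTypeInfo(
    Arrays.asList("a", "b"),
    Arrays.asList(TypeInfoFactory.booleanTypeInfo, TypeInfoFactory.doubleTypeInfo));
// col2: double
TypeInfo col2Type = TypeInfoFactory.doubleTypeInfo;
// root: struct<col1:struct<a:boolean,b:double>, col2:double>
TypeInfo col3Type = TypeInfoFactory.getStructTypeInfo(
    Arrays.asList("col1", "col2"),
    Arrays.asList(col1Type, col2Type));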
Use of org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc in project hive by apache.
The class TestColumnPrunerProcCtx, method testGetSelectNestedColPathsFromChildren3.
// Test select root.col2 from root:struct<col1:struct<a:boolean,b:double>,col2:double>
@Test
public void testGetSelectNestedColPathsFromChildren3() {
  ColumnPrunerProcCtx ctx = new ColumnPrunerProcCtx(null);
  ExprNodeDesc colDesc = new ExprNodeColumnDesc(col3Type, "root", "test", false);
  ExprNodeDesc fieldDesc = new ExprNodeFieldDesc(col1Type, colDesc, "col2", false);
  final List<FieldNode> paths = Arrays.asList(new FieldNode("_col0"));
  SelectOperator selectOperator = buildSelectOperator(Arrays.asList(fieldDesc), paths);
  List<FieldNode> groups = ctx.getSelectColsFromChildren(selectOperator, paths);
  compareTestResults(groups, "root.col2");
}
Use of org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc in project hive by apache.
The class TestColumnPrunerProcCtx, method testGetSelectNestedColPathsFromChildren7.
// Test select pow(root.col1.b, root.col2) from table test(root
// struct<col1:struct<a:boolean,b:double>, col2:double>);
@Test
public void testGetSelectNestedColPathsFromChildren7() {
  ColumnPrunerProcCtx ctx = new ColumnPrunerProcCtx(null);
  ExprNodeDesc colDesc = new ExprNodeColumnDesc(col3Type, "root", "test", false);
  ExprNodeDesc col1 = new ExprNodeFieldDesc(col1Type, colDesc, "col1", false);
  ExprNodeDesc fieldDesc1 = new ExprNodeFieldDesc(TypeInfoFactory.doubleTypeInfo, col1, "b", false);
  colDesc = new ExprNodeColumnDesc(col3Type, "root", "test", false);
  ExprNodeDesc col2 = new ExprNodeFieldDesc(col2Type, colDesc, "col2", false);
  final List<FieldNode> paths = Arrays.asList(new FieldNode("_col0"));
  GenericUDF udf = mock(GenericUDFPower.class);
  List<ExprNodeDesc> list = new ArrayList<>();
  list.add(fieldDesc1);
  list.add(col2);
  ExprNodeDesc funcDesc = new ExprNodeGenericFuncDesc(TypeInfoFactory.doubleTypeInfo, udf, "pow", list);
  SelectOperator selectOperator = buildSelectOperator(Arrays.asList(funcDesc), paths);
  List<FieldNode> groups = ctx.getSelectColsFromChildren(selectOperator, paths);
  compareTestResults(groups, "root.col1.b", "root.col2");
}