Use of org.apache.hadoop.hive.ql.plan.ExprNodeDesc in project hive by apache.
In class TestAccumuloRangeGenerator, method testRangeOverNonRowIdField:
@Test
public void testRangeOverNonRowIdField() throws Exception {
  // foo >= 'f'
  ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "foo", null, false);
  ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "f");
  List<ExprNodeDesc> children = Lists.newArrayList();
  children.add(column);
  children.add(constant);
  ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, new GenericUDFOPEqualOrGreaterThan(), children);
  assertNotNull(node);
  // foo <= 'm'
  ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "foo", null, false);
  ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "m");
  List<ExprNodeDesc> children2 = Lists.newArrayList();
  children2.add(column2);
  children2.add(constant2);
  ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, new GenericUDFOPEqualOrLessThan(), children2);
  assertNotNull(node2);
  // And UDF
  List<ExprNodeDesc> bothFilters = Lists.newArrayList();
  bothFilters.add(node);
  bothFilters.add(node2);
  ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, new GenericUDFOPAnd(), bothFilters);
  AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
  Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator, Collections.<Rule, NodeProcessor>emptyMap(), null);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.add(both);
  HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
  try {
    ogw.startWalking(topNodes, nodeOutput);
  } catch (SemanticException ex) {
    throw new RuntimeException(ex);
  }
  // Filters are not over the rowid, therefore scan everything
  Object result = nodeOutput.get(both);
  Assert.assertNull(result);
}
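For contrast, here is a minimal sketch of the rowid case, reusing the handler and rowIdMapping fixtures from the test above. The non-null expectation is an assumption based on the generator's purpose (in Hive's own rowid tests the walker output is a list of Accumulo Range objects), not an exact expected value.

// Hedged sketch: the same walk, but with the conjunction built over the "rid"
// column that the AccumuloRangeGenerator above was constructed for.
ExprNodeDesc ridColumn = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false);
ExprNodeDesc lower = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "f");
ExprNodeDesc gte = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, new GenericUDFOPEqualOrGreaterThan(), Lists.newArrayList(ridColumn, lower));
ExprNodeDesc ridColumn2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false);
ExprNodeDesc upper = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "m");
ExprNodeDesc lte = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, new GenericUDFOPEqualOrLessThan(), Lists.newArrayList(ridColumn2, upper));
ExprNodeGenericFuncDesc ridBoth = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, new GenericUDFOPAnd(), Lists.newArrayList(gte, lte));
AccumuloRangeGenerator ridGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
Dispatcher ridDisp = new DefaultRuleDispatcher(ridGenerator, Collections.<Rule, NodeProcessor>emptyMap(), null);
GraphWalker ridWalker = new DefaultGraphWalker(ridDisp);
ArrayList<Node> ridTopNodes = new ArrayList<Node>();
ridTopNodes.add(ridBoth);
HashMap<Node, Object> ridOutput = new HashMap<Node, Object>();
// startWalking throws SemanticException, which a test method declared "throws Exception" can propagate
ridWalker.startWalking(ridTopNodes, ridOutput);
// Because the predicate now constrains the rowid, the generator can prune the scan,
// so the output for the AND node should no longer be null.
Assert.assertNotNull(ridOutput.get(ridBoth));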
Use of org.apache.hadoop.hive.ql.plan.ExprNodeDesc in project hive by apache.
In class QueryConditionBuilder, method createConditionString:
/*
 * Walk the Hive AST and translate the Hive column names to their equivalent mappings. This is basically a cheat.
 */
private String createConditionString(String filterXml, Map<String, String> columnMap) {
  if ((filterXml == null) || (filterXml.trim().isEmpty())) {
    return EMPTY_STRING;
  }
  try (XMLDecoder decoder = new XMLDecoder(new ByteArrayInputStream(filterXml.getBytes("UTF-8")))) {
    Object object = decoder.readObject();
    if (!(object instanceof ExprNodeDesc)) {
      LOGGER.error("Deserialized filter expression is not of the expected type");
      throw new RuntimeException("Deserialized filter expression is not of the expected type");
    }
    ExprNodeDesc conditionNode = (ExprNodeDesc) object;
    walkTreeAndTranslateColumnNames(conditionNode, columnMap);
    return conditionNode.getExprString();
  } catch (Exception e) {
    LOGGER.error("Error during condition build", e);
    return EMPTY_STRING;
  }
}
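The filterXml argument is a java.beans XML payload: the method reads a single object back with XMLDecoder and expects an ExprNodeDesc. As a rough illustration only, assuming the expression desc classes serialize cleanly as JavaBeans (Hive's older plan serialization relied on this mechanism), a matching payload could be produced as below; the column name is invented for illustration.

// Hedged sketch: serialize a (deliberately trivial) ExprNodeDesc with java.beans.XMLEncoder
// to produce the kind of XML that createConditionString() later reads back with XMLDecoder.
// A realistic filter would be an ExprNodeGenericFuncDesc tree, e.g. (foo = 'bar').
ExprNodeDesc sampleFilter = new ExprNodeColumnDesc(TypeInfoFactory.booleanTypeInfo, "foo_flag", null, false);
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (XMLEncoder encoder = new XMLEncoder(buffer)) {
  encoder.writeObject(sampleFilter);
}
String filterXml = new String(buffer.toByteArray(), StandardCharsets.UTF_8);
// filterXml can now be handed to createConditionString(filterXml, columnMap)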
Use of org.apache.hadoop.hive.ql.plan.ExprNodeDesc in project hive by apache.
In class QueryConditionBuilder, method walkTreeAndTranslateColumnNames:
/*
* Translate column names by walking the AST
*/
private void walkTreeAndTranslateColumnNames(ExprNodeDesc node, Map<String, String> columnMap) {
  if (node == null) {
    return;
  }
  if (node instanceof ExprNodeColumnDesc) {
    ExprNodeColumnDesc column = (ExprNodeColumnDesc) node;
    String hiveColumnName = column.getColumn().toLowerCase();
    if (columnMap.containsKey(hiveColumnName)) {
      String dbColumnName = columnMap.get(hiveColumnName);
      String finalName = formatColumnName(dbColumnName);
      column.setColumn(finalName);
    }
  } else {
    if (node.getChildren() != null) {
      for (ExprNodeDesc childNode : node.getChildren()) {
        walkTreeAndTranslateColumnNames(childNode, columnMap);
      }
    }
  }
}
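A small before/after illustration of what this traversal does, with a hypothetical column mapping (column names invented for illustration):

// Hedged sketch: a filter over Hive column "foo" plus a mapping from Hive names
// to the names used by the underlying database table.
ExprNodeDesc hiveCol = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "foo", null, false);
ExprNodeDesc value = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "bar");
ExprNodeDesc predicate = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, new GenericUDFOPEqual(), Lists.newArrayList(hiveCol, value));
Map<String, String> columnMap = new HashMap<String, String>();
columnMap.put("foo", "FOO_DB_COLUMN");
// Before the walk, predicate.getExprString() renders the Hive name, e.g. (foo = 'bar').
// walkTreeAndTranslateColumnNames(predicate, columnMap) rewrites the ExprNodeColumnDesc
// in place (via setColumn), so the same call afterwards renders the database column name,
// which is what createConditionString() then returns as the pushed-down condition.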
Use of org.apache.hadoop.hive.ql.plan.ExprNodeDesc in project hive by apache.
In class ExprWalkerInfo, method getResidualPredicates:
public Map<String, List<ExprNodeDesc>> getResidualPredicates(boolean clear) {
  Map<String, List<ExprNodeDesc>> oldExprs = new HashMap<String, List<ExprNodeDesc>>();
  for (Map.Entry<String, List<ExprNodeDesc>> entry : nonFinalPreds.entrySet()) {
    List<ExprNodeDesc> converted = new ArrayList<ExprNodeDesc>();
    for (ExprNodeDesc newExpr : entry.getValue()) {
      converted.add(newToOldExprMap.get(newExpr));
    }
    oldExprs.put(entry.getKey(), converted);
  }
  if (clear) {
    nonFinalPreds.clear();
  }
  return oldExprs;
}
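The two maps this method relies on are internal fields of ExprWalkerInfo, but the new-to-old bookkeeping can be pictured with plain collections; a hypothetical sketch (aliases and column names invented for illustration):

// Hedged sketch of the bookkeeping behind getResidualPredicates(): "new" expressions are
// rewritten copies produced while walking the plan, and newToOldExprMap remembers the
// original expression each one came from.
Map<ExprNodeDesc, ExprNodeDesc> newToOldExprMap = new HashMap<ExprNodeDesc, ExprNodeDesc>();
Map<String, List<ExprNodeDesc>> nonFinalPreds = new HashMap<String, List<ExprNodeDesc>>();
ExprNodeDesc oldExpr = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "_col0", "t", false);
ExprNodeDesc newExpr = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "key", "t", false);
newToOldExprMap.put(newExpr, oldExpr);
nonFinalPreds.put("t", Lists.newArrayList(newExpr));
// getResidualPredicates(true) would then report {"t" -> [oldExpr]} and clear nonFinalPreds,
// i.e. residual predicates are handed back in terms of the original expressions.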
Use of org.apache.hadoop.hive.ql.plan.ExprNodeDesc in project hive by apache.
In class PredicateTransitivePropagate, method transform:
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  pGraphContext = pctx;
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  opRules.put(new RuleRegExp("R1", "(" + FilterOperator.getOperatorName() + "%" + ReduceSinkOperator.getOperatorName() + "%" + JoinOperator.getOperatorName() + "%)"), new JoinTransitive());
  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along
  TransitiveContext context = new TransitiveContext();
  Dispatcher disp = new DefaultRuleDispatcher(null, opRules, context);
  GraphWalker ogw = new LevelOrderWalker(disp, 2);
  // Create a list of top operator nodes
  List<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pGraphContext.getTopOps().values());
  ogw.startWalking(topNodes, null);
  Map<ReduceSinkOperator, List<ExprNodeDesc>> newFilters = context.getNewfilters();
  // Insert the new filter between each ReduceSinkOperator and its parent
  for (Map.Entry<ReduceSinkOperator, List<ExprNodeDesc>> entry : newFilters.entrySet()) {
    ReduceSinkOperator reducer = entry.getKey();
    Operator<?> parent = reducer.getParentOperators().get(0);
    List<ExprNodeDesc> exprs = entry.getValue();
    if (parent instanceof FilterOperator) {
      // The parent is already a filter: fold the propagated predicates into its existing predicate
      exprs = ExprNodeDescUtils.split(((FilterOperator) parent).getConf().getPredicate(), exprs);
      ExprNodeDesc merged = ExprNodeDescUtils.mergePredicates(exprs);
      ((FilterOperator) parent).getConf().setPredicate(merged);
    } else {
      // Otherwise splice a new FilterOperator in between parent and reducer;
      // createFilter wires it into the operator DAG, so the return value is not used further here
      ExprNodeDesc merged = ExprNodeDescUtils.mergePredicates(exprs);
      RowSchema parentRS = parent.getSchema();
      Operator<FilterDesc> newFilter = createFilter(reducer, parent, parentRS, merged);
    }
  }
  return pGraphContext;
}
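The split/merge helpers used above can also be exercised on their own; a minimal sketch with two invented equality predicates:

// Hedged sketch of ExprNodeDescUtils.mergePredicates / split as used in transform() above.
ExprNodeDesc colA = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "a", null, false);
ExprNodeDesc valA = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "1");
ExprNodeDesc predA = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, new GenericUDFOPEqual(), Lists.newArrayList(colA, valA));
ExprNodeDesc colB = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "b", null, false);
ExprNodeDesc valB = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "2");
ExprNodeDesc predB = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, new GenericUDFOPEqual(), Lists.newArrayList(colB, valB));
// mergePredicates ANDs the list into a single predicate suitable for a FilterDesc
ExprNodeDesc merged = ExprNodeDescUtils.mergePredicates(Lists.newArrayList(predA, predB));
// split goes the other way: it decomposes a predicate into conjuncts, appending them to the
// supplied list; this is how the parent FilterOperator's existing predicate is combined with
// the newly propagated predicates before being merged again above
List<ExprNodeDesc> conjuncts = ExprNodeDescUtils.split(merged, new ArrayList<ExprNodeDesc>());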