use of org.apache.hadoop.hive.ql.plan.ExprNodeDesc in project hive by apache.
the class HiveCalciteUtil method getExprNode.
public static ExprNodeDesc getExprNode(Integer inputRefIndx, RelNode inputRel, ExprNodeConverter exprConv) {
  ExprNodeDesc exprNode = null;
  RexNode rexInputRef = new RexInputRef(inputRefIndx, inputRel.getRowType().getFieldList().get(inputRefIndx).getType());
  exprNode = rexInputRef.accept(exprConv);
  return exprNode;
}
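A minimal usage sketch (not from the Hive source): the hypothetical helper below resolves every input column of a RelNode into an ExprNodeDesc by calling getExprNode once per field. The ExprNodeConverter is assumed to have been constructed elsewhere for the target row schema, as in the HiveRexExecutorImpl example further down.

static List<ExprNodeDesc> allColumnsAsExprNodes(RelNode inputRel, ExprNodeConverter exprConv) {
  List<ExprNodeDesc> columns = new ArrayList<ExprNodeDesc>();
  // one ExprNodeDesc per field of the input row type
  for (int i = 0; i < inputRel.getRowType().getFieldCount(); i++) {
    columns.add(HiveCalciteUtil.getExprNode(i, inputRel, exprConv));
  }
  return columns;
}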
use of org.apache.hadoop.hive.ql.plan.ExprNodeDesc in project hive by apache.
the class HiveRexExecutorImpl method reduce.
@Override
public void reduce(RexBuilder rexBuilder, List<RexNode> constExps, List<RexNode> reducedValues) {
  RexNodeConverter rexNodeConverter = new RexNodeConverter(cluster);
  for (RexNode rexNode : constExps) {
    // initialize the converter
    ExprNodeConverter converter = new ExprNodeConverter("", null, null, null, new HashSet<Integer>(), cluster.getTypeFactory());
    // convert RexNode to ExprNodeGenericFuncDesc
    ExprNodeDesc expr = rexNode.accept(converter);
    if (expr instanceof ExprNodeGenericFuncDesc) {
      // folding the constant
      ExprNodeDesc constant = ConstantPropagateProcFactory.foldExpr((ExprNodeGenericFuncDesc) expr);
      if (constant != null) {
        try {
          // convert constant back to RexNode
          reducedValues.add(rexNodeConverter.convert((ExprNodeConstantDesc) constant));
        } catch (Exception e) {
          LOG.warn(e.getMessage());
          reducedValues.add(rexNode);
        }
      } else {
        reducedValues.add(rexNode);
      }
    } else {
      reducedValues.add(rexNode);
    }
  }
}
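A minimal wiring sketch (an assumption, not shown in the snippet above): reduce is the RexExecutor callback that Calcite's constant-reduction rules invoke, so the executor is typically registered on the planner. The constructor argument is inferred from the cluster field used above.

// assumes a RelOptCluster named cluster is in scope
RelOptPlanner planner = cluster.getPlanner();
planner.setExecutor(new HiveRexExecutorImpl(cluster));
// rules such as ReduceExpressionsRule can now call reduce() to fold constant expressions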
use of org.apache.hadoop.hive.ql.plan.ExprNodeDesc in project hive by apache.
the class CorrelationUtilities method removeReduceSinkForGroupBy.
protected static void removeReduceSinkForGroupBy(ReduceSinkOperator cRS, GroupByOperator cGBYr, ParseContext context, AbstractCorrelationProcCtx procCtx) throws SemanticException {
  Operator<?> parent = getSingleParent(cRS);
  if ((parent instanceof GroupByOperator) && procCtx.isMapAggr()) {
    // pRS-cGBYm-cRS-cGBYr (map aggregation) --> pRS-cGBYr(COMPLETE)
    // copies desc of cGBYm to cGBYr and remove cGBYm and cRS
    GroupByOperator cGBYm = (GroupByOperator) parent;
    cGBYr.getConf().setKeys(ExprNodeDescUtils.backtrack(ExprNodeDescUtils.backtrack(cGBYr.getConf().getKeys(), cGBYr, cRS), cRS, cGBYm));
    cGBYr.getConf().setAggregators(cGBYm.getConf().getAggregators());
    for (AggregationDesc aggr : cGBYm.getConf().getAggregators()) {
      aggr.setMode(GenericUDAFEvaluator.Mode.COMPLETE);
    }
    cGBYr.setColumnExprMap(cGBYm.getColumnExprMap());
    cGBYr.setSchema(cGBYm.getSchema());
  } else {
    // pRS-cRS-cGBYr (no map aggregation) --> pRS-cGBYr(COMPLETE)
    // revert expressions of cGBYr to that of cRS
    cGBYr.getConf().setKeys(ExprNodeDescUtils.backtrack(cGBYr.getConf().getKeys(), cGBYr, cRS));
    for (AggregationDesc aggr : cGBYr.getConf().getAggregators()) {
      aggr.setParameters(ExprNodeDescUtils.backtrack(aggr.getParameters(), cGBYr, cRS));
    }
    Map<String, ExprNodeDesc> oldMap = cGBYr.getColumnExprMap();
    RowSchema oldRS = cGBYr.getSchema();
    Map<String, ExprNodeDesc> newMap = new HashMap<String, ExprNodeDesc>();
    ArrayList<ColumnInfo> newRS = new ArrayList<ColumnInfo>();
    List<String> outputCols = cGBYr.getConf().getOutputColumnNames();
    for (int i = 0; i < outputCols.size(); i++) {
      String colName = outputCols.get(i);
      ColumnInfo colInfo = oldRS.getColumnInfo(colName);
      newRS.add(colInfo);
      ExprNodeDesc colExpr = ExprNodeDescUtils.backtrack(oldMap.get(colName), cGBYr, cRS);
      if (colExpr != null) {
        newMap.put(colInfo.getInternalName(), colExpr);
      }
    }
    cGBYr.setColumnExprMap(newMap);
    cGBYr.setSchema(new RowSchema(newRS));
  }
  cGBYr.getConf().setMode(GroupByDesc.Mode.COMPLETE);
  removeOperator(cRS, cGBYr, parent, context);
  procCtx.addRemovedOperator(cRS);
  if ((parent instanceof GroupByOperator) && procCtx.isMapAggr()) {
    removeOperator(parent, cGBYr, getSingleParent(parent), context);
    procCtx.addRemovedOperator(cGBYr);
  }
}
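A hypothetical call-site sketch (the helper name below is an assumption, not the actual Hive deduplication code): because removeReduceSinkForGroupBy is protected, real callers live in the same org.apache.hadoop.hive.ql.optimizer.correlation package. A processor that has matched a ReduceSinkOperator feeding a GroupByOperator could collapse the pair into a single COMPLETE-mode group-by like this.

// in package org.apache.hadoop.hive.ql.optimizer.correlation
static void collapseIntoCompleteGroupBy(ReduceSinkOperator cRS, GroupByOperator cGBYr,
    ParseContext pctx, AbstractCorrelationProcCtx procCtx) throws SemanticException {
  // after this call cGBYr runs in COMPLETE mode; cRS (and any map-side group-by) is removed
  CorrelationUtilities.removeReduceSinkForGroupBy(cRS, cGBYr, pctx, procCtx);
}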
use of org.apache.hadoop.hive.ql.plan.ExprNodeDesc in project hive by apache.
the class HiveOpConverter method genReduceSinkAndBacktrackSelect.
private static SelectOperator genReduceSinkAndBacktrackSelect(Operator<?> input, ExprNodeDesc[] keys, int tag, ArrayList<ExprNodeDesc> partitionCols, String order, String nullOrder, int numReducers, Operation acidOperation, HiveConf hiveConf, List<String> keepColNames) throws SemanticException {
  // 1. Generate RS operator
  // 1.1 Prune the tableNames, only count the tableNames that are not empty strings
  // as empty string in table aliases is only allowed for virtual columns.
  String tableAlias = null;
  Set<String> tableNames = input.getSchema().getTableNames();
  for (String tableName : tableNames) {
    if (tableName != null) {
      if (tableName.length() == 0) {
        if (tableAlias == null) {
          tableAlias = tableName;
        }
      } else {
        if (tableAlias == null || tableAlias.length() == 0) {
          tableAlias = tableName;
        } else {
          if (!tableName.equals(tableAlias)) {
            throw new SemanticException("In CBO return path, genReduceSinkAndBacktrackSelect is expecting only one tableAlias but there is more than one");
          }
        }
      }
    }
  }
  if (tableAlias == null) {
    throw new SemanticException("In CBO return path, genReduceSinkAndBacktrackSelect is expecting only one tableAlias but there is none");
  }
  // 1.2 Now generate RS operator
  ReduceSinkOperator rsOp = genReduceSink(input, tableAlias, keys, tag, partitionCols, order, nullOrder, numReducers, acidOperation, hiveConf);
  // 2. Generate backtrack Select operator
  Map<String, ExprNodeDesc> descriptors = buildBacktrackFromReduceSink(keepColNames, rsOp.getConf().getOutputKeyColumnNames(), rsOp.getConf().getOutputValueColumnNames(), rsOp.getValueIndex(), input);
  SelectDesc selectDesc = new SelectDesc(new ArrayList<ExprNodeDesc>(descriptors.values()), new ArrayList<String>(descriptors.keySet()));
  ArrayList<ColumnInfo> cinfoLst = createColInfosSubset(input, keepColNames);
  SelectOperator selectOp = (SelectOperator) OperatorFactory.getAndMakeChild(selectDesc, new RowSchema(cinfoLst), rsOp);
  selectOp.setColumnExprMap(descriptors);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Generated " + selectOp + " with row schema: [" + selectOp.getSchema() + "]");
  }
  return selectOp;
}
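A standalone sketch (not Hive code) of the alias-resolution rule in step 1.1 above: an empty alias marks virtual columns and is kept only when nothing else is found, a non-empty alias replaces an empty one, and two distinct non-empty aliases are an error.

static String resolveSingleTableAlias(Set<String> tableNames) {
  String alias = null;
  for (String name : tableNames) {
    if (name == null) {
      continue;
    } else if (name.isEmpty()) {
      if (alias == null) {
        alias = name; // keep the virtual-column marker only as a last resort
      }
    } else if (alias == null || alias.isEmpty()) {
      alias = name; // a real alias replaces the empty marker
    } else if (!name.equals(alias)) {
      throw new IllegalStateException("expected a single table alias, found more than one");
    }
  }
  if (alias == null) {
    throw new IllegalStateException("expected a single table alias, found none");
  }
  return alias;
}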
use of org.apache.hadoop.hive.ql.plan.ExprNodeDesc in project hive by apache.
the class HiveOpConverter method buildBacktrackFromReduceSink.
private static Map<String, ExprNodeDesc> buildBacktrackFromReduceSink(List<String> keepColNames, List<String> keyColNames, List<String> valueColNames, int[] index, Operator<?> inputOp) {
  Map<String, ExprNodeDesc> columnDescriptors = new LinkedHashMap<String, ExprNodeDesc>();
  int pos = 0;
  for (int i = 0; i < index.length; i++) {
    ColumnInfo info = inputOp.getSchema().getSignature().get(i);
    if (pos < keepColNames.size() && info.getInternalName().equals(keepColNames.get(pos))) {
      String field;
      if (index[i] >= 0) {
        field = Utilities.ReduceField.KEY + "." + keyColNames.get(index[i]);
      } else {
        field = Utilities.ReduceField.VALUE + "." + valueColNames.get(-index[i] - 1);
      }
      ExprNodeColumnDesc desc = new ExprNodeColumnDesc(info.getType(), field, info.getTabAlias(), info.getIsVirtualCol());
      columnDescriptors.put(keepColNames.get(pos), desc);
      pos++;
    }
  }
  return columnDescriptors;
}
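A minimal standalone sketch (assumed, not part of HiveOpConverter) of the index decoding used above: a non-negative entry addresses a ReduceSink key column, while a negative entry addresses a value column at position (-index - 1), so index 0 maps to KEY.keyColNames[0] and index -1 maps to VALUE.valueColNames[0].

static String reduceFieldName(int idx, List<String> keyColNames, List<String> valueColNames) {
  return idx >= 0
      ? Utilities.ReduceField.KEY + "." + keyColNames.get(idx)
      : Utilities.ReduceField.VALUE + "." + valueColNames.get(-idx - 1);
}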