use of org.apache.calcite.rex.RexFieldCollation in project drill by apache.
the class DrillOptiqTest method testUnsupportedRexNode.
/* Method checks if we raise the appropriate error while dealing with RexNode that cannot be converted to
 * equivalent Drill expressions
 */
@Test
public void testUnsupportedRexNode() {
  try {
    // Create the data type factory.
    RelDataTypeFactory relFactory = new SqlTypeFactoryImpl(DrillRelDataTypeSystem.DRILL_REL_DATATYPE_SYSTEM);
    // Create the rex builder
    RexBuilder rex = new RexBuilder(relFactory);
    RelDataType anyType = relFactory.createSqlType(SqlTypeName.ANY);
    List<RexNode> emptyList = new LinkedList<>();
    ImmutableList<RexFieldCollation> e = ImmutableList.copyOf(new RexFieldCollation[0]);
    // create a dummy RexOver object.
    RexNode window = rex.makeOver(anyType, SqlStdOperatorTable.AVG, emptyList, emptyList, e, null, null, true, false, false);
    DrillOptiq.toDrill(null, (RelNode) null, window);
  } catch (UserException e) {
    if (e.getMessage().contains(DrillOptiq.UNSUPPORTED_REX_NODE_ERROR)) {
      // got expected error return
      return;
    }
    Assert.fail("Hit exception with unexpected error message");
  }
  Assert.fail("Failed to raise the expected exception");
}
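The Drill test above only needs an empty order-key list, so it never constructs a RexFieldCollation directly. As a point of reference, the minimal sketch below (hypothetical class and method names, not part of the Drill test) builds a one-element list, assuming Calcite's public RexFieldCollation(RexNode, Set&lt;SqlKind&gt;) constructor:

import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeSystem;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexFieldCollation;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
import org.apache.calcite.sql.type.SqlTypeName;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;

// Hypothetical sketch, not taken from Drill: shows how a non-empty order-key
// list for RexBuilder.makeOver could be built.
public class RexFieldCollationSketch {

  // Builds a single order key: input column 0, sorted with NULLS FIRST.
  public static ImmutableList<RexFieldCollation> singleOrderKey() {
    RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    RexBuilder rexBuilder = new RexBuilder(typeFactory);
    RelDataType intType = typeFactory.createSqlType(SqlTypeName.INTEGER);
    // A RexFieldCollation pairs the ordered expression with the SqlKind
    // flags (for example NULLS_FIRST) that qualify it.
    RexNode orderExpr = rexBuilder.makeInputRef(intType, 0);
    RexFieldCollation collation =
        new RexFieldCollation(orderExpr, ImmutableSet.of(SqlKind.NULLS_FIRST));
    return ImmutableList.of(collation);
  }
}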
use of org.apache.calcite.rex.RexFieldCollation in project hive by apache.
the class ExprNodeConverter method getPSpec.
private PartitioningSpec getPSpec(RexWindow window) {
  PartitioningSpec partitioning = new PartitioningSpec();
  Schema schema = new Schema(tabAlias, inputRowType.getFieldList());
  if (window.partitionKeys != null && !window.partitionKeys.isEmpty()) {
    PartitionSpec pSpec = new PartitionSpec();
    for (RexNode pk : window.partitionKeys) {
      PartitionExpression exprSpec = new PartitionExpression();
      ASTNode astNode = pk.accept(new RexVisitor(schema));
      exprSpec.setExpression(astNode);
      pSpec.addExpression(exprSpec);
    }
    partitioning.setPartSpec(pSpec);
  }
  if (window.orderKeys != null && !window.orderKeys.isEmpty()) {
    OrderSpec oSpec = new OrderSpec();
    for (RexFieldCollation ok : window.orderKeys) {
      OrderExpression exprSpec = new OrderExpression();
      Order order = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? Order.ASC : Order.DESC;
      NullOrder nullOrder;
      if (ok.right.contains(SqlKind.NULLS_FIRST)) {
        nullOrder = NullOrder.NULLS_FIRST;
      } else if (ok.right.contains(SqlKind.NULLS_LAST)) {
        nullOrder = NullOrder.NULLS_LAST;
      } else {
        // Default
        nullOrder = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? NullOrder.NULLS_FIRST : NullOrder.NULLS_LAST;
      }
      exprSpec.setOrder(order);
      exprSpec.setNullOrder(nullOrder);
      ASTNode astNode = ok.left.accept(new RexVisitor(schema));
      exprSpec.setExpression(astNode);
      oSpec.addExpression(exprSpec);
    }
    partitioning.setOrderSpec(oSpec);
  }
  return partitioning;
}
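The else branch above applies Hive's defaulting rule when neither NULLS_FIRST nor NULLS_LAST appears among the collation flags: ascending keys default to nulls first, descending keys to nulls last. The hypothetical helper below (not part of ExprNodeConverter; the class and method names are made up) isolates that rule using only Calcite types:

import org.apache.calcite.rel.RelFieldCollation;
import org.apache.calcite.rex.RexFieldCollation;
import org.apache.calcite.sql.SqlKind;

// Illustrative helper mirroring the null-ordering logic above: explicit
// NULLS_FIRST / NULLS_LAST flags on the RexFieldCollation win, otherwise
// the null placement is derived from the sort direction.
public final class NullOrderDefaults {

  private NullOrderDefaults() {
  }

  public static RelFieldCollation.NullDirection nullDirectionOf(RexFieldCollation ok) {
    if (ok.right.contains(SqlKind.NULLS_FIRST)) {
      return RelFieldCollation.NullDirection.FIRST;
    }
    if (ok.right.contains(SqlKind.NULLS_LAST)) {
      return RelFieldCollation.NullDirection.LAST;
    }
    // Default used by the conversion above: nulls sort first for ascending
    // keys and last for descending keys.
    return ok.getDirection() == RelFieldCollation.Direction.ASCENDING
        ? RelFieldCollation.NullDirection.FIRST
        : RelFieldCollation.NullDirection.LAST;
  }
}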
use of org.apache.calcite.rex.RexFieldCollation in project hive by apache.
the class HiveWindowingFixRule method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
  Project project = call.rel(0);
  Aggregate aggregate = call.rel(1);
  // 1. We go over the expressions in the project operator
  // and we separate the windowing nodes that are result
  // of an aggregate expression from the rest of nodes
  final int groupingFields = aggregate.getGroupCount() + aggregate.getIndicatorCount();
  Set<String> projectExprsDigest = new HashSet<String>();
  Map<String, RexNode> windowingExprsDigestToNodes = new HashMap<String, RexNode>();
  for (RexNode r : project.getChildExps()) {
    if (r instanceof RexOver) {
      RexOver rexOverNode = (RexOver) r;
      // Operands
      for (RexNode operand : rexOverNode.getOperands()) {
        if (operand instanceof RexInputRef && ((RexInputRef) operand).getIndex() >= groupingFields) {
          windowingExprsDigestToNodes.put(operand.toString(), operand);
        }
      }
      // Partition keys
      for (RexNode partitionKey : rexOverNode.getWindow().partitionKeys) {
        if (partitionKey instanceof RexInputRef && ((RexInputRef) partitionKey).getIndex() >= groupingFields) {
          windowingExprsDigestToNodes.put(partitionKey.toString(), partitionKey);
        }
      }
      // Order keys
      for (RexFieldCollation orderKey : rexOverNode.getWindow().orderKeys) {
        if (orderKey.left instanceof RexInputRef && ((RexInputRef) orderKey.left).getIndex() >= groupingFields) {
          windowingExprsDigestToNodes.put(orderKey.left.toString(), orderKey.left);
        }
      }
    } else {
      projectExprsDigest.add(r.toString());
    }
  }
  // 2. We check whether there is a column needed by the
  // windowing operation that is missing in the
  // project expressions. For instance, if the windowing
  // operation is over an aggregation column, Hive expects
  // that column to be in the Select clause of the query.
  // The idea is that if there is a column missing, we will
  // replace the old project operator by two new project
  // operators:
  // - a project operator containing the original columns
  //   of the project operator plus all the columns that were
  //   missing
  // - a project on top of the previous one, that will take
  //   out the columns that were missing and were added by the
  //   previous project
  // These data structures are needed to create the new project
  // operator (below)
  final List<RexNode> belowProjectExprs = new ArrayList<RexNode>();
  final List<String> belowProjectColumnNames = new ArrayList<String>();
  // This data structure is needed to create the new project
  // operator (top)
  final List<RexNode> topProjectExprs = new ArrayList<RexNode>();
  final int projectCount = project.getChildExps().size();
  for (int i = 0; i < projectCount; i++) {
    belowProjectExprs.add(project.getChildExps().get(i));
    belowProjectColumnNames.add(project.getRowType().getFieldNames().get(i));
    topProjectExprs.add(RexInputRef.of(i, project.getRowType()));
  }
  boolean windowingFix = false;
  for (Entry<String, RexNode> windowingExpr : windowingExprsDigestToNodes.entrySet()) {
    if (!projectExprsDigest.contains(windowingExpr.getKey())) {
      windowingFix = true;
      belowProjectExprs.add(windowingExpr.getValue());
      int colIndex = 0;
      String alias = "window_col_" + colIndex;
      while (belowProjectColumnNames.contains(alias)) {
        alias = "window_col_" + (colIndex++);
      }
      belowProjectColumnNames.add(alias);
    }
  }
  if (!windowingFix) {
    // We do not need to do anything, we bail out
    return;
  }
  // 3. We need to fix it, we create the two replacement project
  // operators
  RelNode newProjectRel = projectFactory.createProject(aggregate, belowProjectExprs, belowProjectColumnNames);
  RelNode newTopProjectRel = projectFactory.createProject(newProjectRel, topProjectExprs, project.getRowType().getFieldNames());
  call.transformTo(newTopProjectRel);
}
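The heart of the rule is the digest comparison in step 2: every input referenced by a RexOver operand, partition key, or order key must also appear among the project expressions; otherwise the project is split into a lower project that adds the missing columns and a top project that hides them again. The standalone sketch below (made-up class name and digest values, chosen only for illustration) reduces that check to plain string sets to show when the fix is triggered:

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;

// Simplified, self-contained illustration of the digest check in step 2,
// using plain strings in place of RexNode digests.
public class WindowingFixCheck {
  public static void main(String[] args) {
    // Digests of the non-windowing expressions emitted by the Project.
    Set<String> projectExprsDigest = new HashSet<>(Arrays.asList("$0", "$1"));
    // Digests of the inputs referenced by RexOver operands, partition keys
    // and order keys (only those above the grouping fields are collected).
    Set<String> windowingExprsDigest = new LinkedHashSet<>(Arrays.asList("$1", "$2"));

    // The rule fires only if some windowing input is missing from the Project:
    // here "$2" is missing, so a lower Project must be added that also emits it.
    boolean windowingFix = !projectExprsDigest.containsAll(windowingExprsDigest);
    System.out.println("needs fix = " + windowingFix); // prints: needs fix = true
  }
}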