Use of org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec in project hive by apache.
The class WindowingSpec, method effectiveWindowFrame:
/*
 * - A Window Frame that has only a start boundary is interpreted as:
 *   BETWEEN <start boundary> AND CURRENT ROW
 * - A Window Specification with an Order Specification and no Window Frame is
 *   interpreted as: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 * - A Window Specification with no Order and no Window Frame is interpreted as:
 *   ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
 */
private void effectiveWindowFrame(WindowFunctionSpec wFn) throws SemanticException {
  WindowSpec wdwSpec = wFn.getWindowSpec();
  WindowFunctionInfo wFnInfo = FunctionRegistry.getWindowFunctionInfo(wFn.getName());
  boolean supportsWindowing = wFnInfo == null ? true : wFnInfo.isSupportsWindow();
  WindowFrameSpec wFrame = wdwSpec.getWindowFrame();
  OrderSpec orderSpec = wdwSpec.getOrder();
  if (wFrame == null) {
    if (!supportsWindowing) {
      if (wFn.getName().toLowerCase().equals(FunctionRegistry.LAST_VALUE_FUNC_NAME) && orderSpec != null) {
        /*
         * last_value: when a Sort Key is specified, last_value should return the
         * last value among rows with the same Sort Key value.
         */
        wFrame = new WindowFrameSpec(WindowType.ROWS,
            new BoundarySpec(Direction.CURRENT),
            new BoundarySpec(Direction.FOLLOWING, 0));
      } else {
        wFrame = new WindowFrameSpec(WindowType.ROWS,
            new BoundarySpec(Direction.PRECEDING, BoundarySpec.UNBOUNDED_AMOUNT),
            new BoundarySpec(Direction.FOLLOWING, BoundarySpec.UNBOUNDED_AMOUNT));
      }
    } else if (orderSpec == null) {
      wFrame = new WindowFrameSpec(WindowType.ROWS,
          new BoundarySpec(Direction.PRECEDING, BoundarySpec.UNBOUNDED_AMOUNT),
          new BoundarySpec(Direction.FOLLOWING, BoundarySpec.UNBOUNDED_AMOUNT));
    } else {
      wFrame = new WindowFrameSpec(WindowType.RANGE,
          new BoundarySpec(Direction.PRECEDING, BoundarySpec.UNBOUNDED_AMOUNT),
          new BoundarySpec(Direction.CURRENT));
    }
    wdwSpec.setWindowFrame(wFrame);
  } else if (wFrame.getEnd() == null) {
    wFrame.setEnd(new BoundarySpec(Direction.CURRENT));
  }
}
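For reference, the three default frames this method can install read as follows when built directly. This is a minimal sketch, not Hive source; it uses only the constructors visible above and assumes the same nested types (WindowFrameSpec, BoundarySpec, WindowType, Direction) are in scope.

// Sketch: the default frames effectiveWindowFrame can install.

// ORDER BY present, no frame:
// RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
WindowFrameSpec orderedDefault = new WindowFrameSpec(WindowType.RANGE,
    new BoundarySpec(Direction.PRECEDING, BoundarySpec.UNBOUNDED_AMOUNT),
    new BoundarySpec(Direction.CURRENT));

// No ORDER BY, no frame:
// ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
WindowFrameSpec unorderedDefault = new WindowFrameSpec(WindowType.ROWS,
    new BoundarySpec(Direction.PRECEDING, BoundarySpec.UNBOUNDED_AMOUNT),
    new BoundarySpec(Direction.FOLLOWING, BoundarySpec.UNBOUNDED_AMOUNT));

// last_value with ORDER BY: ROWS frame from the current row to 0 FOLLOWING,
// i.e. the last value among rows sharing the current row's sort-key value
// (per the source comment above).
WindowFrameSpec lastValueDefault = new WindowFrameSpec(WindowType.ROWS,
    new BoundarySpec(Direction.CURRENT),
    new BoundarySpec(Direction.FOLLOWING, 0));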
Use of org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec in project hive by apache.
The class SemanticAnalyzer, method processPTFPartitionSpec:
private PartitioningSpec processPTFPartitionSpec(ASTNode pSpecNode) {
  PartitioningSpec partitioning = new PartitioningSpec();
  ASTNode firstChild = (ASTNode) pSpecNode.getChild(0);
  int type = firstChild.getType();
  if (type == HiveParser.TOK_DISTRIBUTEBY || type == HiveParser.TOK_CLUSTERBY) {
    PartitionSpec pSpec = processPartitionSpec(firstChild);
    partitioning.setPartSpec(pSpec);
    ASTNode sortNode = pSpecNode.getChildCount() > 1 ? (ASTNode) pSpecNode.getChild(1) : null;
    if (sortNode != null) {
      OrderSpec oSpec = processOrderSpec(sortNode);
      partitioning.setOrderSpec(oSpec);
    }
  } else if (type == HiveParser.TOK_SORTBY || type == HiveParser.TOK_ORDERBY) {
    OrderSpec oSpec = processOrderSpec(firstChild);
    partitioning.setOrderSpec(oSpec);
  }
  return partitioning;
}
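The method dispatches purely on the token type of the first child. The following hedged sketch summarizes the branching; the helper name describePTFSpecChild is invented for this illustration, and only the ASTNode/HiveParser identifiers already used above are assumed.

// Sketch only: summarizes processPTFPartitionSpec's dispatch.
static String describePTFSpecChild(ASTNode firstChild) {
  int type = firstChild.getType();
  if (type == HiveParser.TOK_DISTRIBUTEBY || type == HiveParser.TOK_CLUSTERBY) {
    return "partition keys; a second child, if present, carries the sort keys";
  } else if (type == HiveParser.TOK_SORTBY || type == HiveParser.TOK_ORDERBY) {
    return "order keys only; no partitioning";
  }
  return "no partitioning or ordering recognized";
}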
Use of org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec in project hive by apache.
The class SemanticAnalyzer, method processOrderSpec:
private OrderSpec processOrderSpec(ASTNode sortNode) {
  OrderSpec oSpec = new OrderSpec();
  int exprCnt = sortNode.getChildCount();
  for (int i = 0; i < exprCnt; i++) {
    OrderExpression exprSpec = new OrderExpression();
    ASTNode orderSpec = (ASTNode) sortNode.getChild(i);
    ASTNode nullOrderSpec = (ASTNode) orderSpec.getChild(0);
    exprSpec.setExpression((ASTNode) nullOrderSpec.getChild(0));
    if (orderSpec.getType() == HiveParser.TOK_TABSORTCOLNAMEASC) {
      exprSpec.setOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.ASC);
    } else {
      exprSpec.setOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.DESC);
    }
    if (nullOrderSpec.getType() == HiveParser.TOK_NULLS_FIRST) {
      exprSpec.setNullOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder.NULLS_FIRST);
    } else {
      exprSpec.setNullOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder.NULLS_LAST);
    }
    oSpec.addExpression(exprSpec);
  }
  return oSpec;
}
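To see what a single parsed key turns into, here is a hedged sketch that hand-builds the OrderSpec this method would produce for one "expr ASC NULLS LAST" sort key; sortKey is a placeholder for the already-extracted expression ASTNode, and only setters visible above are used.

// Sketch: the OrderSpec equivalent of one "expr ASC NULLS LAST" key.
// "sortKey" stands in for the expression ASTNode pulled out of the tree.
OrderExpression exprSpec = new OrderExpression();
exprSpec.setExpression(sortKey);
exprSpec.setOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.ASC);
exprSpec.setNullOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder.NULLS_LAST);

OrderSpec oSpec = new OrderSpec();
oSpec.addExpression(exprSpec);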
Use of org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec in project hive by apache.
The class ExprNodeConverter, method getPSpec:
private PartitioningSpec getPSpec(RexWindow window) {
  PartitioningSpec partitioning = new PartitioningSpec();
  Schema schema = new Schema(tabAlias, inputRowType.getFieldList());
  if (window.partitionKeys != null && !window.partitionKeys.isEmpty()) {
    PartitionSpec pSpec = new PartitionSpec();
    for (RexNode pk : window.partitionKeys) {
      PartitionExpression exprSpec = new PartitionExpression();
      ASTNode astNode = pk.accept(new RexVisitor(schema));
      exprSpec.setExpression(astNode);
      pSpec.addExpression(exprSpec);
    }
    partitioning.setPartSpec(pSpec);
  }
  if (window.orderKeys != null && !window.orderKeys.isEmpty()) {
    OrderSpec oSpec = new OrderSpec();
    for (RexFieldCollation ok : window.orderKeys) {
      OrderExpression exprSpec = new OrderExpression();
      Order order = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? Order.ASC : Order.DESC;
      NullOrder nullOrder;
      if (ok.right.contains(SqlKind.NULLS_FIRST)) {
        nullOrder = NullOrder.NULLS_FIRST;
      } else if (ok.right.contains(SqlKind.NULLS_LAST)) {
        nullOrder = NullOrder.NULLS_LAST;
      } else {
        // Default: nulls sort first on ascending keys, last on descending keys.
        nullOrder = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? NullOrder.NULLS_FIRST : NullOrder.NULLS_LAST;
      }
      exprSpec.setOrder(order);
      exprSpec.setNullOrder(nullOrder);
      ASTNode astNode = ok.left.accept(new RexVisitor(schema));
      exprSpec.setExpression(astNode);
      oSpec.addExpression(exprSpec);
    }
    partitioning.setOrderSpec(oSpec);
  }
  return partitioning;
}
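The fallback branch encodes the default null placement applied when the Calcite collation carries neither NULLS FIRST nor NULLS LAST. Isolated into a helper, the rule is as below; defaultNullOrder is a hypothetical name used only for this illustration.

// Sketch: default null placement when the collation specifies none.
// Hypothetical helper, extracted from the else-branch above.
static NullOrder defaultNullOrder(RelFieldCollation.Direction direction) {
  return direction == RelFieldCollation.Direction.ASCENDING
      ? NullOrder.NULLS_FIRST
      : NullOrder.NULLS_LAST;
}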
Use of org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec in project hive by apache.
The class PTFTranslator, method componentize:
public static ArrayList<PTFInvocationSpec> componentize(PTFInvocationSpec ptfInvocation) throws SemanticException {
  ArrayList<PTFInvocationSpec> componentInvocations = new ArrayList<PTFInvocationSpec>();
  Stack<PTFInputSpec> ptfChain = new Stack<PTFInputSpec>();
  PTFInputSpec spec = ptfInvocation.getFunction();
  while (spec instanceof PartitionedTableFunctionSpec) {
    ptfChain.push(spec);
    spec = spec.getInput();
  }
  PartitionedTableFunctionSpec prevFn = (PartitionedTableFunctionSpec) ptfChain.pop();
  applyConstantPartition(prevFn);
  PartitionSpec partSpec = prevFn.getPartition();
  OrderSpec orderSpec = prevFn.getOrder();
  if (partSpec == null) {
    // This should have been caught before trying to componentize.
    throw new SemanticException("No Partitioning specification specified at start of a PTFChain");
  }
  if (orderSpec == null) {
    orderSpec = new OrderSpec(partSpec);
    prevFn.setOrder(orderSpec);
  }
  while (!ptfChain.isEmpty()) {
    PartitionedTableFunctionSpec currentFn = (PartitionedTableFunctionSpec) ptfChain.pop();
    String fnName = currentFn.getName();
    if (!FunctionRegistry.isTableFunction(fnName)) {
      throw new SemanticException(ErrorMsg.INVALID_FUNCTION.getMsg(fnName));
    }
    boolean transformsRawInput = FunctionRegistry.getTableFunctionResolver(fnName).transformsRawInput();
    /*
     * If the current table function has no partition info specified, inherit it
     * from the PTF up the chain.
     */
    if (currentFn.getPartition() == null) {
      currentFn.setPartition(prevFn.getPartition());
      if (currentFn.getOrder() == null) {
        currentFn.setOrder(prevFn.getOrder());
      }
    }
    /*
     * If the current table function has no order info specified, default to
     * ordering by its partition columns.
     */
    if (currentFn.getOrder() == null) {
      currentFn.setOrder(new OrderSpec(currentFn.getPartition()));
    }
    if (!currentFn.getPartition().equals(partSpec) || !currentFn.getOrder().equals(orderSpec) || transformsRawInput) {
      PTFInvocationSpec component = new PTFInvocationSpec();
      component.setFunction(prevFn);
      componentInvocations.add(component);
      PTFQueryInputSpec cQInSpec = new PTFQueryInputSpec();
      cQInSpec.setType(PTFQueryInputType.PTFCOMPONENT);
      currentFn.setInput(cQInSpec);
    }
    prevFn = currentFn;
    partSpec = prevFn.getPartition();
    orderSpec = prevFn.getOrder();
  }
  componentInvocations.add(ptfInvocation);
  return componentInvocations;
}
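When the split condition fires (the current function repartitions, reorders, or transforms its raw input), the rewiring looks like the following hedged sketch; innerFn and outerFn are hypothetical PartitionedTableFunctionSpec instances where outerFn repartitions, and only calls visible in componentize are used.

// Sketch: splitting a two-function chain into components.
// The inner function becomes its own component...
PTFInvocationSpec component = new PTFInvocationSpec();
component.setFunction(innerFn);
componentInvocations.add(component);

// ...and the outer function is rewired to read the component's output.
PTFQueryInputSpec cQInSpec = new PTFQueryInputSpec();
cQInSpec.setType(PTFQueryInputType.PTFCOMPONENT);
outerFn.setInput(cQInSpec);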