Use of org.voltdb.plannodes.OrderByPlanNode in project voltdb by VoltDB.
The class InlineOrderByIntoMergeReceive, method applyOptimization.
/**
* Convert ReceivePlanNodes into MergeReceivePlanNodes when the
* RECEIVE node's nearest parent is a window function. We won't
* have any inline limits or aggregates here, so this is somewhat
* simpler than the order by case.
*
* @param plan a window function plan node
* @return the same plan, with the ORDER BY/RECEIVE pair below it replaced
*         by a MergeReceive node with an inline ORDER BY when the
*         optimization applies
*/
private AbstractPlanNode applyOptimization(WindowFunctionPlanNode plan) {
assert (plan.getChildCount() == 1);
assert (plan.getChild(0) != null);
AbstractPlanNode child = plan.getChild(0);
assert (child != null);
// This optimization applies only if the window function's child is
// an order by node.
if (!(child instanceof OrderByPlanNode)) {
return plan;
}
OrderByPlanNode onode = (OrderByPlanNode) child;
child = onode.getChild(0);
// The order by node must have a receive node child
// for this optimization to work.
if (!(child instanceof ReceivePlanNode)) {
return plan;
}
ReceivePlanNode receiveNode = (ReceivePlanNode) child;
assert (receiveNode.getChildCount() == 1);
child = receiveNode.getChild(0);
// The Receive node needs a send node child.
assert (child instanceof SendPlanNode);
SendPlanNode sendNode = (SendPlanNode) child;
child = sendNode.getChild(0);
// The send node's child must be index-sortable, and the index must
// provide the sort order for this statement's single window function:
// getWindowFunctionUsesIndex() returns the number of the window
// function using the index, which must be 0 here.
if (!(child instanceof IndexSortablePlanNode)) {
return plan;
}
IndexSortablePlanNode indexed = (IndexSortablePlanNode) child;
if (indexed.indexUse().getWindowFunctionUsesIndex() != 0) {
return plan;
}
// Remove the Receive node and the Order by node
// and replace them with a MergeReceive node. Leave
// the order by node inline in the MergeReceive node,
// since we need it to calculate the merge.
plan.clearChildren();
receiveNode.removeFromGraph();
MergeReceivePlanNode mrnode = new MergeReceivePlanNode();
mrnode.addInlinePlanNode(onode);
mrnode.addAndLinkChild(sendNode);
plan.addAndLinkChild(mrnode);
return plan;
}
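The rewiring above is easier to see on a toy tree. The following standalone sketch uses a minimal, hypothetical ToyNode class (not VoltDB's plan node API) to replay the same steps: detach the window function's child chain, drop the RECEIVE node, inline the ORDER BY into a new MERGERECEIVE, and relink the SEND node.

import java.util.ArrayList;
import java.util.List;

// Minimal stand-in for VoltDB's plan nodes; everything here is hypothetical.
class ToyNode {
    final String name;
    final List<ToyNode> children = new ArrayList<>();
    final List<ToyNode> inline = new ArrayList<>();
    ToyNode(String name) { this.name = name; }
    void addAndLinkChild(ToyNode c) { children.add(c); }
    void clearChildren() { children.clear(); }
    void addInlinePlanNode(ToyNode n) { inline.add(n); }
}

public class MergeReceiveSketch {
    public static void main(String[] args) {
        // Before: WINDOWFUNCTION -> ORDERBY -> RECEIVE -> SEND -> INDEXSCAN
        ToyNode window = new ToyNode("WINDOWFUNCTION");
        ToyNode orderBy = new ToyNode("ORDERBY");
        ToyNode receive = new ToyNode("RECEIVE");
        ToyNode send = new ToyNode("SEND");
        ToyNode scan = new ToyNode("INDEXSCAN");
        window.addAndLinkChild(orderBy);
        orderBy.addAndLinkChild(receive);
        receive.addAndLinkChild(send);
        send.addAndLinkChild(scan);

        // The same rewiring applyOptimization() performs:
        window.clearChildren();                  // detach the ORDERBY chain
        orderBy.clearChildren();                 // RECEIVE leaves the graph
        ToyNode mergeReceive = new ToyNode("MERGERECEIVE");
        mergeReceive.addInlinePlanNode(orderBy); // keep the sort keys for the merge
        mergeReceive.addAndLinkChild(send);
        window.addAndLinkChild(mergeReceive);

        // After: WINDOWFUNCTION -> MERGERECEIVE(inline ORDERBY) -> SEND -> INDEXSCAN
        print(window, 0);
    }

    static void print(ToyNode n, int depth) {
        StringBuilder line = new StringBuilder();
        for (int i = 0; i < depth; i++) line.append("  ");
        line.append(n.name);
        if (!n.inline.isEmpty()) line.append(" (inline ").append(n.inline.get(0).name).append(")");
        System.out.println(line.toString());
        for (ToyNode c : n.children) print(c, depth + 1);
    }
}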
Use of org.voltdb.plannodes.OrderByPlanNode in project voltdb by VoltDB.
The class TestPlansDistinct, method checkDistinctWithGroupbyPlans.
/**
* Compare the plan for a GROUP BY query with DISTINCT against the plan
* for the equivalent GROUP BY query without DISTINCT.
*
* @param distinctSQL Group by query with distinct
* @param groupbySQL Group by query without distinct
* @param limitPushdown whether the baseline query is expected to push its
*        ORDER BY/LIMIT down to the distributed fragment
*/
protected void checkDistinctWithGroupbyPlans(String distinctSQL, String groupbySQL, boolean limitPushdown) {
List<AbstractPlanNode> pns1 = compileToFragments(distinctSQL);
List<AbstractPlanNode> pns2 = compileToFragments(groupbySQL);
//printExplainPlan(pns1);
//printExplainPlan(pns2);
assertTrue(pns1.get(0) instanceof SendPlanNode);
assertTrue(pns2.get(0) instanceof SendPlanNode);
AbstractPlanNode apn1, apn2;
apn1 = pns1.get(0).getChild(0);
apn2 = pns2.get(0).getChild(0);
boolean hasTopProjection1 = false;
if (apn1 instanceof ProjectionPlanNode) {
apn1 = apn1.getChild(0);
hasTopProjection1 = true;
}
boolean hasTopProjection2 = false;
if (apn2 instanceof ProjectionPlanNode) {
apn2 = apn2.getChild(0);
hasTopProjection2 = true;
}
// The DISTINCT is rewritten as a GROUP BY and added above the original
// GROUP BY node; in the complex aggregation case there may be another
// projection node in between.
boolean hasOrderby = false, hasLimit = false;
boolean groupByMergeReceive = false;
// infer the ORDERBY/LIMIT information from the base line query
if (apn2 instanceof OrderByPlanNode) {
hasOrderby = true;
if (apn2.getInlinePlanNode(PlanNodeType.LIMIT) != null) {
hasLimit = true;
}
apn2 = apn2.getChild(0);
} else if (apn2 instanceof LimitPlanNode) {
hasLimit = true;
apn2 = apn2.getChild(0);
} else if (apn2 instanceof MergeReceivePlanNode) {
assertNotNull(apn2.getInlinePlanNode(PlanNodeType.ORDERBY));
hasOrderby = true;
hasLimit = apn2.getInlinePlanNode(PlanNodeType.LIMIT) != null;
groupByMergeReceive = true;
}
// check the DISTINCT query plan
boolean distinctMergeReceive = false;
if (hasOrderby) {
if (apn1 instanceof OrderByPlanNode) {
if (hasLimit) {
// check inline limit
assertNotNull(apn1.getInlinePlanNode(PlanNodeType.LIMIT));
}
apn1 = apn1.getChild(0);
} else if (apn1 instanceof MergeReceivePlanNode) {
distinctMergeReceive = true;
assertNotNull(apn1.getInlinePlanNode(PlanNodeType.ORDERBY));
assertEquals(0, apn1.getChildCount());
} else {
fail("The distinctSQL top node is not OrderBy or MergeReceive.");
}
} else if (hasLimit) {
assertTrue(apn1 instanceof LimitPlanNode);
apn1 = apn1.getChild(0);
}
// Check DISTINCT group by plan node
if (distinctMergeReceive) {
AbstractPlanNode aggr = AggregatePlanNode.getInlineAggregationNode(apn1);
assertTrue(aggr instanceof AggregatePlanNode);
assertEquals(0, ((AggregatePlanNode) aggr).getAggregateTypesSize());
assertEquals(pns1.get(0).getOutputSchema().getColumns().size(), ((AggregatePlanNode) aggr).getGroupByExpressionsSize());
if (hasLimit) {
// check inline limit
assertNotNull(aggr.getInlinePlanNode(PlanNodeType.LIMIT));
}
} else {
assertTrue(apn1 instanceof HashAggregatePlanNode);
assertEquals(0, ((HashAggregatePlanNode) apn1).getAggregateTypesSize());
assertEquals(pns1.get(0).getOutputSchema().getColumns().size(), ((HashAggregatePlanNode) apn1).getGroupByExpressionsSize());
apn1 = apn1.getChild(0);
}
// check projection node for complex aggregation case
if (apn1 instanceof ProjectionPlanNode) {
apn1 = apn1.getChild(0);
assertFalse(hasTopProjection1);
}
if (apn2 instanceof ProjectionPlanNode) {
apn2 = apn2.getChild(0);
assertFalse(hasTopProjection2);
}
// check the rest of the plan nodes.
if (!distinctMergeReceive && !groupByMergeReceive) {
assertEquals(apn1.toExplainPlanString(), apn2.toExplainPlanString());
} else if (distinctMergeReceive && groupByMergeReceive) {
// When the MergeReceive optimization has been applied, the apn1 and
// apn2 nodes should not have any children.
assertEquals(0, apn1.getChildCount());
assertEquals(0, apn2.getChildCount());
}
// Distributed DISTINCT GROUP BY
if (pns1.size() > 1) {
if (!limitPushdown) {
assertEquals(pns1.get(1).toExplainPlanString(), pns2.get(1).toExplainPlanString());
return;
}
assertTrue(pns1.get(1) instanceof SendPlanNode);
assertTrue(pns2.get(1) instanceof SendPlanNode);
apn1 = pns1.get(1).getChild(0);
apn2 = pns2.get(1).getChild(0);
// Step over the baseline's pushed-down ORDER BY/LIMIT node; the
// DISTINCT plan has no counterpart because DISTINCT cannot be pushed down.
assertTrue(apn2 instanceof OrderByPlanNode);
assertNotNull(apn2.getInlinePlanNode(PlanNodeType.LIMIT));
apn2 = apn2.getChild(0);
// If either plan applied the MergeReceive optimization, the cost-based
// winners may produce completely different paths, so only compare the
// fragments when neither did.
if (!distinctMergeReceive && !groupByMergeReceive) {
assertEquals(apn1.toExplainPlanString(), apn2.toExplainPlanString());
}
}
}
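A typical call site pairs two queries that return the same rows and differ only in DISTINCT. The table and columns below are illustrative, not from the VoltDB test schema; the method would sit alongside the helper in the same test class:

// Hypothetical test method in TestPlansDistinct; T, A, B are made-up schema.
public void testDistinctOfGroupByColumns() {
    String distinctSQL = "SELECT DISTINCT A, SUM(B) FROM T GROUP BY A";
    String groupbySQL  = "SELECT A, SUM(B) FROM T GROUP BY A";
    // No ORDER BY/LIMIT in either query, so no limit pushdown is expected.
    checkDistinctWithGroupbyPlans(distinctSQL, groupbySQL, false);
}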
Use of org.voltdb.plannodes.OrderByPlanNode in project voltdb by VoltDB.
The class PlanAssembler, method checkLimitPushDownViability.
/**
* Check if we can push the limit node down.
*
* Return a mid-plan send node, if one exists and can host a
* distributed limit node.
* There is guaranteed to be at most a single receive/send pair.
* Abort the search if a node that a "limit" can't be pushed past
* is found before its receive node.
*
* Can only push past:
* * coordinatingAggregator: a distributed aggregator
* a copy of which has already been pushed down.
* Distributing a LIMIT to just above that aggregator is correct.
* (I've got some doubts that this is correct??? --paul)
*
* * order by: if the plan requires a sort, getNextSelectPlan()
* will have already added an ORDER BY.
* A distributed LIMIT will be added above a copy
* of that ORDER BY node.
*
* * projection: these have no effect on the application of limits.
*
* @param root
* @return If we can push the limit down, the send plan node is returned.
* Otherwise null -- when the plan is single-partition or when
* its "coordinator" part contains a push-blocking node type.
*/
protected AbstractPlanNode checkLimitPushDownViability(AbstractPlanNode root) {
AbstractPlanNode receiveNode = root;
List<ParsedColInfo> orderBys = m_parsedSelect.orderByColumns();
boolean orderByCoversAllGroupBy = m_parsedSelect.groupByIsAnOrderByPermutation();
while (!(receiveNode instanceof ReceivePlanNode)) {
// TODO: We might want to optimize/push down "limit" for some cases
if (!(receiveNode instanceof OrderByPlanNode) && !(receiveNode instanceof ProjectionPlanNode) && !isValidAggregateNodeForLimitPushdown(receiveNode, orderBys, orderByCoversAllGroupBy)) {
return null;
}
if (receiveNode instanceof OrderByPlanNode) {
// If the GROUP BY includes the partition column, the limit can still
// push down even when ordered by aggregate values; otherwise an ORDER
// BY on aggregate values blocks the pushdown.
if (!m_parsedSelect.hasPartitionColumnInGroupby() && isOrderByAggregationValue(m_parsedSelect.orderByColumns())) {
return null;
}
}
// Traverse...
if (receiveNode.getChildCount() == 0) {
return null;
}
// nothing that allows pushing past has multiple inputs
assert (receiveNode.getChildCount() == 1);
receiveNode = receiveNode.getChild(0);
}
return receiveNode.getChild(0);
}
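The loop above is a straight descent through "limit-transparent" nodes. This standalone sketch (a hypothetical node model, not VoltDB's) shows the shape of the walk: pass ORDERBY and PROJECTION nodes, stop at RECEIVE, and hand back its SEND child as the place to graft the distributed LIMIT. The real method also admits certain already-pushed-down aggregators, which are omitted here.

import java.util.Arrays;
import java.util.List;

public class LimitWalkSketch {
    enum Kind { ORDERBY, PROJECTION, RECEIVE, SEND, SCAN }

    static class Node {
        final Kind kind;
        final List<Node> children;
        Node(Kind kind, Node... children) {
            this.kind = kind;
            this.children = Arrays.asList(children);
        }
    }

    // Descend until the RECEIVE node; bail out on anything a LIMIT
    // cannot be pushed past, or if the plan is single-partition.
    static Node findPushDownTarget(Node root) {
        Node n = root;
        while (n.kind != Kind.RECEIVE) {
            if (n.kind != Kind.ORDERBY && n.kind != Kind.PROJECTION) return null;
            if (n.children.isEmpty()) return null; // ran out of plan: single-partition
            n = n.children.get(0);                 // nothing transparent has >1 child
        }
        return n.children.get(0);                  // the mid-plan SEND node
    }

    public static void main(String[] args) {
        // ORDERBY -> RECEIVE -> SEND -> SCAN, as for "ORDER BY ... LIMIT n"
        Node plan = new Node(Kind.ORDERBY,
                new Node(Kind.RECEIVE, new Node(Kind.SEND, new Node(Kind.SCAN))));
        System.out.println(findPushDownTarget(plan).kind); // prints SEND
    }
}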
Use of org.voltdb.plannodes.OrderByPlanNode in project voltdb by VoltDB.
The class PlanAssembler, method handleOrderBy.
/**
* Create an order by node as required by the statement and make it a parent of root.
* @param parsedStmt Parsed statement, for context
* @param root The root of the plan needing ordering
* @return new orderByNode (the new root) or the original root if no orderByNode was required.
*/
private static AbstractPlanNode handleOrderBy(AbstractParsedStmt parsedStmt, AbstractPlanNode root) {
assert (parsedStmt instanceof ParsedSelectStmt || parsedStmt instanceof ParsedUnionStmt || parsedStmt instanceof ParsedDeleteStmt);
if (!isOrderByNodeRequired(parsedStmt, root)) {
return root;
}
OrderByPlanNode orderByNode = buildOrderByPlanNode(parsedStmt.orderByColumns());
orderByNode.addAndLinkChild(root);
return orderByNode;
}
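This is the usual "wrap the root" idiom: the new node adopts the old root and becomes the root itself. A minimal generic sketch (hypothetical node type, not VoltDB's):

public class WrapRootSketch {
    static class Node {
        final String name;
        Node child;
        Node(String name) { this.name = name; }
        void addAndLinkChild(Node c) { child = c; }
    }

    // Mirrors handleOrderBy(): wrap only when ordering is actually required.
    static Node handleOrderBy(boolean orderByRequired, Node root) {
        if (!orderByRequired) {
            return root;               // plan untouched, same root
        }
        Node orderBy = new Node("ORDERBY");
        orderBy.addAndLinkChild(root); // old root becomes the child
        return orderBy;                // ORDERBY is the new root
    }

    public static void main(String[] args) {
        Node plan = handleOrderBy(true, new Node("SEQSCAN"));
        System.out.println(plan.name + " -> " + plan.child.name); // ORDERBY -> SEQSCAN
    }
}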
Use of org.voltdb.plannodes.OrderByPlanNode in project voltdb by VoltDB.
The class PlanAssembler, method getNextSelectPlan.
private CompiledPlan getNextSelectPlan() {
assert (m_subAssembler != null);
// A matview reaggregation template plan may have been initialized
// with a post-predicate expression moved from the statement's
// join tree prior to any subquery planning.
// Since normally subquery planning is driven from the join tree,
// any subqueries that are moved out of the join tree would need
// to be planned separately.
// This planning would need to be done prior to calling
// m_subAssembler.nextPlan()
// because it can have query partitioning implications.
// Under the current query limitations, the partitioning implications
// are very simple -- subqueries are not allowed in multipartition
// queries against partitioned data, so detection of a subquery in
// the same query as a matview reaggregation can just return an error,
// without any need for subquery planning here.
HashAggregatePlanNode reAggNode = null;
HashAggregatePlanNode mvReAggTemplate = m_parsedSelect.m_mvFixInfo.getReAggregationPlanNode();
if (mvReAggTemplate != null) {
reAggNode = new HashAggregatePlanNode(mvReAggTemplate);
AbstractExpression postPredicate = reAggNode.getPostPredicate();
if (postPredicate != null && postPredicate.hasSubquerySubexpression()) {
// For now, this is just a special case violation of the limitation on
// use of subquery expressions in MP queries on partitioned data.
// That special case was going undetected when we didn't flag it here.
m_recentErrorMsg = IN_EXISTS_SCALAR_ERROR_MESSAGE;
return null;
}
// // Something more along these lines would have to be enabled
// // to allow expression subqueries to be used in multi-partition
// // matview queries.
// if (!getBestCostPlanForExpressionSubQueries(subqueryExprs)) {
// // There was at least one sub-query and we should have a compiled plan for it
// return null;
// }
}
AbstractPlanNode subSelectRoot = m_subAssembler.nextPlan();
if (subSelectRoot == null) {
m_recentErrorMsg = m_subAssembler.m_recentErrorMsg;
return null;
}
AbstractPlanNode root = subSelectRoot;
boolean mvFixNeedsProjection = false;
/*
* If the access plan for the table in the join order was for a
* distributed table scan there must be a send/receive pair at the top
* EXCEPT for the special outer join case in which a replicated table
* was on the OUTER side of an outer join across from the (joined) scan
* of the partitioned table(s) (all of them) in the query. In that case,
* the one required send/receive pair is already in the plan below the
* inner side of a NestLoop join.
*/
if (m_partitioning.requiresTwoFragments()) {
boolean mvFixInfoCoordinatorNeeded = true;
boolean mvFixInfoEdgeCaseOuterJoin = false;
ArrayList<AbstractPlanNode> receivers = root.findAllNodesOfClass(AbstractReceivePlanNode.class);
if (receivers.size() == 1) {
// Edge cases: left outer join with replicated table.
if (m_parsedSelect.m_mvFixInfo.needed()) {
mvFixInfoCoordinatorNeeded = false;
AbstractPlanNode receiveNode = receivers.get(0);
if (receiveNode.getParent(0) instanceof NestLoopPlanNode) {
if (subSelectRoot.hasInlinedIndexScanOfTable(m_parsedSelect.m_mvFixInfo.getMVTableName())) {
return getNextSelectPlan();
}
List<AbstractPlanNode> nljs = receiveNode.findAllNodesOfType(PlanNodeType.NESTLOOP);
List<AbstractPlanNode> nlijs = receiveNode.findAllNodesOfType(PlanNodeType.NESTLOOPINDEX);
// This is like a single table case.
if (nljs.size() + nlijs.size() == 0) {
mvFixInfoEdgeCaseOuterJoin = true;
}
root = handleMVBasedMultiPartQuery(reAggNode, root, mvFixInfoEdgeCaseOuterJoin);
}
}
} else {
if (receivers.size() > 0) {
throw new PlanningErrorException("This special case join between an outer replicated table and " + "an inner partitioned table is too complex and is not supported.");
}
root = SubPlanAssembler.addSendReceivePair(root);
// Root is a receive node here.
assert (root instanceof ReceivePlanNode);
if (m_parsedSelect.mayNeedAvgPushdown()) {
m_parsedSelect.switchOptimalSuiteForAvgPushdown();
}
if (m_parsedSelect.m_tableList.size() > 1 && m_parsedSelect.m_mvFixInfo.needed() && subSelectRoot.hasInlinedIndexScanOfTable(m_parsedSelect.m_mvFixInfo.getMVTableName())) {
// The MV fix cannot handle an inlined index scan (nested loop index
// join) of the MV table, so reject this plan and try the next one.
return getNextSelectPlan();
}
}
root = handleAggregationOperators(root);
// Process the re-aggregate plan node and insert it into the plan.
if (m_parsedSelect.m_mvFixInfo.needed() && mvFixInfoCoordinatorNeeded) {
AbstractPlanNode tmpRoot = root;
root = handleMVBasedMultiPartQuery(reAggNode, root, mvFixInfoEdgeCaseOuterJoin);
if (root != tmpRoot) {
mvFixNeedsProjection = true;
}
}
} else {
/*
* There is no receive node and root is a single partition plan.
*/
// If there is no receive plan node and no distributed plan has been generated,
// the fix set for MV is not needed.
m_parsedSelect.m_mvFixInfo.setNeeded(false);
root = handleAggregationOperators(root);
}
// If the statement has window function expressions, handle the windowed
// operators; this may add a PartitionByPlanNode here.
if (m_parsedSelect.hasWindowFunctionExpression()) {
root = handleWindowedOperators(root);
}
if (m_parsedSelect.hasOrderByColumns()) {
root = handleOrderBy(m_parsedSelect, root);
if (m_parsedSelect.isComplexOrderBy() && root instanceof OrderByPlanNode) {
AbstractPlanNode child = root.getChild(0);
AbstractPlanNode grandChild = child.getChild(0);
// swap the ORDER BY and complex aggregate Projection node
if (child instanceof ProjectionPlanNode) {
root.unlinkChild(child);
child.unlinkChild(grandChild);
child.addAndLinkChild(root);
root.addAndLinkChild(grandChild);
// update the new root
root = child;
} else if (m_parsedSelect.hasDistinctWithGroupBy() && child.getPlanNodeType() == PlanNodeType.HASHAGGREGATE && grandChild.getPlanNodeType() == PlanNodeType.PROJECTION) {
AbstractPlanNode grandGrandChild = grandChild.getChild(0);
child.clearParents();
root.clearChildren();
grandGrandChild.clearParents();
grandChild.clearChildren();
grandChild.addAndLinkChild(root);
root.addAndLinkChild(grandGrandChild);
root = child;
}
}
}
// Add a projection node if the MV fix or the plan requires one.
if (mvFixNeedsProjection || needProjectionNode(root)) {
root = addProjection(root);
}
if (m_parsedSelect.hasLimitOrOffset()) {
root = handleSelectLimitOperator(root);
}
CompiledPlan plan = new CompiledPlan();
plan.rootPlanGraph = root;
plan.setReadOnly(true);
boolean orderIsDeterministic = m_parsedSelect.isOrderDeterministic();
boolean hasLimitOrOffset = m_parsedSelect.hasLimitOrOffset();
String contentDeterminismMessage = m_parsedSelect.getContentDeterminismMessage();
plan.statementGuaranteesDeterminism(hasLimitOrOffset, orderIsDeterministic, contentDeterminismMessage);
// Apply the micro-optimization:
// LIMIT push down, Table count / Counting Index, Optimized Min/Max
MicroOptimizationRunner.applyAll(plan, m_parsedSelect);
return plan;
}
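The ORDER BY / projection swap in the middle of this method is a pure tree rotation. The standalone sketch below (hypothetical node class, not VoltDB's plan node API) replays the unlink/relink sequence for the first branch, where the complex-aggregate projection sits directly under the ORDER BY; why the projection belongs on top is beyond this sketch, which only shows the relinking mechanics.

import java.util.ArrayList;
import java.util.List;

public class OrderBySwapSketch {
    static class Node {
        final String name;
        final List<Node> children = new ArrayList<>();
        Node(String name) { this.name = name; }
        void addAndLinkChild(Node c) { children.add(c); }
        void unlinkChild(Node c) { children.remove(c); }
    }

    public static void main(String[] args) {
        // Before: ORDERBY -> PROJECTION -> SCAN
        Node root = new Node("ORDERBY");
        Node child = new Node("PROJECTION");
        Node grandChild = new Node("SCAN");
        root.addAndLinkChild(child);
        child.addAndLinkChild(grandChild);

        // The same four steps as the VoltDB code above:
        root.unlinkChild(child);
        child.unlinkChild(grandChild);
        child.addAndLinkChild(root);      // projection moves on top
        root.addAndLinkChild(grandChild); // order by keeps the scan
        root = child;                     // update the new root

        // After: PROJECTION -> ORDERBY -> SCAN
        for (Node n = root; n != null; n = n.children.isEmpty() ? null : n.children.get(0)) {
            System.out.println(n.name);
        }
    }
}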