use of org.voltdb.plannodes.SendPlanNode in project voltdb by VoltDB.
the class InlineOrderByIntoMergeReceive method applyOptimization.
/**
* Convert ReceivePlanNodes into MergeReceivePlanNodes when the
* RECEIVE node's nearest parent is a window function. We won't
* have any inline limits or aggregates here, so this is somewhat
* simpler than the order by case.
*
* @param plan the window function plan node at the top of the candidate pattern
* @return the same plan node, with a MergeReceive node substituted below it when the optimization applies
*/
private AbstractPlanNode applyOptimization(WindowFunctionPlanNode plan) {
assert (plan.getChildCount() == 1);
assert (plan.getChild(0) != null);
AbstractPlanNode child = plan.getChild(0);
assert (child != null);
// The window function's child must be an order by node for this optimization to apply.
if (!(child instanceof OrderByPlanNode)) {
return plan;
}
OrderByPlanNode onode = (OrderByPlanNode) child;
child = onode.getChild(0);
// The order by node must have a Receive node child for this optimization to work.
if (!(child instanceof ReceivePlanNode)) {
return plan;
}
ReceivePlanNode receiveNode = (ReceivePlanNode) child;
assert (receiveNode.getChildCount() == 1);
child = receiveNode.getChild(0);
// The Receive node needs a send node child.
assert (child instanceof SendPlanNode);
SendPlanNode sendNode = (SendPlanNode) child;
child = sendNode.getChild(0);
// The send node's child must be index-sortable, and its index must provide the
// window function's ordering: getWindowFunctionUsesIndex() returns the number of
// the window function in the plan node which uses the index.
if (!(child instanceof IndexSortablePlanNode)) {
return plan;
}
IndexSortablePlanNode indexed = (IndexSortablePlanNode) child;
if (indexed.indexUse().getWindowFunctionUsesIndex() != 0) {
return plan;
}
// Replace the Receive node and the stand-alone Order By node
// with a MergeReceive node, keeping the Order By node inline
// in the MergeReceive node, since we need it to calculate the merge.
plan.clearChildren();
receiveNode.removeFromGraph();
MergeReceivePlanNode mrnode = new MergeReceivePlanNode();
mrnode.addInlinePlanNode(onode);
mrnode.addAndLinkChild(sendNode);
plan.addAndLinkChild(mrnode);
return plan;
}
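Not part of the VoltDB source: a minimal sketch of how the plan shape produced by this optimization could be verified, using only plan node APIs that already appear above (getChildCount, getChild, getInlinePlanNode). The class and method names are made up for illustration.

import org.voltdb.plannodes.AbstractPlanNode;
import org.voltdb.plannodes.MergeReceivePlanNode;
import org.voltdb.plannodes.SendPlanNode;
import org.voltdb.plannodes.WindowFunctionPlanNode;
import org.voltdb.types.PlanNodeType;

// Sketch only: checks for the WINDOWFUNCTION -> MERGERECEIVE (inline ORDER BY) -> SEND
// shape that applyOptimization() produces when the rewrite fires.
final class MergeReceiveShapeCheck {
    static boolean hasMergeReceiveShape(WindowFunctionPlanNode root) {
        if (root.getChildCount() != 1) {
            return false;
        }
        AbstractPlanNode child = root.getChild(0);
        if (!(child instanceof MergeReceivePlanNode)) {
            return false;
        }
        // The ORDER BY must now be inlined in the MergeReceive node,
        // and the MergeReceive must sit directly on top of the Send node.
        return child.getInlinePlanNode(PlanNodeType.ORDERBY) != null
                && child.getChildCount() == 1
                && child.getChild(0) instanceof SendPlanNode;
    }
}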
use of org.voltdb.plannodes.SendPlanNode in project voltdb by VoltDB.
the class TestPlansDistinct method checkDistinctWithGroupbyPlans.
/**
* Compare the plan for a GROUP BY query with DISTINCT against the plan
* for the same GROUP BY query without DISTINCT.
* @param distinctSQL Group by query with distinct
* @param groupbySQL Group by query without distinct
* @param limitPushdown Whether the ORDER BY/LIMIT of the baseline query is
* expected to be pushed down to the distributed fragment
*/
protected void checkDistinctWithGroupbyPlans(String distinctSQL, String groupbySQL, boolean limitPushdown) {
List<AbstractPlanNode> pns1 = compileToFragments(distinctSQL);
List<AbstractPlanNode> pns2 = compileToFragments(groupbySQL);
//printExplainPlan(pns1);
//printExplainPlan(pns2);
assertTrue(pns1.get(0) instanceof SendPlanNode);
assertTrue(pns2.get(0) instanceof SendPlanNode);
AbstractPlanNode apn1, apn2;
apn1 = pns1.get(0).getChild(0);
apn2 = pns2.get(0).getChild(0);
boolean hasTopProjection1 = false;
if (apn1 instanceof ProjectionPlanNode) {
apn1 = apn1.getChild(0);
hasTopProjection1 = true;
}
boolean hasTopProjection2 = false;
if (apn2 instanceof ProjectionPlanNode) {
apn2 = apn2.getChild(0);
hasTopProjection2 = true;
}
// The DISTINCT is rewritten as a GROUP BY and added above the original GROUP BY node;
// there may be another projection node in between for the complex aggregation case.
boolean hasOrderby = false, hasLimit = false;
boolean groupByMergeReceive = false;
// infer the ORDERBY/LIMIT information from the base line query
if (apn2 instanceof OrderByPlanNode) {
hasOrderby = true;
if (apn2.getInlinePlanNode(PlanNodeType.LIMIT) != null) {
hasLimit = true;
}
apn2 = apn2.getChild(0);
} else if (apn2 instanceof LimitPlanNode) {
hasLimit = true;
apn2 = apn2.getChild(0);
} else if (apn2 instanceof MergeReceivePlanNode) {
assertTrue(apn2.getInlinePlanNode(PlanNodeType.ORDERBY) != null);
hasOrderby = true;
hasLimit = apn2.getInlinePlanNode(PlanNodeType.LIMIT) != null;
groupByMergeReceive = true;
}
// check the DISTINCT query plan
boolean distinctMergeReceive = false;
if (hasOrderby) {
if (apn1 instanceof OrderByPlanNode) {
assertTrue(apn1 instanceof OrderByPlanNode);
if (hasLimit) {
// check inline limit
assertNotNull(apn1.getInlinePlanNode(PlanNodeType.LIMIT));
}
apn1 = apn1.getChild(0);
} else if (apn1 instanceof MergeReceivePlanNode) {
distinctMergeReceive = true;
assertNotNull(apn1.getInlinePlanNode(PlanNodeType.ORDERBY));
assertEquals(0, apn1.getChildCount());
} else {
fail("The distinctSQL top node is not OrderBy or MergeReceive.");
}
} else if (hasLimit) {
assertTrue(apn1 instanceof LimitPlanNode);
apn1 = apn1.getChild(0);
}
// Check DISTINCT group by plan node
if (distinctMergeReceive) {
AbstractPlanNode aggr = AggregatePlanNode.getInlineAggregationNode(apn1);
assertTrue(aggr instanceof AggregatePlanNode);
assertEquals(0, ((AggregatePlanNode) aggr).getAggregateTypesSize());
assertEquals(pns1.get(0).getOutputSchema().getColumns().size(), ((AggregatePlanNode) aggr).getGroupByExpressionsSize());
if (hasLimit) {
// check inline limit
assertNotNull(aggr.getInlinePlanNode(PlanNodeType.LIMIT));
}
} else {
assertTrue(apn1 instanceof HashAggregatePlanNode);
assertEquals(0, ((HashAggregatePlanNode) apn1).getAggregateTypesSize());
assertEquals(pns1.get(0).getOutputSchema().getColumns().size(), ((HashAggregatePlanNode) apn1).getGroupByExpressionsSize());
apn1 = apn1.getChild(0);
}
// check projection node for complex aggregation case
if (apn1 instanceof ProjectionPlanNode) {
apn1 = apn1.getChild(0);
assertFalse(hasTopProjection1);
}
if (apn2 instanceof ProjectionPlanNode) {
apn2 = apn2.getChild(0);
assertFalse(hasTopProjection2);
}
// check the rest of the plan nodes.
if (distinctMergeReceive == false && groupByMergeReceive == false) {
assertEquals(apn1.toExplainPlanString(), apn2.toExplainPlanString());
} else if (distinctMergeReceive == true && groupByMergeReceive == true) {
// In case of applied MergeReceive optimization the apn1 and apn2 nodes
// should not have any children
assertEquals(0, apn1.getChildCount());
assertEquals(0, apn2.getChildCount());
}
// Distributed DISTINCT GROUP BY
if (pns1.size() > 1) {
if (!limitPushdown) {
assertEquals(pns1.get(1).toExplainPlanString(), pns2.get(1).toExplainPlanString());
return;
}
assertTrue(pns1.get(1) instanceof SendPlanNode);
assertTrue(pns2.get(1) instanceof SendPlanNode);
apn1 = pns1.get(1).getChild(0);
apn2 = pns2.get(1).getChild(0);
// skip over the ORDER BY/LIMIT pushdown plan node in the baseline plan,
// because the DISTINCT case cannot be pushed down
assertTrue(apn2 instanceof OrderByPlanNode);
assertNotNull(apn2.getInlinePlanNode(PlanNodeType.LIMIT));
apn2 = apn2.getChild(0);
// Compare the distributed fragments only when neither plan applied the MergeReceive
// optimization; otherwise the cost-based winners may produce completely different paths.
if (distinctMergeReceive == false && groupByMergeReceive == false) {
assertEquals(apn1.toExplainPlanString(), apn2.toExplainPlanString());
}
}
}
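A hypothetical call to this helper; the table T and columns A and B are made up, and whether the ORDER BY/LIMIT is actually pushed down for this pair depends on the schema, so treat it as an illustration of the arguments rather than a guaranteed-passing test.

// Illustrative only: T, A and B are hypothetical schema names.
String distinctSQL = "SELECT DISTINCT A, SUM(B) FROM T GROUP BY A ORDER BY A LIMIT 5";
String groupbySQL = "SELECT A, SUM(B) FROM T GROUP BY A ORDER BY A LIMIT 5";
checkDistinctWithGroupbyPlans(distinctSQL, groupbySQL, true);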
use of org.voltdb.plannodes.SendPlanNode in project voltdb by VoltDB.
the class plannerTester method configCompileSave.
private static void configCompileSave(String config, boolean isSave) throws Exception {
if (!setUp(config)) {
return;
}
int size = m_stmts.size();
for (int i = 0; i < size; i++) {
String query = m_stmts.get(i);
String joinOrder = null;
if (query.startsWith("JOIN:")) {
String[] splitLine = query.split(":");
joinOrder = splitLine[1];
query = splitLine[2];
}
// Catch planning errors per statement so a single failure does not stop the run.
// This avoids cascading "file-not-found" errors.
try {
List<AbstractPlanNode> pnList = s_singleton.compileWithJoinOrderToFragments(query, joinOrder);
AbstractPlanNode pn = pnList.get(0);
if (pnList.size() == 2) {
// multi partition query plan
assert (pnList.get(1) instanceof SendPlanNode);
if (!pn.reattachFragment(pnList.get(1))) {
System.err.println("Receive plan node not found in reattachFragment.");
}
}
writePlanToFile(pn, m_workPath, config + ".plan" + i, m_stmts.get(i));
if (isSave) {
writePlanToFile(pn, m_baselinePath, config + ".plan" + i, m_stmts.get(i));
}
} catch (PlanningErrorException ex) {
System.err.printf("Planning error, line %d: %s\n", i, ex.getMessage());
}
}
if (isSave) {
System.out.println("Baseline files generated at: " + m_baselinePath);
}
}
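The loop above uses a "JOIN:" prefix convention to attach a forced join order to a statement. A small sketch of that convention with a hypothetical statement; note that the plain split(":") assumes the SQL text itself contains no colons.

// Hypothetical entry in m_stmts using the "JOIN:" prefix parsed above.
String line = "JOIN:R1,R2:SELECT R1.A FROM R1, R2 WHERE R1.A = R2.A";
String[] splitLine = line.split(":");
String joinOrder = splitLine[1]; // "R1,R2" - the forced join order
String query = splitLine[2];     // the SQL text handed to the planner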
use of org.voltdb.plannodes.SendPlanNode in project voltdb by VoltDB.
the class PlanAssembler method addCoordinatorToDMLNode.
/**
* Add a receive node, a sum or limit node, and a send node to the given DML node.
* If the DML target is a replicated table, it will add a limit node,
* otherwise it adds a sum node.
*
* @param dmlRoot the root of the distributed DML plan fragment
* @param isReplicated Whether or not the target table is a replicated table.
* @return the new coordinator-side root: a send node above the sum or limit node
*/
private static AbstractPlanNode addCoordinatorToDMLNode(AbstractPlanNode dmlRoot, boolean isReplicated) {
dmlRoot = SubPlanAssembler.addSendReceivePair(dmlRoot);
AbstractPlanNode sumOrLimitNode;
if (isReplicated) {
// Replicated table DML result doesn't need to be summed. All partitions should
// modify the same number of tuples in replicated table, so just pick the result from
// any partition.
LimitPlanNode limitNode = new LimitPlanNode();
sumOrLimitNode = limitNode;
limitNode.setLimit(1);
} else {
// create the nodes being pushed on top of dmlRoot.
AggregatePlanNode countNode = new AggregatePlanNode();
sumOrLimitNode = countNode;
// configure the count aggregate (sum) node to produce a single
// output column containing the result of the sum.
// Create a TVE that should match the tuple count input column
// This TVE is magic.
// really really need to make this less hard-wired
TupleValueExpression count_tve = new TupleValueExpression(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME, "modified_tuples", "modified_tuples", 0);
count_tve.setValueType(VoltType.BIGINT);
count_tve.setValueSize(VoltType.BIGINT.getLengthInBytesForFixedTypes());
countNode.addAggregate(ExpressionType.AGGREGATE_SUM, false, 0, count_tve);
// The output column. Not really based on a TVE (it is really the
// count expression represented by the count configured above). But
// this is sufficient for now. This looks identical to the above
// TVE but it's logically different so we'll create a fresh one.
TupleValueExpression tve = new TupleValueExpression(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME, "modified_tuples", "modified_tuples", 0);
tve.setValueType(VoltType.BIGINT);
tve.setValueSize(VoltType.BIGINT.getLengthInBytesForFixedTypes());
NodeSchema count_schema = new NodeSchema();
count_schema.addColumn(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME, "modified_tuples", "modified_tuples", tve);
countNode.setOutputSchema(count_schema);
}
// connect the nodes to build the graph
sumOrLimitNode.addAndLinkChild(dmlRoot);
SendPlanNode sendNode = new SendPlanNode();
sendNode.addAndLinkChild(sumOrLimitNode);
return sendNode;
}
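A sketch (not from the VoltDB source) of how the coordinator fragment built here could be sanity-checked for a partitioned target, assuming addSendReceivePair() leaves a Receive node directly beneath the node it returns; dmlRoot stands for any single-partition DML plan.

// Sketch only: expected coordinator shape is SEND -> SUM aggregate -> RECEIVE -> ...
AbstractPlanNode coordinatorRoot = addCoordinatorToDMLNode(dmlRoot, false);
assert coordinatorRoot instanceof SendPlanNode;
AbstractPlanNode sumNode = coordinatorRoot.getChild(0);
// The aggregate sums the per-partition modified-tuple counts.
assert sumNode instanceof AggregatePlanNode;
// The Receive node below it was added by SubPlanAssembler.addSendReceivePair().
assert sumNode.getChild(0) instanceof ReceivePlanNode;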
use of org.voltdb.plannodes.SendPlanNode in project voltdb by VoltDB.
the class QueryPlanner method compileFromXML.
/**
* Find the best plan given the VoltXMLElement. By best here we mean the plan
* which is scored the best according to our plan metric scoring. The plan
* metric scoring takes into account join order and index use, but it does
* not take into account the output schema. Consequently, we don't compute the
* output schema for the plan nodes until after the best plan is discovered.
*
* The order here is:
* <ol>
* <li>
* Parse the VoltXMLElement to create an AbstractParsedStmt. This has
* the side effect of loading lists of join orders and access paths for planning.
* For us, an access path is a way of scanning something scannable. It's a generalization
* of the notion of scanning a table or an index.
* </li>
* <li>
* Create a PlanAssembler, and ask it for the best cost plan. This uses the
* side data created by the parser in the previous step.
* </li>
* <li>
* If the plan is read only, slap a SendPlanNode on the front. Presumably
* an insert, delete or upsert will have added the SendPlanNode into the plan node tree already.
* </li>
* <li>
* Compute the output schema. This computes the output schema for each
* node recursively, using a node specific method.
* </li>
* <li>
* Resolve the column indices. This makes sure that the indices of all
* TVEs in the output columns refer to the right input columns.
* </li>
* <li>
* Do some final cleanup and verification of the plan. For example,
* we renumber the nodes starting at 1.
* </li>
* </ol>
*
* @param xmlSQL the SQL statement compiled to a VoltXMLElement
* @param paramValues user-supplied parameter values, if any
* @return the best CompiledPlan, or null if planning failed (m_recentErrorMsg holds the reason)
*/
private CompiledPlan compileFromXML(VoltXMLElement xmlSQL, String[] paramValues) {
// Get a parsed statement from the xml
// The callers of compilePlan are ready to catch any exceptions thrown here.
AbstractParsedStmt parsedStmt = AbstractParsedStmt.parse(m_sql, xmlSQL, paramValues, m_db, m_joinOrder);
if (parsedStmt == null) {
m_recentErrorMsg = "Failed to parse SQL statement: " + getOriginalSql();
return null;
}
if (m_isUpsert) {
// no insert/upsert with joins
if (parsedStmt.m_tableList.size() != 1) {
m_recentErrorMsg = "UPSERT is supported only with one single table: " + getOriginalSql();
return null;
}
Table tb = parsedStmt.m_tableList.get(0);
Constraint pkey = null;
for (Constraint ct : tb.getConstraints()) {
if (ct.getType() == ConstraintType.PRIMARY_KEY.getValue()) {
pkey = ct;
break;
}
}
if (pkey == null) {
m_recentErrorMsg = "Unsupported UPSERT table without primary key: " + getOriginalSql();
return null;
}
}
m_planSelector.outputParsedStatement(parsedStmt);
// Init Assembler. Each plan assembler requires a new instance of the PlanSelector
// to keep track of the best plan
PlanAssembler assembler = new PlanAssembler(m_db, m_partitioning, (PlanSelector) m_planSelector.clone());
// find the plan with minimal cost
CompiledPlan bestPlan = assembler.getBestCostPlan(parsedStmt);
// make sure we got a winner
if (bestPlan == null) {
if (m_debuggingStaticModeToRetryOnError) {
assembler.getBestCostPlan(parsedStmt);
}
m_recentErrorMsg = assembler.getErrorMessage();
if (m_recentErrorMsg == null) {
m_recentErrorMsg = "Unable to plan for statement. Error unknown.";
}
return null;
}
if (bestPlan.isReadOnly()) {
SendPlanNode sendNode = new SendPlanNode();
// connect the nodes to build the graph
sendNode.addAndLinkChild(bestPlan.rootPlanGraph);
// this plan is final, generate schema and resolve all the column index references
bestPlan.rootPlanGraph = sendNode;
}
// Execute the generateOutputSchema and resolveColumnIndexes once for the best plan
bestPlan.rootPlanGraph.generateOutputSchema(m_db);
bestPlan.rootPlanGraph.resolveColumnIndexes();
if (parsedStmt instanceof ParsedSelectStmt) {
List<SchemaColumn> columns = bestPlan.rootPlanGraph.getOutputSchema().getColumns();
((ParsedSelectStmt) parsedStmt).checkPlanColumnMatch(columns);
}
// Output the best plan debug info
assembler.finalizeBestCostPlan();
// reset all the plan node ids for a given plan
// this makes the ids deterministic
bestPlan.resetPlanNodeIds(1);
// split up the plan everywhere we see send/receive into multiple plan fragments
List<AbstractPlanNode> receives = bestPlan.rootPlanGraph.findAllNodesOfClass(AbstractReceivePlanNode.class);
if (receives.size() > 1) {
// Too many receive nodes for the two-fragment plan limit.
m_recentErrorMsg = "This join of multiple partitioned tables is too complex. " + "Consider simplifying its subqueries: " + getOriginalSql();
return null;
}
/*/ enable for debug ...
if (receives.size() > 1) {
System.out.println(plan.rootPlanGraph.toExplainPlanString());
}
// ... enable for debug */
if (receives.size() == 1) {
AbstractReceivePlanNode recvNode = (AbstractReceivePlanNode) receives.get(0);
fragmentize(bestPlan, recvNode);
}
return bestPlan;
}
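A hedged sketch of the caller-side pattern implied by the null returns above: when no plan comes back, m_recentErrorMsg carries the reason. Whether the surrounding QueryPlanner code wraps it in a PlanningErrorException exactly like this is an assumption.

// Sketch of caller-side handling (assumption, not the verbatim QueryPlanner code):
CompiledPlan plan = compileFromXML(xmlSQL, paramValues);
if (plan == null) {
    // m_recentErrorMsg was set by compileFromXML() before returning null.
    throw new PlanningErrorException(m_recentErrorMsg);
}
// On success the plan is final: output schema generated, column indexes resolved,
// plan node ids renumbered from 1, and split into two fragments if multi-partition.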