Use of org.voltdb.plannodes.AbstractScanPlanNode in project voltdb by VoltDB.
From the class plannerTester, method diffScans.
public static boolean diffScans(AbstractPlanNode oldpn, AbstractPlanNode newpn) {
m_changedSQL = false;
boolean noDiff = true;
ArrayList<AbstractScanPlanNode> list1 = oldpn.getScanNodeList();
ArrayList<AbstractScanPlanNode> list2 = newpn.getScanNodeList();
int size1 = list1.size();
int size2 = list2.size();
int max = Math.max(size1, size2);
int min = Math.min(size1, size2);
diffPair intdiffPair = new diffPair(0, 0);
ArrayList<String> messages = new ArrayList<String>();
if (max == 0) {
messages.add("0 scan statement");
} else {
AbstractScanPlanNode spn1 = null;
AbstractScanPlanNode spn2 = null;
if (size1 != size2) {
intdiffPair.set(size1, size2);
messages.add("Scan time diff : " + "\n" + intdiffPair.toString() + "\nSQL statement might be changed");
m_changedSQL = true;
for (int i = 0; i < min; i++) {
spn1 = list1.get(i);
spn2 = list2.get(i);
scanNodeDiffModule(i, spn1, spn2, messages);
}
// the list sizes differ: report the extra scan nodes in the longer list
if (size2 < max) {
for (int i = min; i < max; i++) {
spn1 = list1.get(i);
spn2 = null;
scanNodeDiffModule(i, spn1, spn2, messages);
}
} else if (size1 < max) {
for (int i = min; i < max; i++) {
spn1 = null;
spn2 = list2.get(i);
scanNodeDiffModule(i, spn1, spn2, messages);
}
}
} else {
messages.add("same leaf size");
if (max == 1) {
messages.add("Single scan plan");
spn1 = list1.get(0);
spn2 = list2.get(0);
scanNodeDiffModule(0, spn1, spn2, messages);
} else {
messages.add("Join query");
for (int i = 0; i < max; i++) {
spn1 = list1.get(i);
spn2 = list2.get(i);
scanNodeDiffModule(i, spn1, spn2, messages);
}
}
}
}
for (String msg : messages) {
if (msg.contains("diff") || msg.contains("Diff")) {
noDiff = false;
break;
}
}
m_diffMessages.addAll(messages);
return noDiff;
}
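The comparison above is driven by AbstractPlanNode.getScanNodeList(), which collects every AbstractScanPlanNode reachable from a plan root. Below is a minimal sketch of that same traversal outside plannerTester, just to show the API shape; the summarizeScans helper is hypothetical, and the getTargetTableName() accessor on the scan node is an assumption here, not something shown in the snippet above.

// Hypothetical helper: list each scan below a plan root, in the order diffScans compares them.
static String summarizeScans(AbstractPlanNode root) {
    StringBuilder sb = new StringBuilder();
    int i = 0;
    for (AbstractScanPlanNode scan : root.getScanNodeList()) {
        sb.append(i++).append(": ")
          .append(scan.getTargetTableName())                          // assumed accessor: the table this scan reads
          .append(" (").append(scan.getPlanNodeType()).append(")\n"); // e.g. SEQSCAN or INDEXSCAN
    }
    return (sb.length() == 0) ? "0 scan statement\n" : sb.toString();
}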
Use of org.voltdb.plannodes.AbstractScanPlanNode in project voltdb by VoltDB.
From the class PlanAssembler, method getNextDeletePlan.
private CompiledPlan getNextDeletePlan() {
assert (m_subAssembler != null);
// figure out which table we're deleting from
assert (m_parsedDelete.m_tableList.size() == 1);
Table targetTable = m_parsedDelete.m_tableList.get(0);
AbstractPlanNode subSelectRoot = m_subAssembler.nextPlan();
if (subSelectRoot == null) {
return null;
}
// ENG-4909 Bug: currently disable NESTLOOPINDEX plan for IN
if (disableNestedLoopIndexJoinForInComparison(subSelectRoot, m_parsedDelete)) {
// simply jumps ahead to the next plan (if any).
return getNextDeletePlan();
}
boolean isSinglePartitionPlan = m_partitioning.wasSpecifiedAsSingle() || m_partitioning.isInferredSingle();
// generate the delete node with the right target table
DeletePlanNode deleteNode = new DeletePlanNode();
deleteNode.setTargetTableName(targetTable.getTypeName());
assert (subSelectRoot instanceof AbstractScanPlanNode);
// If the DELETE affects every row of the table, skip the scan/projection nodes and use a truncate delete node.
if (deleteIsTruncate(m_parsedDelete, subSelectRoot)) {
deleteNode.setTruncate(true);
} else {
// User may have specified an ORDER BY ... LIMIT clause
if (m_parsedDelete.orderByColumns().size() > 0 && !isSinglePartitionPlan && !targetTable.getIsreplicated()) {
throw new PlanningErrorException("DELETE statements affecting partitioned tables must " + "be able to execute on one partition " + "when ORDER BY and LIMIT or OFFSET clauses " + "are present.");
}
boolean needsOrderByNode = isOrderByNodeRequired(m_parsedDelete, subSelectRoot);
AbstractExpression addressExpr = new TupleAddressExpression();
NodeSchema proj_schema = new NodeSchema();
// This planner-created column is magic.
proj_schema.addColumn(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME, "tuple_address", "tuple_address", addressExpr);
if (needsOrderByNode) {
// Projection will need to pass the sort keys to the order by node
for (ParsedColInfo col : m_parsedDelete.orderByColumns()) {
proj_schema.addColumn(col.asSchemaColumn());
}
}
ProjectionPlanNode projectionNode = new ProjectionPlanNode(proj_schema);
subSelectRoot.addInlinePlanNode(projectionNode);
AbstractPlanNode root = subSelectRoot;
if (needsOrderByNode) {
OrderByPlanNode ob = buildOrderByPlanNode(m_parsedDelete.orderByColumns());
ob.addAndLinkChild(root);
root = ob;
}
if (m_parsedDelete.hasLimitOrOffset()) {
assert (m_parsedDelete.orderByColumns().size() > 0);
root.addInlinePlanNode(m_parsedDelete.limitPlanNode());
}
deleteNode.addAndLinkChild(root);
}
CompiledPlan plan = new CompiledPlan();
plan.setReadOnly(false);
// check non-determinism status
// treat this as deterministic for reporting purposes:
// delete statements produce just one row that is the
// number of rows affected
boolean orderIsDeterministic = true;
boolean hasLimitOrOffset = m_parsedDelete.hasLimitOrOffset();
// The delete statement cannot be inherently content non-deterministic.
// So, the last parameter is always null.
plan.statementGuaranteesDeterminism(hasLimitOrOffset, orderIsDeterministic, null);
if (isSinglePartitionPlan) {
plan.rootPlanGraph = deleteNode;
return plan;
}
// Add a compensating sum of modified tuple counts or a limit 1
// AND a send on top of the union-like receive node.
boolean isReplicated = targetTable.getIsreplicated();
plan.rootPlanGraph = addCoordinatorToDMLNode(deleteNode, isReplicated);
return plan;
}
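For orientation, the non-truncate, single-partition shape that this method builds is a DeletePlanNode sitting on the scan, with the tuple-address projection inlined into the scan (plus an optional ORDER BY in between). Here is a hand-assembled sketch of that fragment; the SeqScanPlanNode default constructor, its setTargetTableName() call, and the table name are assumptions standing in for the sub-plan that m_subAssembler returns.

// Sketch only: the assembler derives all of this from the parsed statement.
NodeSchema proj_schema = new NodeSchema();
proj_schema.addColumn(AbstractParsedStmt.TEMP_TABLE_NAME, AbstractParsedStmt.TEMP_TABLE_NAME,
        "tuple_address", "tuple_address", new TupleAddressExpression());

AbstractScanPlanNode scan = new SeqScanPlanNode();            // assumed stand-in for subSelectRoot
scan.setTargetTableName("MY_TABLE");                          // hypothetical table name
scan.addInlinePlanNode(new ProjectionPlanNode(proj_schema));  // scan emits only the tuple address

DeletePlanNode delete = new DeletePlanNode();
delete.setTargetTableName("MY_TABLE");
delete.addAndLinkChild(scan);                                 // delete consumes the scanned tuple addresses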
Use of org.voltdb.plannodes.AbstractScanPlanNode in project voltdb by VoltDB.
From the class PlanAssembler, method isOrderByNodeRequired.
/**
* Determine if an OrderByPlanNode is needed. This may return false if the
* statement has no ORDER BY clause, or if the subtree is already producing
* rows in the correct order. Note that a hash or partial aggregate node
* between the root and the scan or join node will cause this to return true,
* while a serial aggregate node will not.
*
* @param parsedStmt The statement whose plan may need an OrderByPlanNode
* @param root The subtree which may need its output tuples ordered
* @return true if the plan needs an OrderByPlanNode, false otherwise
*/
private static boolean isOrderByNodeRequired(AbstractParsedStmt parsedStmt, AbstractPlanNode root) {
// Only sort when the statement has an ORDER BY.
if (!parsedStmt.hasOrderByColumns()) {
return false;
}
// Skip the explicit ORDER BY plan step if an IndexScan is already providing the equivalent ordering.
// Note that even tree index scans that produce values in their own "key order" only report
// their sort direction != SortDirectionType.INVALID
// when they enforce an ordering equivalent to the one requested in the ORDER BY
// or window function clause. Even an intervening non-hash aggregate will not interfere
// in this optimization.
// Is there a window function between the root and the
// scan or join nodes? And if so, does that window function
// use the index?
int numberWindowFunctions = 0;
int numberReceiveNodes = 0;
int numberHashAggregates = 0;
// EE keeps the insertion ORDER so that ORDER BY could apply before DISTINCT.
// However, this probably is not optimal if there are low cardinality results.
// Again, we have to replace the TVEs for the ORDER BY clause in these cases during planning.
//
// Find the scan or join node.
AbstractPlanNode probe;
for (probe = root; !((probe instanceof AbstractJoinPlanNode) || (probe instanceof AbstractScanPlanNode)) && (probe != null); probe = (probe.getChildCount() > 0) ? probe.getChild(0) : null) {
// If a window function was able to use an index, that fact will have been recorded in the scan or join node; here we just count the window functions we pass on the way down.
if (probe.getPlanNodeType() == PlanNodeType.WINDOWFUNCTION) {
numberWindowFunctions += 1;
}
// Count receive nodes: an ordering established below a receive node is not preserved when partition results are merged at the coordinator.
if (probe.getPlanNodeType() == PlanNodeType.RECEIVE) {
numberReceiveNodes += 1;
}
// A hash or partial aggregation scrambles the ordering, but a serial aggregation does not.
if ((probe.getPlanNodeType() == PlanNodeType.HASHAGGREGATE) || (probe.getPlanNodeType() == PlanNodeType.PARTIALAGGREGATE)) {
numberHashAggregates += 1;
}
}
if (probe == null) {
// We never found a scan or join node, which does not seem to be right. Maybe this should be an assert?
return true;
}
// If the scan or join node cannot supply an ordering from an index, we need an explicit sort.
if (!(probe instanceof IndexSortablePlanNode)) {
return true;
}
IndexUseForOrderBy indexUse = ((IndexSortablePlanNode) probe).indexUse();
if (indexUse.getSortOrderFromIndexScan() == SortDirectionType.INVALID) {
return true;
}
// A hash or partial aggregate between the scan and the root destroys the index ordering, so we need an ORDERBY node.
if (numberHashAggregates > 0) {
return true;
}
if (numberWindowFunctions == 0) {
if (indexUse.getWindowFunctionUsesIndex() == SubPlanAssembler.NO_INDEX_USE) {
return true;
}
assert (indexUse.getWindowFunctionUsesIndex() == SubPlanAssembler.STATEMENT_LEVEL_ORDER_BY_INDEX);
// The index supplies the statement-level ORDER BY. An MP plan (numberReceiveNodes > 0) still needs to sort the merged partition results; false for SP (numberReceiveNodes == 0).
return numberReceiveNodes > 0;
}
if (numberWindowFunctions == 1) {
// With exactly one window function, if it was able to use the index, getWindowFunctionUsesIndex() will return 0.
if ((indexUse.getWindowFunctionUsesIndex() != 0) || (!indexUse.isWindowFunctionCompatibleWithOrderBy())) {
return true;
}
// The window function sorts its input using the index, and the ORDER BY is compatible with that sort, so the statement does not need an ORDERBY node. So this is false.
return false;
}
// More than one window function should not happen yet, because we only support one window function per statement; be conservative and require a sort.
return true;
}
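The left-spine walk in this method is a recurring planner idiom: follow getChild(0) down from the root until the first scan or join node, classifying whatever sits in between. A stripped-down version of just that walk is sketched below; countOnLeftSpine is an invented name, not part of PlanAssembler.

// Count nodes of one type between the root and the first scan or join node on the left spine.
static int countOnLeftSpine(AbstractPlanNode root, PlanNodeType type) {
    int count = 0;
    for (AbstractPlanNode probe = root;
         probe != null
                 && !(probe instanceof AbstractScanPlanNode)
                 && !(probe instanceof AbstractJoinPlanNode);
         probe = (probe.getChildCount() > 0) ? probe.getChild(0) : null) {
        if (probe.getPlanNodeType() == type) {
            count += 1;
        }
    }
    return count;
}

isOrderByNodeRequired folds three such counts (window functions, receive nodes, hash/partial aggregates) into a single pass and then combines them with the IndexUseForOrderBy state recorded on the scan or join node.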
Use of org.voltdb.plannodes.AbstractScanPlanNode in project voltdb by VoltDB.
From the class PlanAssembler, method connectChildrenBestPlans.
/**
* For each Subquery node in the plan tree attach the subquery plan to the parent node.
* @param parentPlan the root of the plan tree to process
* @return A complete plan tree for the entire SQL statement.
*/
private AbstractPlanNode connectChildrenBestPlans(AbstractPlanNode parentPlan) {
if (parentPlan instanceof AbstractScanPlanNode) {
AbstractScanPlanNode scanNode = (AbstractScanPlanNode) parentPlan;
StmtTableScan tableScan = scanNode.getTableScan();
if (tableScan instanceof StmtSubqueryScan) {
CompiledPlan bestCostPlan = ((StmtSubqueryScan) tableScan).getBestCostPlan();
assert (bestCostPlan != null);
AbstractPlanNode subQueryRoot = bestCostPlan.rootPlanGraph;
subQueryRoot.disconnectParents();
scanNode.clearChildren();
scanNode.addAndLinkChild(subQueryRoot);
}
} else {
for (int i = 0; i < parentPlan.getChildCount(); ++i) {
connectChildrenBestPlans(parentPlan.getChild(i));
}
}
return parentPlan;
}
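The graft for each subquery scan is the three calls in the branch above: disconnect the subquery root from any previous parents, clear the scan node's existing children, and link the subquery root underneath it. A small recursive check that mirrors the same traversal can verify the result; allSubqueriesConnected is a hypothetical helper, not part of PlanAssembler.

// Returns true if every subquery-backed scan in the tree has its subquery plan attached below it.
static boolean allSubqueriesConnected(AbstractPlanNode node) {
    if (node instanceof AbstractScanPlanNode) {
        AbstractScanPlanNode scanNode = (AbstractScanPlanNode) node;
        if (scanNode.getTableScan() instanceof StmtSubqueryScan) {
            return scanNode.getChildCount() > 0;   // the subquery root should hang below the scan
        }
        return true;
    }
    for (int i = 0; i < node.getChildCount(); ++i) {
        if (!allSubqueriesConnected(node.getChild(i))) {
            return false;
        }
    }
    return true;
}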
Use of org.voltdb.plannodes.AbstractScanPlanNode in project voltdb by VoltDB.
From the class PlanAssembler, method addProjection.
/**
* Given a relatively complete plan-sub-graph, apply a trivial projection
* (filter) to it. If the root node can embed the projection, do so. If not,
* add a new projection node.
*
* @param rootNode
* The root of the plan-sub-graph to add the projection to.
* @return The new root of the plan-sub-graph (might be the same as the
* input).
*/
private AbstractPlanNode addProjection(AbstractPlanNode rootNode) {
assert (m_parsedSelect != null);
assert (m_parsedSelect.m_displayColumns != null);
// Build the output schema for the projection based on the display columns
NodeSchema proj_schema = m_parsedSelect.getFinalProjectionSchema();
for (SchemaColumn col : proj_schema.getColumns()) {
// Adjust the differentiator fields of TVEs, since they need to
// reflect the inlined projection node in scan nodes.
AbstractExpression colExpr = col.getExpression();
Collection<TupleValueExpression> allTves = ExpressionUtil.getTupleValueExpressions(colExpr);
for (TupleValueExpression tve : allTves) {
if (!tve.needsDifferentiation()) {
// This TVE does not need its differentiator adjusted, so we just ignore it here.
continue;
}
rootNode.adjustDifferentiatorField(tve);
}
}
ProjectionPlanNode projectionNode = new ProjectionPlanNode();
projectionNode.setOutputSchemaWithoutClone(proj_schema);
// A scan node can execute the projection node inline; otherwise the projection becomes the new root.
if (rootNode instanceof AbstractScanPlanNode) {
rootNode.addInlinePlanNode(projectionNode);
return rootNode;
}
projectionNode.addAndLinkChild(rootNode);
return projectionNode;
}
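The tail of addProjection is a pattern used throughout the assembler: a node that a scan can execute inline is attached with addInlinePlanNode, otherwise it becomes the new root of the sub-graph via addAndLinkChild. The same decision is written below as a small generic helper; inlineOrWrap is an invented name for illustration only.

// Attach 'node' above or inside 'root': inline into a scan when possible, otherwise wrap.
static AbstractPlanNode inlineOrWrap(AbstractPlanNode root, AbstractPlanNode node) {
    if (root instanceof AbstractScanPlanNode) {
        root.addInlinePlanNode(node);   // the scan applies the node as it produces tuples
        return root;
    }
    node.addAndLinkChild(root);         // otherwise the new node sits on top of the sub-graph
    return node;
}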