Use of org.apache.derby.iapi.sql.compile.Optimizable in the Apache Derby project.
The class HashJoinStrategy, method divideUpPredicateLists.
/**
 * Divide the predicates that have been pushed to the inner table of a
 * hash join into the lists used when (a) building the hash table,
 * (b) probing the hash table, and (c) requalifying rows read from the
 * heap when the chosen access path is a non-covering index.
 *
 * @see JoinStrategy#divideUpPredicateLists
 *
 * @param innerTable                     the inner (hashed) optimizable of the join
 * @param originalRestrictionList        all predicates pushed to the inner table;
 *                                       drained into the other lists by this method
 * @param storeRestrictionList           receives the single-table predicates applied
 *                                       while building the hash table
 * @param nonStoreRestrictionList        receives the remaining (join) predicates
 *                                       applied while probing the hash table
 * @param requalificationRestrictionList receives a copy of every predicate, used to
 *                                       re-check heap rows for a non-covering index
 * @param dd                             the data dictionary
 *
 * @exception StandardException Thrown on error, including when no equijoin
 *            predicate usable as a hash key can be found
 */
public void divideUpPredicateLists(Optimizable innerTable, OptimizablePredicateList originalRestrictionList, OptimizablePredicateList storeRestrictionList, OptimizablePredicateList nonStoreRestrictionList, OptimizablePredicateList requalificationRestrictionList, DataDictionary dd) throws StandardException {
    /*
    ** If we are walking a non-covering index, then all predicates that
    ** get evaluated in the HashScanResultSet, whether during the building
    ** or probing of the hash table, need to be evaluated at both the
    ** IndexRowToBaseRowResultSet and the HashScanResultSet to ensure
    ** that the rows materialized into the hash table still qualify when
    ** we go to read the row from the heap. This also includes predicates
    ** that are not qualifier/start/stop keys (hence not in store/non-store
    ** list).
    */
    originalRestrictionList.copyPredicatesToOtherList(requalificationRestrictionList);
    ConglomerateDescriptor cd = innerTable.getTrulyTheBestAccessPath().getConglomerateDescriptor();
    /* For the inner table of a hash join, then divide up the predicates:
     *
     * o restrictionList - predicates that get applied when creating
     * the hash table (single table clauses)
     *
     * o nonBaseTableRestrictionList
     * - those that get applied when probing into the
     * hash table (equijoin clauses on key columns,
     * ordered by key column position first, followed
     * by any other join predicates. (All predicates
     * in this list are qualifiers which can be
     * evaluated in the store).
     *
     * o baseTableRL - Only applicable if this is not a covering
     * index. In that case, we will need to
     * requalify the data page. Thus, this list
     * will include all predicates.
     */
    // Build the list to be applied when creating the hash table
    originalRestrictionList.transferPredicates(storeRestrictionList, innerTable.getReferencedTableMap(), innerTable);
    /*
     * Eliminate any non-qualifiers that may have been pushed, but
     * are redundant and not useful for hash join.
     *
     * For instance "in" (or other non-qualifier) was pushed down for
     * start/stop key, but for hash join, it may no longer be because
     * a previous key column may have been disqualified (eg., correlation).
     * We simply remove
     * such non-qualifier ("in") because we left it as residual predicate
     * anyway. It's easier/safer to filter it out here than detect it
     * earlier (and not push it down). Beetle 4316.
     *
     * Can't filter out OR list, as it is not a residual predicate,
     */
    for (int i = storeRestrictionList.size() - 1; i >= 0; i--) {
        Predicate p1 = (Predicate) storeRestrictionList.getOptPredicate(i);
        if (!p1.isStoreQualifier() && !p1.isStartKey() && !p1.isStopKey()) {
            storeRestrictionList.removeOptPredicate(i);
        }
    }
    // Strip non-qualifiers from the original list too; what survives the
    // copy below becomes the probe-side (join) predicate list.
    for (int i = originalRestrictionList.size() - 1; i >= 0; i--) {
        Predicate p1 = (Predicate) originalRestrictionList.getOptPredicate(i);
        if (!p1.isStoreQualifier())
            originalRestrictionList.removeOptPredicate(i);
    }
    /* Copy the rest of the predicates to the non-store list */
    originalRestrictionList.copyPredicatesToOtherList(nonStoreRestrictionList);
    /* If innerTable is ProjectRestrictNode, we need to use its child
     * to find hash key columns, this is because ProjectRestrictNode may
     * not have underlying node's every result column as result column,
     * and the predicate's column reference was bound to the underlying
     * node's column position. Also we have to pass in the
     * ProjectRestrictNode rather than the underlying node to this method
     * because a predicate's referencedTableMap references the table number
     * of the ProjectRestrictiveNode. And we need this info to see if
     * a predicate is in storeRestrictionList that can be pushed down.
     * Beetle 3458.
     */
    Optimizable hashTableFor = innerTable;
    if (innerTable instanceof ProjectRestrictNode) {
        ProjectRestrictNode prn = (ProjectRestrictNode) innerTable;
        if (prn.getChildResult() instanceof Optimizable)
            hashTableFor = (Optimizable) (prn.getChildResult());
    }
    int[] hashKeyColumns = findHashKeyColumns(hashTableFor, cd, nonStoreRestrictionList);
    if (hashKeyColumns != null) {
        innerTable.setHashKeyColumns(hashKeyColumns);
    } else {
        // No equijoin predicate usable as a hash key: a hash join is not
        // possible, so report the failure with the best available name.
        String name;
        if (cd != null && cd.isIndex()) {
            name = cd.getConglomerateName();
        } else {
            name = innerTable.getBaseTableName();
        }
        throw StandardException.newException(SQLState.LANG_HASH_NO_EQUIJOIN_FOUND, name, innerTable.getBaseTableName());
    }
    // Mark all of the predicates in the probe list as qualifiers
    nonStoreRestrictionList.markAllPredicatesQualifiers();
    int[] conglomColumn = new int[hashKeyColumns.length];
    if (cd != null && cd.isIndex()) {
        /*
        ** If the conglomerate is an index, get the column numbers of the
        ** hash keys in the base heap.
        */
        for (int index = 0; index < hashKeyColumns.length; index++) {
            conglomColumn[index] = cd.getIndexDescriptor().baseColumnPositions()[hashKeyColumns[index]];
        }
    } else {
        /*
        ** If the conglomerate is a heap, the column numbers of the hash
        ** key are the column numbers returned by findHashKeyColumns().
        **
        ** NOTE: Must switch from zero-based to one-based
        */
        for (int index = 0; index < hashKeyColumns.length; index++) {
            conglomColumn[index] = hashKeyColumns[index] + 1;
        }
    }
    /* Put the equality predicates on the key columns for the hash first.
     * (Column # is columns[colCtr] from above.)
     */
    for (int index = hashKeyColumns.length - 1; index >= 0; index--) {
        nonStoreRestrictionList.putOptimizableEqualityPredicateFirst(innerTable, conglomColumn[index]);
    }
}
Use of org.apache.derby.iapi.sql.compile.Optimizable in the Apache Derby project.
The class OptimizerImpl, method costPermutation.
/**
 * Cost the optimizable that sits at the current join position, using the
 * accumulated cost of the outer plan as the starting point.
 *
 * @see org.apache.derby.iapi.sql.compile.Optimizer#costPermutation
 *
 * @exception StandardException Thrown on error
 */
public void costPermutation() throws StandardException {
    /*
    ** The outer plan's cost so far supplies the current estimated rows,
    ** ordering, etc. At the first join position that is the outermost
    ** estimate; otherwise it is the best access path of the table placed
    ** immediately before the current position.
    **
    ** NOTE: This is somewhat problematic. We assume here that the
    ** outer cost from the best access path for the outer table
    ** is OK to use even when costing the sort avoidance path for
    ** the inner table. This is probably OK, since all we use
    ** from the outer cost is the row count.
    */
    final CostEstimate outerCost =
        (joinPosition == 0)
            ? outermostCostEstimate
            : optimizableList.getOptimizable(proposedJoinOrder[joinPosition - 1]).getBestAccessPath().getCostEstimate();
    /* At this point outerCost should be non-null (DERBY-1777).
     * Assert right away rather than letting an NPE surface somewhere
     * further down the tree, where it would be harder to trace back.
     */
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(outerCost != null, "outerCost is not expected to be null");
    }
    final Optimizable innerOptimizable = optimizableList.getOptimizable(proposedJoinOrder[joinPosition]);
    // Skip join strategies that are not feasible for this optimizable.
    if (!innerOptimizable.feasibleJoinStrategy(predicateList, this)) {
        return;
    }
    // Cost the optimizable at the current join position.
    innerOptimizable.optimizeIt(this, predicateList, outerCost, currentRowOrdering);
}
Use of org.apache.derby.iapi.sql.compile.Optimizable in the Apache Derby project.
The class OptimizerImpl, method pullOptimizableFromJoinOrder.
/**
 * Pull whatever optimizable is at joinPosition in the proposed
 * join order from the join order, and update all corresponding
 * state accordingly: subtract its cost from the running totals,
 * return its predicates to the predicate list, reload its best
 * plan if needed, and clear its slot and table-map bits.
 *
 * @exception StandardException Thrown on error
 */
private void pullOptimizableFromJoinOrder() throws StandardException {
    Optimizable pullMe = optimizableList.getOptimizable(proposedJoinOrder[joinPosition]);
    /*
    ** Subtract the cost estimate of the optimizable being
    ** removed from the total cost estimate.
    **
    ** The total cost is the sum of all the costs, but the total
    ** number of rows is the number of rows returned by the
    ** innermost optimizable.
    */
    double prevRowCount;
    double prevSingleScanRowCount;
    int prevPosition = 0;
    if (joinPosition == 0) {
        prevRowCount = outermostCostEstimate.rowCount();
        prevSingleScanRowCount = outermostCostEstimate.singleScanRowCount();
    } else {
        prevPosition = proposedJoinOrder[joinPosition - 1];
        CostEstimate localCE = optimizableList.getOptimizable(prevPosition).getBestAccessPath().getCostEstimate();
        prevRowCount = localCE.rowCount();
        prevSingleScanRowCount = localCE.singleScanRowCount();
    }
    /*
    ** If there is no feasible join order, the cost estimate
    ** in the best access path may never have been set.
    ** In this case, do not subtract anything from the
    ** current cost, since nothing was added to the current
    ** cost.
    */
    double newCost = currentCost.getEstimatedCost();
    CostEstimate pullCostEstimate = pullMe.getBestAccessPath().getCostEstimate();
    if (pullCostEstimate != null) {
        double pullCost = pullCostEstimate.getEstimatedCost();
        newCost -= pullCost;
        /*
        ** It's possible for newCost to go negative here due to
        ** loss of precision--but that should ONLY happen if the
        ** optimizable we just pulled was at position 0. If we
        ** have a newCost that is <= 0 at any other time, then
        ** it's the result of a different kind of precision loss--
        ** namely, the estimated cost of pullMe was so large that
        ** we lost the precision of the accumulated cost as it
        ** existed prior to pullMe. Then when we subtracted
        ** pullMe's cost out, we ended up setting newCost to zero.
        ** That's an unfortunate side effect of optimizer cost
        ** estimates that grow too large. If that's what happened
        ** here, try to make some sense of things by adding up costs
        ** as they existed prior to pullMe...
        */
        if (newCost <= 0.0) {
            if (joinPosition == 0)
                newCost = 0.0;
            else
                newCost = recoverCostFromProposedJoinOrder(false);
        }
    }
    /* If we are choosing a new outer table, then
     * we reset the starting cost to the outermostCost.
     * (Thus avoiding any problems with floating point
     * accuracy and going negative.)
     */
    if (joinPosition == 0) {
        if (outermostCostEstimate != null) {
            newCost = outermostCostEstimate.getEstimatedCost();
        } else {
            newCost = 0.0;
        }
    }
    currentCost.setCost(newCost, prevRowCount, prevSingleScanRowCount);
    /*
    ** Subtract from the sort avoidance cost if there is a
    ** required row ordering.
    **
    ** NOTE: It is not necessary here to check whether the
    ** best cost was ever set for the sort avoidance path,
    ** because considerSortAvoidancePath() would not return
    ** true if the cost were not set.
    */
    if (requiredRowOrdering != null) {
        if (pullMe.considerSortAvoidancePath()) {
            AccessPath ap = pullMe.getBestSortAvoidancePath();
            double prevEstimatedCost;
            /*
            ** Subtract the sort avoidance cost estimate of the
            ** optimizable being removed from the total sort
            ** avoidance cost estimate.
            **
            ** The total cost is the sum of all the costs, but the
            ** total number of rows is the number of rows returned
            ** by the innermost optimizable.
            */
            if (joinPosition == 0) {
                prevRowCount = outermostCostEstimate.rowCount();
                prevSingleScanRowCount = outermostCostEstimate.singleScanRowCount();
                /* If we are choosing a new outer table, then
                 * we reset the starting cost to the outermostCost.
                 * (Thus avoiding any problems with floating point
                 * accuracy and going negative.)
                 */
                prevEstimatedCost = outermostCostEstimate.getEstimatedCost();
            } else {
                CostEstimate localCE = optimizableList.getOptimizable(prevPosition).getBestSortAvoidancePath().getCostEstimate();
                prevRowCount = localCE.rowCount();
                prevSingleScanRowCount = localCE.singleScanRowCount();
                prevEstimatedCost = currentSortAvoidanceCost.getEstimatedCost() - ap.getCostEstimate().getEstimatedCost();
            }
            // See discussion above for "newCost"; same applies here.
            if (prevEstimatedCost <= 0.0) {
                if (joinPosition == 0)
                    prevEstimatedCost = 0.0;
                else {
                    prevEstimatedCost = recoverCostFromProposedJoinOrder(true);
                }
            }
            currentSortAvoidanceCost.setCost(prevEstimatedCost, prevRowCount, prevSingleScanRowCount);
            /*
            ** Remove the table from the best row ordering.
            ** It should not be necessary to remove it from
            ** the current row ordering, because it is
            ** maintained as we step through the access paths
            ** for the current Optimizable.
            */
            bestRowOrdering.removeOptimizable(pullMe.getTableNumber());
            /*
            ** When removing a table from the join order,
            ** the best row ordering for the remaining outer tables
            ** becomes the starting point for the row ordering of
            ** the current table.
            */
            bestRowOrdering.copy(currentRowOrdering);
        }
    }
    /*
    ** Pull the predicates from the optimizable and put
    ** them back in the predicate list.
    **
    ** NOTE: This is a little inefficient because it pulls the
    ** single-table predicates, which are guaranteed to always
    ** be pushed to the same optimizable. We could make this
    ** leave the single-table predicates where they are.
    */
    pullMe.pullOptPredicates(predicateList);
    /*
    ** When we pull an Optimizable we need to go through and
    ** load whatever best path we found for that Optimizable
    ** with respect to this OptimizerImpl. The reason is that
    ** we could be pulling the Optimizable for the last time
    ** (before returning false), in which case we want it (the
    ** Optimizable) to be holding the best access path that it
    ** had at the time we found bestJoinOrder. This ensures
    ** that the access path which is generated and executed for
    ** the Optimizable matches the access path decisions
    ** made by this OptimizerImpl for the best join order.
    **
    ** NOTE: We only reload the best plan if it's necessary
    ** to do so--i.e. if the best plans aren't already loaded.
    ** The plans will already be loaded if the last complete
    ** join order we had was the best one so far, because that
    ** means we called "rememberAsBest" on every Optimizable
    ** in the list and, as part of that call, we will run through
    ** and set trulyTheBestAccessPath for the entire subtree.
    ** So if we haven't tried any other plans since then,
    ** we know that every Optimizable (and its subtree) already
    ** has the correct best plan loaded in its trulyTheBest
    ** path field. It's good to skip the load in this case
    ** because 'reloading best plans' involves walking the
    ** entire subtree of _every_ Optimizable in the list, which
    ** can be expensive if there are deeply nested subqueries.
    */
    if (reloadBestPlan)
        pullMe.updateBestPlanMap(FromTable.LOAD_PLAN, this);
    /* Mark current join position as unused */
    proposedJoinOrder[joinPosition] = -1;
    /* If we didn't advance the join position then the optimizable
     * which currently sits at proposedJoinOrder[joinPosition]--call
     * it PULL_ME--is *not* going to remain there. Instead, we're
     * going to pull that optimizable from its position and attempt
     * to put another one in its place. That said, when we try to
     * figure out which of the other optimizables to place at
     * joinPosition, we'll first do some "dependency checking", the
     * result of which relies on the contents of assignedTableMap.
     * Since assignedTableMap currently holds info about PULL_ME
     * and since PULL_ME is *not* going to remain in the join order,
     * we need to remove the info for PULL_ME from assignedTableMap.
     * Otherwise an Optimizable which depends on PULL_ME could
     * incorrectly be placed in the join order *before* PULL_ME,
     * which would violate the dependency and lead to incorrect
     * results. DERBY-3288.
     */
    assignedTableMap.xor(pullMe.getReferencedTableMap());
}
Use of org.apache.derby.iapi.sql.compile.Optimizable in the Apache Derby project.
The class OptimizerImpl, method rewindJoinOrder.
/**
 * Walk the proposed join order backwards from the current joinPosition
 * down to position 0, pulling each optimizable's predicates back into
 * the shared predicate list and clearing its slot, then reset the
 * running cost estimates and the assigned-table map to a clean state.
 *
 * @exception StandardException Thrown on error
 */
private void rewindJoinOrder() throws StandardException {
    while (true) {
        Optimizable current = optimizableList.getOptimizable(proposedJoinOrder[joinPosition]);
        // Return this optimizable's predicates to the shared list.
        current.pullOptPredicates(predicateList);
        // Restore the remembered best plan if a later permutation may
        // have clobbered it.
        if (reloadBestPlan) {
            current.updateBestPlanMap(FromTable.LOAD_PLAN, this);
        }
        // Mark this slot in the join order as unused.
        proposedJoinOrder[joinPosition] = -1;
        if (joinPosition == 0) {
            break;
        }
        joinPosition--;
    }
    // Zero out the accumulated costs and table assignments so the next
    // permutation starts from scratch.
    currentCost.setCost(0.0d, 0.0d, 0.0d);
    currentSortAvoidanceCost.setCost(0.0d, 0.0d, 0.0d);
    assignedTableMap.clearAll();
}
Use of org.apache.derby.iapi.sql.compile.Optimizable in the Apache Derby project.
The class OptimizerImpl, method getNextPermutation.
/**
 * Advance to the next join-order permutation to be costed, honoring
 * user-forced join orders, optimization timeouts, dependency checking,
 * and the "jump" heuristic (JUMPING/WALK_HIGH/WALK_LOW states) that
 * starts the search from a promising join order.
 *
 * @see Optimizer#getNextPermutation
 *
 * @return true if a new permutation was placed and should be costed;
 *         false when all permutations for this round are exhausted
 *
 * @exception StandardException Thrown on error, including an illegal
 *            user-specified join order
 */
public boolean getNextPermutation() throws StandardException {
    /* Don't get any permutations if there is nothing to optimize */
    if (numOptimizables < 1) {
        if (tracingIsOn()) {
            tracer().traceVacuous();
        }
        endOfRoundCleanup();
        return false;
    }
    /* Make sure that all Optimizables init their access paths.
     * (They wait until optimization since the access path
     * references the optimizer.)
     */
    optimizableList.initAccessPaths(this);
    /*
    ** Experiments show that optimization time only starts to
    ** become a problem with seven tables, so only check for
    ** too much time if there are more than seven tables.
    ** Also, don't check for too much time if user has specified
    ** no timeout.
    */
    if ((!timeExceeded) && (numTablesInQuery > 6) && (!noTimeout)) {
        /*
        ** Stop optimizing if the time spent optimizing is greater than
        ** the current best cost.
        */
        currentTime = System.currentTimeMillis();
        timeExceeded = (currentTime - timeOptimizationStarted) > timeLimit;
        if (tracingIsOn() && timeExceeded) {
            tracer().traceTimeout(currentTime, bestCost);
        }
    }
    if (bestCost.isUninitialized() && foundABestPlan && ((!usingPredsPushedFromAbove && !bestJoinOrderUsedPredsFromAbove) || timeExceeded)) {
        /* We can get here if this OptimizerImpl is for a subquery
         * that timed out for a previous permutation of the outer
         * query, but then the outer query itself did _not_ timeout.
         * In that case we'll end up back here for another round of
         * optimization, but our timeExceeded flag will be true.
         * We don't want to reset all of the timeout state here
         * because that could lead to redundant work (see comments
         * in prepForNextRound()), but we also don't want to return
         * without having a plan, because then we'd return an unfairly
         * high "bestCost" value--i.e. Double.MAX_VALUE. Note that
         * we can't just revert back to whatever bestCost we had
         * prior to this because that cost is for some previous
         * permutation of the outer query--not the current permutation--
         * and thus would be incorrect. So instead we have to delay
         * the timeout until we find a complete (and valid) join order,
         * so that we can return a valid cost estimate. Once we have
         * a valid cost we'll then go through the timeout logic
         * and stop optimizing.
         *
         * All of that said, instead of just trying the first possible
         * join order, we jump to the join order that gave us the best
         * cost in previous rounds. We know that such a join order exists
         * because that's how our timeout value was set to begin with--so
         * if there was no best join order, we never would have timed out
         * and thus we wouldn't be here.
         *
         * We can also get here if we've already optimized the list
         * of optimizables once (in a previous round of optimization)
         * and now we're back to do it again. If that's true AND
         * we did *not* receive any predicates pushed from above AND
         * the bestJoinOrder from the previous round did *not* depend
         * on predicates pushed from above, then we'll jump to the
         * previous join order and start there. NOTE: if after jumping
         * to the previous join order and calculating the cost we haven't
         * timed out, we will continue looking at other join orders (as
         * usual) until we exhaust them all or we time out.
         */
        if (permuteState != JUMPING) {
            // Start a jump to the best join order from previous rounds,
            // then proceed with normal timeout logic.
            if (firstLookOrder == null)
                firstLookOrder = new int[numOptimizables];
            System.arraycopy(bestJoinOrder, 0, firstLookOrder, 0, numOptimizables);
            permuteState = JUMPING;
            /* If we already assigned at least one position in the
             * join order when this happened (i.e. if joinPosition
             * is greater than *or equal* to zero; DERBY-1777), then
             * reset the join order before jumping. The call to
             * rewindJoinOrder() here will put joinPosition back
             * to 0. But that said, we'll then end up incrementing
             * joinPosition before we start looking for the next
             * join order (see below), which means we need to set
             * it to -1 here so that it gets incremented to "0" and
             * then processing can continue as normal from there.
             * Note: we don't need to set reloadBestPlan to true
             * here because we only get here if we have *not* found
             * a best plan yet.
             */
            if (joinPosition >= 0) {
                rewindJoinOrder();
                joinPosition = -1;
            }
        }
        // Reset the timeExceeded flag so that we'll keep going
        // until we find a complete join order. NOTE: we intentionally
        // do _not_ reset the timeOptimizationStarted value because we
        // want to go through this timeout logic for every
        // permutation, to make sure we timeout as soon as we have
        // our first complete join order.
        timeExceeded = false;
    }
    /*
    ** Pick the next table in the join order, if there is an unused position
    ** in the join order, and the current plan is less expensive than
    ** the best plan so far, and the amount of time spent optimizing is
    ** still less than the cost of the best plan so far, and a best
    ** cost has been found in the current join position. Otherwise,
    ** just pick the next table in the current position.
    */
    boolean joinPosAdvanced = false;
    /* Determine if the current plan is still less expensive than
     * the best plan so far. If bestCost is uninitialized then
     * we want to return false here; if we didn't, then in the (rare)
     * case where the current cost is greater than Double.MAX_VALUE
     * (esp. if it's Double.POSITIVE_INFINITY, which can occur
     * for very deeply nested queries with long FromLists) we would
     * give up on the current plan even though we didn't have a
     * best plan yet, which would be wrong. Also note: if we have
     * a required row ordering then we might end up using the
     * sort avoidance plan--but we don't know at this point
     * which plan (sort avoidance or "normal") we're going to
     * use, so we error on the side of caution and only short-
     * circuit if both currentCost and currentSortAvoidanceCost
     * (if the latter is applicable) are greater than bestCost.
     */
    boolean alreadyCostsMore = !bestCost.isUninitialized() && (currentCost.compare(bestCost) > 0) && ((requiredRowOrdering == null) || (currentSortAvoidanceCost.compare(bestCost) > 0));
    if ((joinPosition < (numOptimizables - 1)) && !alreadyCostsMore && (!timeExceeded)) {
        /*
        ** Are we either starting at the first join position (in which
        ** case joinPosition will be -1), or has a best cost been found
        ** in the current join position? The latter case might not be
        ** true if there is no feasible join order.
        */
        if ((joinPosition < 0) || optimizableList.getOptimizable(proposedJoinOrder[joinPosition]).getBestAccessPath().getCostEstimate() != null) {
            joinPosition++;
            joinPosAdvanced = true;
            /*
            ** When adding a table to the join order, the best row
            ** ordering for the outer tables becomes the starting
            ** point for the row ordering of the current table.
            */
            bestRowOrdering.copy(currentRowOrdering);
        }
    } else {
        // Short-circuit: this partial plan already costs more than the
        // best complete plan (or time ran out), so don't extend the
        // current join order.
        if (joinPosition < (numOptimizables - 1)) {
            if (tracingIsOn()) {
                tracer().traceShortCircuiting(timeExceeded, optimizableList.getOptimizable(proposedJoinOrder[joinPosition]), joinPosition);
            }
            reloadBestPlan = true;
        }
    }
    if (permuteState == JUMPING && !joinPosAdvanced && joinPosition >= 0) {
        // We failed to advance in the middle of a jump.
        // Note: we have to make sure we reload the best plans
        // as we rewind since they may have been clobbered
        // (as part of the current join order) before we gave
        // up on jumping.
        reloadBestPlan = true;
        // Fall back to the start of the join order...
        rewindJoinOrder();
        // ...and give up on jumping for this round.
        permuteState = NO_JUMP;
    }
    /*
    ** The join position becomes < 0 when all the permutations have been
    ** looked at.
    */
    while (joinPosition >= 0) {
        int nextOptimizable = proposedJoinOrder[joinPosition] + 1;
        if (proposedJoinOrder[joinPosition] >= 0) {
            /* We are either going to try another table at the current
             * join order position, or we have exhausted all the tables
             * at the current join order position. In either case, we
             * need to pull the table at the current join order position
             * and remove it from the join order. Do this BEFORE we
             * search for the next optimizable so that assignedTableMap,
             * which is updated to reflect the PULL, has the correct
             * information for enforcing join order dependencies.
             * DERBY-3288.
             */
            pullOptimizableFromJoinOrder();
        }
        if (desiredJoinOrderFound || timeExceeded) {
            /*
            ** If the desired join order has been found (which will happen
            ** if the user specifies a join order), pretend that there are
            ** no more optimizables at this join position. This will cause
            ** us to back out of the current join order.
            **
            ** Also, don't look at any more join orders if we have taken
            ** too much time with this optimization.
            */
            nextOptimizable = numOptimizables;
        } else if (// still jumping
        permuteState == JUMPING) {
            /* We're "jumping" to a join order that puts the optimizables
            ** with the lowest estimated costs first (insofar as it
            ** is legal to do so). The "firstLookOrder" array holds the
            ** ideal join order for position <joinPosition> up thru
            ** position <numOptimizables-1>. So here, we look at the
            ** ideal optimizable to place at <joinPosition> and see if
            ** it's legal; if it is, then we're done. Otherwise, we
            ** swap it with <numOptimizables-1> and see if that gives us
            ** a legal join order w.r.t <joinPosition>. If not, then we
            ** swap it with <numOptimizables-2> and check, and if that
            ** fails, then we swap it with <numOptimizables-3>, and so
            ** on. For example, assume we have 6 optimizables whose
            ** order from least expensive to most expensive is 2, 1, 4,
            ** 5, 3, 0. Assume also that we've already verified the
            ** legality of the first two positions--i.e. that joinPosition
            ** is now "2". That means that "firstLookOrder" currently
            ** contains the following:
            **
            ** [ pos ] 0 1 2 3 4 5
            ** [ opt ] 2 1 4 5 3 0
            **
            ** Then at this point, we do the following:
            **
            ** -- Check to see if the ideal optimizable "4" is valid
            ** at its current position (2)
            ** -- If opt "4" is valid, then we're done; else we
            ** swap it with the value at position _5_:
            **
            ** [ pos ] 0 1 2 3 4 5
            ** [ opt ] 2 1 0 5 3 4
            **
            ** -- Check to see if optimizable "0" is valid at its
            ** new position (2).
            ** -- If opt "0" is valid, then we're done; else we
            ** put "0" back in its original position and swap
            ** the ideal optimizer ("4") with the value at
            ** position _4_:
            **
            ** [ pos ] 0 1 2 3 4 5
            ** [ opt ] 2 1 3 5 4 0
            **
            ** -- Check to see if optimizable "3" is valid at its
            ** new position (2).
            ** -- If opt "3" is valid, then we're done; else we
            ** put "3" back in its original position and swap
            ** the ideal optimizer ("4") with the value at
            ** position _3_:
            **
            ** [ pos ] 0 1 2 3 4 5
            ** [ opt ] 2 1 5 4 3 0
            **
            ** -- Check to see if optimizable "5" is valid at its
            ** new position (2).
            ** -- If opt "5" is valid, then we're done; else we've
            ** tried all the available optimizables and none
            ** of them are legal at position 2. In this case,
            ** we give up on "JUMPING" and fall back to normal
            ** join-order processing.
            */
            int idealOptimizable = firstLookOrder[joinPosition];
            nextOptimizable = idealOptimizable;
            int lookPos = numOptimizables;
            int lastSwappedOpt = -1;
            Optimizable nextOpt;
            for (nextOpt = optimizableList.getOptimizable(nextOptimizable); !(nextOpt.legalJoinOrder(assignedTableMap)); nextOpt = optimizableList.getOptimizable(nextOptimizable)) {
                // Undo last swap, if we had one.
                if (lastSwappedOpt >= 0) {
                    firstLookOrder[joinPosition] = idealOptimizable;
                    firstLookOrder[lookPos] = lastSwappedOpt;
                }
                if (lookPos > joinPosition + 1) {
                    // we still have other possibilities; get the next
                    // one by "swapping" it into the current position.
                    lastSwappedOpt = firstLookOrder[--lookPos];
                    firstLookOrder[joinPosition] = lastSwappedOpt;
                    firstLookOrder[lookPos] = idealOptimizable;
                    nextOptimizable = lastSwappedOpt;
                } else {
                    // No optimizable is legal at this position, so the
                    // jump fails here: rewind and fall back to normal
                    // join-order processing.
                    if (joinPosition > 0) {
                        joinPosition--;
                        reloadBestPlan = true;
                        rewindJoinOrder();
                    }
                    permuteState = NO_JUMP;
                    break;
                }
            }
            if (permuteState == NO_JUMP)
                continue;
            if (joinPosition == numOptimizables - 1) {
                // we just set the final position within our
                // "firstLookOrder" join order; now go ahead
                // and search for the best join order, starting from
                // the join order stored in "firstLookOrder". This
                // is called walking "high" because we're searching
                // the join orders that are at or "above" (after) the
                // order found in firstLookOrder. Ex. if we had three
                // optimizables and firstLookOrder was [1 2 0], then
                // the "high" would be [1 2 0], [2 0 1] and [2 1 0];
                // the "low" would be [0 1 2], [0 2 1], and [1 0 2].
                // We walk the "high" first, then fall back and
                // walk the "low".
                permuteState = WALK_HIGH;
            }
        } else {
            /* Find the next unused table at this join position */
            for (; nextOptimizable < numOptimizables; nextOptimizable++) {
                boolean found = false;
                for (int posn = 0; posn < joinPosition; posn++) {
                    /*
                    ** Is this optimizable already somewhere
                    ** in the join order?
                    */
                    if (proposedJoinOrder[posn] == nextOptimizable) {
                        found = true;
                        break;
                    }
                }
                /* No need to check the dependencies if the optimizable
                 * is already in the join order--because we should have
                 * checked its dependencies before putting it there.
                 */
                if (found) {
                    if (SanityManager.DEBUG) {
                        // Doesn't hurt to check in SANE mode, though...
                        if ((nextOptimizable < numOptimizables) && !joinOrderMeetsDependencies(nextOptimizable)) {
                            SanityManager.THROWASSERT("Found optimizable '" + nextOptimizable + "' in current join order even though " + "its dependencies were NOT satisfied.");
                        }
                    }
                    continue;
                }
                /* Check to make sure that all of the next optimizable's
                 * dependencies have been satisfied.
                 */
                if ((nextOptimizable < numOptimizables) && !joinOrderMeetsDependencies(nextOptimizable)) {
                    if (tracingIsOn()) {
                        tracer().traceSkippingJoinOrder(nextOptimizable, joinPosition, ArrayUtil.copy(proposedJoinOrder), (JBitSet) assignedTableMap.clone());
                    }
                    /*
                    ** If this is a user specified join order then it is illegal.
                    */
                    if (!optimizableList.optimizeJoinOrder()) {
                        if (tracingIsOn()) {
                            tracer().traceIllegalUserJoinOrder();
                        }
                        throw StandardException.newException(SQLState.LANG_ILLEGAL_FORCED_JOIN_ORDER);
                    }
                    continue;
                }
                break;
            }
        }
        /* Have we exhausted all the optimizables at this join position? */
        if (nextOptimizable >= numOptimizables) {
            /*
            ** If we're not optimizing the join order, remember the first
            ** join order.
            */
            if (!optimizableList.optimizeJoinOrder()) {
                // Verify that the user specified a legal join order
                if (!optimizableList.legalJoinOrder(numTablesInQuery)) {
                    if (tracingIsOn()) {
                        tracer().traceIllegalUserJoinOrder();
                    }
                    throw StandardException.newException(SQLState.LANG_ILLEGAL_FORCED_JOIN_ORDER);
                }
                if (tracingIsOn()) {
                    tracer().traceUserJoinOrderOptimized();
                }
                desiredJoinOrderFound = true;
            }
            if (permuteState == READY_TO_JUMP && joinPosition > 0 && joinPosition == numOptimizables - 1) {
                permuteState = JUMPING;
                /* A simple heuristics is that the row count we got
                 * indicates a potentially good join order. We'd like row
                 * count to get big as late as possible, so
                 * that less load is carried over.
                 */
                double[] rc = new double[numOptimizables];
                for (int i = 0; i < numOptimizables; i++) {
                    firstLookOrder[i] = i;
                    CostEstimate ce = optimizableList.getOptimizable(i).getBestAccessPath().getCostEstimate();
                    if (ce == null) {
                        // No cost estimate available for this optimizable,
                        // so we can't build the jump order; try again on a
                        // later round.
                        permuteState = READY_TO_JUMP;
                        break;
                    }
                    rc[i] = ce.singleScanRowCount();
                }
                if (permuteState == JUMPING) {
                    boolean doIt = false;
                    int temp;
                    // Order firstLookOrder by ascending row count using a
                    // simple selection sort.
                    for (int i = 0; i < numOptimizables; i++) {
                        int k = i;
                        for (int j = i + 1; j < numOptimizables; j++) if (rc[j] < rc[k])
                            k = j;
                        if (k != i) {
                            // rc[k] is no longer needed after the swap, so just
                            // overwrite it instead of swapping both arrays.
                            rc[k] = rc[i];
                            temp = firstLookOrder[i];
                            firstLookOrder[i] = firstLookOrder[k];
                            firstLookOrder[k] = temp;
                            doIt = true;
                        }
                    }
                    if (doIt) {
                        joinPosition--;
                        // Jump from the ground: rewind completely and restart
                        // the search from the new firstLookOrder.
                        rewindJoinOrder();
                        continue;
                    } else
                        // The natural order is already sorted; jumping would
                        // revisit the same orders, so don't jump at all.
                        permuteState = NO_JUMP;
                }
            }
            /*
            ** We have exhausted all the optimizables at this level.
            ** Go back up one level.
            */
            /* Go back up one join position */
            joinPosition--;
            if (// reached the "peak" of the walk-high phase
            joinPosition < 0 && permuteState == WALK_HIGH) {
                // Reset and walk the "low" join orders (those before
                // firstLookOrder).
                joinPosition = 0;
                permuteState = WALK_LOW;
            }
            continue;
        }
        /*
        ** We have found another optimizable to try at this join position.
        */
        proposedJoinOrder[joinPosition] = nextOptimizable;
        if (permuteState == WALK_LOW) {
            boolean finishedCycle = true;
            for (int i = 0; i < numOptimizables; i++) {
                if (proposedJoinOrder[i] < firstLookOrder[i]) {
                    finishedCycle = false;
                    break;
                } else if (// proposed order has passed firstLookOrder: done
                proposedJoinOrder[i] > firstLookOrder[i])
                    break;
            }
            if (finishedCycle) {
                // We just set proposedJoinOrder[joinPosition] above, so
                // if we're done we need to put it back to -1 to indicate
                // that it's an empty slot. Then we rewind and pull any
                // other Optimizables at positions < joinPosition.
                // Note: we have to make sure we reload the best plans
                // as we rewind since they may have been clobbered
                // (as part of the current join order) before we got
                // here.
                proposedJoinOrder[joinPosition] = -1;
                joinPosition--;
                if (joinPosition >= 0) {
                    reloadBestPlan = true;
                    rewindJoinOrder();
                    joinPosition = -1;
                }
                permuteState = READY_TO_JUMP;
                endOfRoundCleanup();
                return false;
            }
        }
        /* Re-init (clear out) the cost for the best access path
         * when placing a table.
         */
        optimizableList.getOptimizable(nextOptimizable).getBestAccessPath().setCostEstimate((CostEstimate) null);
        if (tracingIsOn()) {
            tracer().traceJoinOrderConsideration(joinPosition, ArrayUtil.copy(proposedJoinOrder), (JBitSet) assignedTableMap.clone());
        }
        Optimizable nextOpt = optimizableList.getOptimizable(nextOptimizable);
        /* Update the assigned table map to include the newly-placed
         * Optimizable in the current join order. Assumption is that
         * this OR can always be undone using an XOR, which will only
         * be true if none of the Optimizables have overlapping table
         * maps. The XOR itself occurs as part of optimizable "PULL"
         * processing.
         */
        if (SanityManager.DEBUG) {
            JBitSet optMap = (JBitSet) nextOpt.getReferencedTableMap().clone();
            optMap.and(assignedTableMap);
            if (optMap.getFirstSetBit() != -1) {
                SanityManager.THROWASSERT("Found multiple optimizables that share one or " + "more referenced table numbers (esp: '" + optMap + "'), but that should not be the case.");
            }
        }
        assignedTableMap.or(nextOpt.getReferencedTableMap());
        nextOpt.startOptimizing(this, currentRowOrdering);
        pushPredicates(optimizableList.getOptimizable(nextOptimizable), assignedTableMap);
        return true;
    }
    endOfRoundCleanup();
    return false;
}
Aggregations