Example 1 with Optimizer

Use of org.apache.derby.iapi.sql.compile.Optimizer in the Apache Derby project.

From the class SelectNode, method optimize.

/**
 * Optimize this SelectNode.  This means choosing the best access path
 * for each table, among other things.
 *
 * @param dataDictionary	The DataDictionary to use for optimization
 * @param predicateList		The predicate list to optimize against
 * @param outerRows			The number of outer joining rows
 *
 * @return	ResultSetNode	The top of the optimized tree
 *
 * @exception StandardException		Thrown on error
 */
@Override
ResultSetNode optimize(DataDictionary dataDictionary, PredicateList predicateList, double outerRows) throws StandardException {
    Optimizer opt;
    /* selectSubquerys is always allocated at bind() time */
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(selectSubquerys != null, "selectSubquerys is expected to be non-null");
    }
    // remove duplicate columns, e.g., "ORDER BY 1, 1, 2".
    for (int i = 0; i < qec.size(); i++) {
        final OrderByList obl = qec.getOrderByList(i);
        if (obl != null && obl.size() > 1) {
            obl.removeDupColumns();
        }
    }
    if (wherePredicates != null) {
        // Iterate backwards because we might be deleting entries.
        for (int i = wherePredicates.size() - 1; i >= 0; i--) {
            if (wherePredicates.elementAt(i).isScopedForPush()) {
                wherePredicates.removeOptPredicate(i);
            }
        }
    }
    /* With DERBY-805 we take any optimizable predicates that
     * were pushed into this node and we add them to the list of
     * predicates that we pass to the optimizer, thus allowing
     * the optimizer to use them when choosing an access path
     * for this SELECT node.  We do that by adding the predicates
     * to our WHERE list, since the WHERE predicate list is what
     * we pass to the optimizer for this select node (see below).
     * We have to pass the WHERE list directly (as opposed to
     * passing a copy) because the optimizer is only created one
     * time; it then uses the list we pass it for the rest of the
     * optimization phase and finally for "modifyAccessPaths()".
     * Since the optimizer can update/modify the list based on the
     * WHERE predicates (such as by adding internal predicates or
     * by modifying the actual predicates themselves), we need
     * those changes to be applied to the WHERE list directly for
     * subsequent processing (esp. for modification of the access
     * path).  Note that by adding outer opt predicates directly
     * to the WHERE list, we're changing the semantics of this
     * SELECT node.  This is only temporary, though--once the
     * optimizer is done with all of its work, any predicates
     * that were pushed here will have been pushed even further
     * down and thus will have been removed from the WHERE list
     * (if it's not possible to push them further down, then they
     * shouldn't have made it this far to begin with).
     */
    if (predicateList != null) {
        if (wherePredicates == null) {
            wherePredicates = new PredicateList(getContextManager());
        }
        int sz = predicateList.size();
        for (int i = sz - 1; i >= 0; i--) {
            // We can tell if a predicate was pushed into this select
            // node because it will have been "scoped" for this node
            // or for some result set below this one.
            Predicate pred = (Predicate) predicateList.getOptPredicate(i);
            if (pred.isScopedToSourceResultSet()) {
                // If we're pushing the predicate down here, we have to
                // remove it from the predicate list of the node above
                // this select, in order to keep in line with established
                // push 'protocol'.
                wherePredicates.addOptPredicate(pred);
                predicateList.removeOptPredicate(pred);
            }
        }
    }
    opt = getOptimizer(fromList, wherePredicates, dataDictionary,
                       qec.getOrderByList(0), // use first one
                       overridingPlan);
    opt.setOuterRows(outerRows);
    /* Optimize this SelectNode */
    while (opt.getNextPermutation()) {
        while (opt.getNextDecoratedPermutation()) {
            opt.costPermutation();
        }
    }
    /* When we're done optimizing, any scoped predicates that
     * we pushed down the tree should now be sitting again
     * in our wherePredicates list.  Put those back in the
     * list from which we received them, to allow them
     * to be "pulled" back up to where they came from.
     */
    if (wherePredicates != null) {
        for (int i = wherePredicates.size() - 1; i >= 0; i--) {
            Predicate pred = (Predicate) wherePredicates.getOptPredicate(i);
            if (pred.isScopedForPush()) {
                predicateList.addOptPredicate(pred);
                wherePredicates.removeOptPredicate(pred);
            }
        }
    }
    /* Get the cost */
    setCostEstimate(opt.getOptimizedCost());
    /* Update row counts if this is a scalar aggregate */
    if ((selectAggregates != null) && (selectAggregates.size() > 0)) {
        getCostEstimate().setEstimatedRowCount((long) outerRows);
        getCostEstimate().setSingleScanRowCount(1);
    }
    selectSubquerys.optimize(dataDictionary, getCostEstimate().rowCount());
    if (whereSubquerys != null && whereSubquerys.size() > 0) {
        whereSubquerys.optimize(dataDictionary, getCostEstimate().rowCount());
    }
    if (havingSubquerys != null && havingSubquerys.size() > 0) {
        havingSubquerys.optimize(dataDictionary, getCostEstimate().rowCount());
    }
    // Signal to the optimizer tracer that we're done with this query block.
    if (optimizerTracingIsOn()) {
        getOptimizerTracer().traceEndQueryBlock();
    }
    return this;
}
Also used: OptimizablePredicateList (org.apache.derby.iapi.sql.compile.OptimizablePredicateList), Optimizer (org.apache.derby.iapi.sql.compile.Optimizer)
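
The nested loop at the heart of this example is the general shape of Derby's cost-based enumeration: broadly, the outer getNextPermutation() walks join orders, the inner getNextDecoratedPermutation() walks access-path choices for that order, and costPermutation() prices each combination so getOptimizedCost() can report the cheapest plan afterwards. What follows is a minimal, self-contained sketch of that enumerate-and-cost pattern; PlanEnumerator, ExhaustiveEnumerator and the cost lists are hypothetical illustration names, not Derby API.

// Minimal sketch of the enumerate-and-cost loop used above.  All names here
// (PlanEnumerator, ExhaustiveEnumerator, ...) are hypothetical and only mirror
// the shape of getNextPermutation()/getNextDecoratedPermutation()/costPermutation().
import java.util.List;

interface PlanEnumerator {
    boolean nextJoinOrder();   // analogous to getNextPermutation()
    boolean nextAccessPath();  // analogous to getNextDecoratedPermutation()
    double costCurrentPlan();  // analogous to costPermutation()
    double bestCost();         // analogous to getOptimizedCost()
}

final class ExhaustiveEnumerator implements PlanEnumerator {
    private final List<Double> joinOrderBaseCosts;
    private final List<Double> accessPathFactors;
    private int joinOrder = -1;
    private int accessPath = -1;
    private double best = Double.POSITIVE_INFINITY;

    ExhaustiveEnumerator(List<Double> joinOrderBaseCosts, List<Double> accessPathFactors) {
        this.joinOrderBaseCosts = joinOrderBaseCosts;
        this.accessPathFactors = accessPathFactors;
    }

    public boolean nextJoinOrder() {
        joinOrder++;
        accessPath = -1;   // restart the inner enumeration for the new join order
        return joinOrder < joinOrderBaseCosts.size();
    }

    public boolean nextAccessPath() {
        accessPath++;
        return accessPath < accessPathFactors.size();
    }

    public double costCurrentPlan() {
        double cost = joinOrderBaseCosts.get(joinOrder) * accessPathFactors.get(accessPath);
        if (cost < best) {
            best = cost;   // remember the cheapest plan seen so far
        }
        return cost;
    }

    public double bestCost() {
        return best;
    }
}

public class EnumerationLoopDemo {
    public static void main(String[] args) {
        PlanEnumerator opt = new ExhaustiveEnumerator(
                List.of(100.0, 80.0, 120.0),   // pretend per-join-order base costs
                List.of(1.0, 0.4, 2.5));       // pretend per-access-path multipliers

        // Same nested loop shape as SelectNode.optimize() above.
        while (opt.nextJoinOrder()) {
            while (opt.nextAccessPath()) {
                opt.costCurrentPlan();
            }
        }
        System.out.println("best cost = " + opt.bestCost());   // 80.0 * 0.4 = 32.0
    }
}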

Example 2 with Optimizer

Use of org.apache.derby.iapi.sql.compile.Optimizer in the Apache Derby project.

From the class FromBaseTable, method changeAccessPath.

/**
 * @see ResultSetNode#changeAccessPath
 *
 * @exception StandardException		Thrown on error
 */
@Override
ResultSetNode changeAccessPath() throws StandardException {
    ResultSetNode retval;
    AccessPath ap = getTrulyTheBestAccessPath();
    ConglomerateDescriptor trulyTheBestConglomerateDescriptor = ap.getConglomerateDescriptor();
    JoinStrategy trulyTheBestJoinStrategy = ap.getJoinStrategy();
    Optimizer opt = ap.getOptimizer();
    if (optimizerTracingIsOn()) {
        getOptimizerTracer().traceChangingAccessPathForTable(tableNumber);
    }
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(trulyTheBestConglomerateDescriptor != null, "Should only modify access path after conglomerate has been chosen.");
    }
    /*
    ** Make sure user-specified bulk fetch is OK with the chosen join
    ** strategy.
    */
    if (bulkFetch != UNSET) {
        if (!trulyTheBestJoinStrategy.bulkFetchOK()) {
            throw StandardException.newException(SQLState.LANG_INVALID_BULK_FETCH_WITH_JOIN_TYPE, trulyTheBestJoinStrategy.getName());
        } else if (trulyTheBestJoinStrategy.ignoreBulkFetch()) {
            // bulkFetch has no meaning for hash join, just ignore it
            disableBulkFetch();
        } else if (isOneRowResultSet()) {
            // bug 4431 - ignore bulkfetch property if it's 1 row resultset
            disableBulkFetch();
        }
    }
    // bulkFetch = 1 is the same as no bulk fetch
    if (bulkFetch == 1) {
        disableBulkFetch();
    }
    /* Remove any redundant join clauses.  A redundant join clause is one
     * where there are other join clauses in the same equivalence class
     * after it in the PredicateList.
     */
    restrictionList.removeRedundantPredicates();
    /*
    ** Divide up the predicates for different processing phases of the
    ** best join strategy.
    */
    storeRestrictionList = new PredicateList(getContextManager());
    nonStoreRestrictionList = new PredicateList(getContextManager());
    requalificationRestrictionList = new PredicateList(getContextManager());
    trulyTheBestJoinStrategy.divideUpPredicateLists(this, restrictionList, storeRestrictionList, nonStoreRestrictionList, requalificationRestrictionList, getDataDictionary());
    /* Check to see if we are going to do execution-time probing
     * of an index using IN-list values.  We can tell by looking
     * at the restriction list: if there is an IN-list probe
     * predicate that is also a start/stop key then we know that
     * we're going to do execution-time probing.  In that case
     * we disable bulk fetching to minimize the number of non-
     * matching rows that we read from disk.  RESOLVE: Do we
     * really need to completely disable bulk fetching here,
     * or can we do something else?
     */
    for (Predicate pred : restrictionList) {
        if (pred.isInListProbePredicate() && pred.isStartKey()) {
            disableBulkFetch();
            multiProbing = true;
            break;
        }
    }
    /*
    ** Consider turning on bulkFetch if it is turned
    ** off.  Only turn it on if it is not an updatable
    ** scan and if it isn't a oneRowResultSet, and
    ** not a subquery, and it is OK to use bulk fetch
    ** with the chosen join strategy.  NOTE: the subquery logic
    ** could be more sophisticated -- we are taking
    ** the safe route in avoiding reading extra
    ** data for something like:
    **
    **     select x from t where x in (select y from t)
    **
    ** In this case we want to stop the subquery
    ** evaluation as soon as something matches.
    */
    if (trulyTheBestJoinStrategy.bulkFetchOK() && !(trulyTheBestJoinStrategy.ignoreBulkFetch()) && !bulkFetchTurnedOff && (bulkFetch == UNSET) && !forUpdate() && !isOneRowResultSet() && getLevel() == 0 && !validatingCheckConstraint) {
        bulkFetch = getDefaultBulkFetch();
    }
    /* Statement is dependent on the chosen conglomerate. */
    getCompilerContext().createDependency(trulyTheBestConglomerateDescriptor);
    /* No need to modify access path if conglomerate is the heap */
    if (!trulyTheBestConglomerateDescriptor.isIndex()) {
        /*
        ** We need a little special logic for SYSSTATEMENTS
        ** here.  SYSSTATEMENTS has a hidden column at the
        ** end.  When someone does a select * we don't want
        ** to get that column from the store.  So we'll always
        ** generate a partial read bitSet if we are scanning
        ** SYSSTATEMENTS to ensure we don't get the hidden
        ** column.
        */
        boolean isSysstatements = tableName.equals("SYS", "SYSSTATEMENTS");
        /* Template must reflect full row.
         * Compact RCL down to partial row.
         */
        templateColumns = getResultColumns();
        referencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), isSysstatements, false);
        setResultColumns(getResultColumns().compactColumns(isCursorTargetTable(), isSysstatements));
        return this;
    }
    /* Derby-1087: use data page when returning an updatable resultset */
    if (ap.getCoveringIndexScan() && (!isCursorTargetTable())) {
        /* Massage resultColumns so that it matches the index. */
        setResultColumns(newResultColumns(getResultColumns(), trulyTheBestConglomerateDescriptor, baseConglomerateDescriptor, false));
        /* We are going against the index.  The template row must be the full index row.
         * The template row will have the RID but the result row will not
         * since there is no need to go to the data page.
         */
        templateColumns = newResultColumns(getResultColumns(), trulyTheBestConglomerateDescriptor, baseConglomerateDescriptor, false);
        templateColumns.addRCForRID();
        // If this is for update then we need to get the RID in the result row
        if (forUpdate()) {
            getResultColumns().addRCForRID();
        }
        /* Compact RCL down to the partial row.  We always want a new
         * RCL and FormatableBitSet because this is a covering index.  (This is
         * because we don't want the RID in the partial row returned
         * by the store.)
         */
        referencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), true, false);
        setResultColumns(getResultColumns().compactColumns(isCursorTargetTable(), true));
        getResultColumns().setIndexRow(baseConglomerateDescriptor.getConglomerateNumber(), forUpdate());
        return this;
    }
    /* Statement is dependent on the base conglomerate if this is
     * a non-covering index.
     */
    getCompilerContext().createDependency(baseConglomerateDescriptor);
    /*
    ** On bulkFetch, we need to add the restrictions from
    ** the TableScan and reapply them here.
    */
    if (bulkFetch != UNSET) {
        restrictionList.copyPredicatesToOtherList(requalificationRestrictionList);
    }
    /*
    ** We know the chosen conglomerate is an index.  We need to allocate
    ** an IndexToBaseRowNode above us, and to change the result column
    ** list for this FromBaseTable to reflect the columns in the index.
    ** We also need to shift "cursor target table" status from this
    ** FromBaseTable to the new IndexToBaseRowNode (because that's where
    ** a cursor can fetch the current row).
    */
    ResultColumnList newResultColumns = newResultColumns(getResultColumns(), trulyTheBestConglomerateDescriptor, baseConglomerateDescriptor, true);
    /* Compact the RCL for the IndexToBaseRowNode down to
     * the partial row for the heap.  The referenced BitSet
     * will reflect only those columns coming from the heap.
     * (ie, it won't reflect columns coming from the index.)
     * NOTE: We need to re-get all of the columns from the heap
     * when doing a bulk fetch because we will be requalifying
     * the row in the IndexRowToBaseRow.
     */
    // Get the BitSet for all of the referenced columns
    FormatableBitSet indexReferencedCols = null;
    FormatableBitSet heapReferencedCols;
    if ((bulkFetch == UNSET) && (requalificationRestrictionList == null || requalificationRestrictionList.size() == 0)) {
        /* No BULK FETCH or requalification, XOR off the columns coming from the heap
         * to get the columns coming from the index.
         */
        indexReferencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), true, false);
        heapReferencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), true, true);
        if (heapReferencedCols != null) {
            indexReferencedCols.xor(heapReferencedCols);
        }
    } else {
        // BULK FETCH or requalification - re-get all referenced columns from the heap
        heapReferencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), true, false);
    }
    ResultColumnList heapRCL = getResultColumns().compactColumns(isCursorTargetTable(), false);
    heapRCL.setIndexRow(baseConglomerateDescriptor.getConglomerateNumber(), forUpdate());
    retval = new IndexToBaseRowNode(this, baseConglomerateDescriptor, heapRCL, isCursorTargetTable(), heapReferencedCols, indexReferencedCols, requalificationRestrictionList, forUpdate(), tableProperties, getContextManager());
    /*
    ** The template row is all the columns.  The
    ** result set is the compacted column list.
    */
    setResultColumns(newResultColumns);
    templateColumns = newResultColumns(getResultColumns(), trulyTheBestConglomerateDescriptor, baseConglomerateDescriptor, false);
    /* Since we are doing a non-covered index scan, if bulkFetch is on, then
     * the only columns that we need to get are those columns referenced in the start and stop positions
     * and the qualifiers (and the RID) because we will need to re-get all of the other
     * columns from the heap anyway.
     * At this point in time, columns referenced anywhere in the column tree are
     * marked as being referenced.  So, we clear all of the references, walk the
     * predicate list and remark the columns referenced from there and then add
     * the RID before compacting the columns.
     */
    if (bulkFetch != UNSET) {
        getResultColumns().markAllUnreferenced();
        storeRestrictionList.markReferencedColumns();
        if (nonStoreRestrictionList != null) {
            nonStoreRestrictionList.markReferencedColumns();
        }
    }
    getResultColumns().addRCForRID();
    templateColumns.addRCForRID();
    // Compact the RCL for the index scan down to the partial row.
    referencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), false, false);
    setResultColumns(getResultColumns().compactColumns(isCursorTargetTable(), false));
    getResultColumns().setIndexRow(baseConglomerateDescriptor.getConglomerateNumber(), forUpdate());
    /* We must remember if this was the cursorTargetTable
     * in order to get the right locking on the scan.
     */
    getUpdateLocks = isCursorTargetTable();
    setCursorTargetTable(false);
    return retval;
}
Also used: OptimizablePredicateList (org.apache.derby.iapi.sql.compile.OptimizablePredicateList), Optimizer (org.apache.derby.iapi.sql.compile.Optimizer), AccessPath (org.apache.derby.iapi.sql.compile.AccessPath), JoinStrategy (org.apache.derby.iapi.sql.compile.JoinStrategy), FormatableBitSet (org.apache.derby.iapi.services.io.FormatableBitSet), ConglomerateDescriptor (org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor), OptimizablePredicate (org.apache.derby.iapi.sql.compile.OptimizablePredicate)
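
When the scan is a non-covering index without bulk fetch or requalification, the example above splits the referenced columns into an index part and a heap part by XOR-ing two bitmaps: the set of all referenced columns and the subset that must come from the heap. The sketch below shows the same bit trick in isolation; java.util.BitSet is used only as a stand-in for Derby's FormatableBitSet, and the column numbers are made up for illustration.

// Minimal sketch of the bitmap split done above: start with every referenced
// column, XOR away the columns that must come from the heap, and what remains
// is satisfied by the index alone.
import java.util.BitSet;

public class ColumnSplitDemo {
    public static void main(String[] args) {
        BitSet allReferenced = new BitSet();
        allReferenced.set(0);   // e.g. a key column, present in the index
        allReferenced.set(2);   // a non-indexed column, must be fetched from the heap
        allReferenced.set(5);   // another indexed column

        BitSet heapOnly = new BitSet();
        heapOnly.set(2);

        // Same trick as indexReferencedCols.xor(heapReferencedCols): because the
        // heap-only set is a subset of the referenced set, XOR removes exactly
        // those bits and leaves the index-resolvable columns behind.
        BitSet indexOnly = (BitSet) allReferenced.clone();
        indexOnly.xor(heapOnly);

        System.out.println("heap columns:  " + heapOnly);    // {2}
        System.out.println("index columns: " + indexOnly);   // {0, 5}
    }
}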

Example 3 with Optimizer

Use of org.apache.derby.iapi.sql.compile.Optimizer in the Apache Derby project.

From the class FromTable, method updateBestPlanMap.

/**
 * @see Optimizable#updateBestPlanMap
 */
public void updateBestPlanMap(short action, Object planKey) throws StandardException {
    if (action == REMOVE_PLAN) {
        if (bestPlanMap != null) {
            bestPlanMap.remove(planKey);
            if (bestPlanMap.isEmpty()) {
                bestPlanMap = null;
            }
        }
        return;
    }
    AccessPath bestPath = getTrulyTheBestAccessPath();
    AccessPathImpl ap = null;
    if (action == ADD_PLAN) {
        // If this method is called before the node has ever been
        // optimized, there will be no best path--so there's nothing to do.
        if (bestPath == null)
            return;
        // If the best plan map already exists, look up the saved
        // AccessPath for the received key and reuse it if we can.
        if (bestPlanMap == null)
            bestPlanMap = new HashMap<Object, AccessPathImpl>();
        else
            ap = bestPlanMap.get(planKey);
        // If there is no AccessPath for this key yet, create one.  If the
        // key is itself an Optimizer, pass it to the constructor;
        // otherwise just pass null.
        if (ap == null) {
            if (planKey instanceof Optimizer)
                ap = new AccessPathImpl((Optimizer) planKey);
            else
                ap = new AccessPathImpl((Optimizer) null);
        }
        ap.copy(bestPath);
        bestPlanMap.put(planKey, ap);
        return;
    }
    // If no plans were ever saved, there's nothing to load.  This can
    // happen if the key is an optimizer that tried a join order for
    // which there was no valid plan.
    if (bestPlanMap == null)
        return;
    ap = bestPlanMap.get(planKey);
    // There might not be a plan stored for the key, in which case
    // there's nothing to load.
    if ((ap == null) || (ap.getCostEstimate() == null))
        return;
    // We found a best plan in our map, so load it into this Optimizable's
    // trulyTheBestAccessPath field.
    bestPath.copy(ap);
}
Also used: HashMap (java.util.HashMap), Optimizer (org.apache.derby.iapi.sql.compile.Optimizer), AccessPath (org.apache.derby.iapi.sql.compile.AccessPath)
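
updateBestPlanMap() implements a small protocol: ADD_PLAN snapshots the current trulyTheBestAccessPath under a key (often the Optimizer that produced it), a later load restores it, and REMOVE_PLAN discards it once the key is no longer needed. Below is a compact, hypothetical sketch of that bookkeeping; PlanCache, Plan and the enum are illustration-only names, not Derby classes, and the real code copies state into an AccessPathImpl rather than storing an immutable value.

// Hypothetical sketch of the add/load/remove plan-map protocol shown above.
import java.util.HashMap;
import java.util.Map;

public class PlanCacheDemo {
    enum Action { ADD_PLAN, LOAD_PLAN, REMOVE_PLAN }

    // Immutable stand-in for the bundle of state an AccessPath carries.
    record Plan(String accessPath, double cost) {}

    static final class PlanCache {
        private Map<Object, Plan> bestPlanMap;   // lazily allocated, like bestPlanMap above
        private Plan trulyTheBest;               // analogue of trulyTheBestAccessPath

        void setCurrentBest(Plan p) { trulyTheBest = p; }
        Plan currentBest() { return trulyTheBest; }

        void update(Action action, Object planKey) {
            switch (action) {
                case REMOVE_PLAN -> {
                    if (bestPlanMap != null) {
                        bestPlanMap.remove(planKey);
                        if (bestPlanMap.isEmpty()) bestPlanMap = null;
                    }
                }
                case ADD_PLAN -> {
                    if (trulyTheBest == null) return;        // nothing optimized yet, nothing to save
                    if (bestPlanMap == null) bestPlanMap = new HashMap<>();
                    bestPlanMap.put(planKey, trulyTheBest);  // remember the current best under this key
                }
                case LOAD_PLAN -> {
                    if (bestPlanMap == null) return;         // no plan was ever saved
                    Plan saved = bestPlanMap.get(planKey);
                    if (saved != null) trulyTheBest = saved; // restore the remembered plan
                }
            }
        }
    }

    public static void main(String[] args) {
        PlanCache node = new PlanCache();
        Object outerOptimizer = new Object();                 // stands in for the Optimizer used as a map key

        node.setCurrentBest(new Plan("index scan on IX_T1", 42.0));
        node.update(Action.ADD_PLAN, outerOptimizer);         // save it under this key

        node.setCurrentBest(new Plan("table scan", 900.0));   // later join orders clobber the field
        node.update(Action.LOAD_PLAN, outerOptimizer);        // pull the remembered plan back
        System.out.println(node.currentBest());               // Plan[accessPath=index scan on IX_T1, cost=42.0]

        node.update(Action.REMOVE_PLAN, outerOptimizer);      // done with this key
    }
}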

Example 4 with Optimizer

Use of org.apache.derby.iapi.sql.compile.Optimizer in the Apache Derby project.

From the class FromTable, method rememberJoinStrategyAsBest.

/**
 * @see Optimizable#rememberJoinStrategyAsBest
 */
public void rememberJoinStrategyAsBest(AccessPath ap) {
    Optimizer opt = ap.getOptimizer();
    ap.setJoinStrategy(getCurrentAccessPath().getJoinStrategy());
    if (optimizerTracingIsOn()) {
        getOptimizerTracer().traceRememberingJoinStrategy(getCurrentAccessPath().getJoinStrategy(), tableNumber);
    }
    if (ap == bestAccessPath) {
        if (optimizerTracingIsOn()) {
            getOptimizerTracer().traceRememberingBestAccessPathSubstring(ap, tableNumber);
        }
    } else if (ap == bestSortAvoidancePath) {
        if (optimizerTracingIsOn()) {
            getOptimizerTracer().traceRememberingBestSortAvoidanceAccessPathSubstring(ap, tableNumber);
        }
    } else {
        /* We currently get here when optimizing an outer join.
         * (Problem predates optimizer trace change.)
         * RESOLVE - fix this at some point.
         *
         * if (SanityManager.DEBUG)
         * {
         *     SanityManager.THROWASSERT("unknown access path type");
         * }
         */
        if (optimizerTracingIsOn()) {
            getOptimizerTracer().traceRememberingBestUnknownAccessPathSubstring(ap, tableNumber);
        }
    }
}
Also used: Optimizer (org.apache.derby.iapi.sql.compile.Optimizer)
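
rememberJoinStrategyAsBest() copies the join strategy of the current candidate access path into whichever "best" slot the optimizer passed in, and the trace message depends on which slot that was (best path, best sort-avoidance path, or, during outer-join optimization, neither). The following is a minimal hypothetical sketch of that pattern; the names are illustration-only and System.out stands in for the optimizer tracer.

// Hypothetical sketch of the "remember the current candidate as best" pattern.
public class RememberBestDemo {
    enum JoinStrategy { NESTED_LOOP, HASH }

    static final class AccessPath {
        JoinStrategy joinStrategy;
    }

    private final AccessPath currentAccessPath = new AccessPath();
    private final AccessPath bestAccessPath = new AccessPath();
    private final AccessPath bestSortAvoidancePath = new AccessPath();

    void rememberJoinStrategyAsBest(AccessPath ap) {
        ap.joinStrategy = currentAccessPath.joinStrategy;   // copy from the current candidate
        if (ap == bestAccessPath) {
            System.out.println("remembered " + ap.joinStrategy + " as best access path");
        } else if (ap == bestSortAvoidancePath) {
            System.out.println("remembered " + ap.joinStrategy + " as best sort-avoidance path");
        } else {
            // e.g. outer-join optimization hands in a path that is neither slot
            System.out.println("remembered " + ap.joinStrategy + " for an unknown path");
        }
    }

    public static void main(String[] args) {
        RememberBestDemo demo = new RememberBestDemo();
        demo.currentAccessPath.joinStrategy = JoinStrategy.HASH;
        demo.rememberJoinStrategyAsBest(demo.bestAccessPath);
    }
}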

Aggregations

Optimizer (org.apache.derby.iapi.sql.compile.Optimizer): 4 usages
AccessPath (org.apache.derby.iapi.sql.compile.AccessPath): 2 usages
OptimizablePredicateList (org.apache.derby.iapi.sql.compile.OptimizablePredicateList): 2 usages
HashMap (java.util.HashMap): 1 usage
FormatableBitSet (org.apache.derby.iapi.services.io.FormatableBitSet): 1 usage
JoinStrategy (org.apache.derby.iapi.sql.compile.JoinStrategy): 1 usage
OptimizablePredicate (org.apache.derby.iapi.sql.compile.OptimizablePredicate): 1 usage
ConglomerateDescriptor (org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor): 1 usage