Use of org.apache.derby.catalog.IndexDescriptor in project derby by apache.
The class GroupByNode, method considerPostOptimizeOptimizations.
/**
 * Consider any optimizations after the optimizer has chosen a plan.
 * Optimizations include:
 *   o min optimization for scalar aggregates
 *   o max optimization for scalar aggregates
 *
 * @param selectHasPredicates true if the SELECT containing this
 *        vector/scalar aggregate has a restriction
 *
 * @exception StandardException on error
 */
void considerPostOptimizeOptimizations(boolean selectHasPredicates) throws StandardException {
    /* Consider the optimization for min with an asc index on that column or
     * max with a desc index on that column:
     *  o No group by
     *  o One of:
     *     o min/max(ColumnReference) is the only aggregate && the source is
     *       ordered on the ColumnReference
     *     o min/max(ConstantNode)
     * The optimization the other way around (min with a desc index or
     * max with an asc index) has the same restrictions, with the additional
     * temporary restriction of no qualifications at all (because
     * we don't have true backward scans).
     */
    if (groupingList == null) {
        if (aggregates.size() == 1) {
            AggregateNode an = aggregates.get(0);
            AggregateDefinition ad = an.getAggregateDefinition();
            if (ad instanceof MaxMinAggregateDefinition) {
                if (an.getOperand() instanceof ColumnReference) {
                    /* See if the underlying ResultSet tree
                     * is ordered on the ColumnReference.
                     */
                    ColumnReference[] crs = { (ColumnReference) an.getOperand() };

                    // Holder list for the FromBaseTable. We expect no more
                    // than one table, hence the initial capacity is 1.
                    ArrayList<FromBaseTable> fbtHolder = new ArrayList<FromBaseTable>(1);

                    boolean minMaxOptimizationPossible = isOrderedOn(crs, false, fbtHolder);
                    if (SanityManager.DEBUG) {
                        SanityManager.ASSERT(fbtHolder.size() <= 1,
                                "bad number of FromBaseTables returned by isOrderedOn() -- "
                                + fbtHolder.size());
                    }

                    if (minMaxOptimizationPossible) {
                        boolean ascIndex = true;
                        int colNum = crs[0].getColumnNumber();

                        /* Check if we have an access path; this will be
                         * null in a join case (see Beetle 4423, DERBY-3904).
                         */
                        AccessPath accessPath = getTrulyTheBestAccessPath();
                        if (accessPath == null
                                || accessPath.getConglomerateDescriptor() == null
                                || accessPath.getConglomerateDescriptor().getIndexDescriptor() == null) {
                            return;
                        }
                        IndexDescriptor id =
                                accessPath.getConglomerateDescriptor().getIndexDescriptor();
                        int[] keyColumns = id.baseColumnPositions();
                        boolean[] isAscending = id.isAscending();

                        for (int i = 0; i < keyColumns.length; i++) {
                            /* In a query such as: select min(c3) from
                             * tab1 where c1 = 2 and c2 = 5, if the prefix keys
                             * have an equality operator, then we can still use
                             * the index. The checking of the equality operator
                             * has been done in isStrictlyOrderedOn.
                             */
                            if (colNum == keyColumns[i]) {
                                if (!isAscending[i]) {
                                    ascIndex = false;
                                }
                                break;
                            }
                        }
                        FromBaseTable fbt = fbtHolder.get(0);
                        MaxMinAggregateDefinition temp = (MaxMinAggregateDefinition) ad;

                        if ((!temp.isMax() && ascIndex) || (temp.isMax() && !ascIndex)) {
                            fbt.disableBulkFetch();
                            singleInputRowOptimization = true;
                        }
                        /*
                        ** Max optimization with an asc index or min with a
                        ** desc index is currently more restrictive than
                        ** the other way around. We are getting the store to
                        ** return the last row from an index (for the time
                        ** being, the store cannot do real backward scans), so
                        ** we cannot do this optimization if we have any
                        ** predicates at all.
                        */
                        else if (!selectHasPredicates
                                && ((temp.isMax() && ascIndex) || (!temp.isMax() && !ascIndex))) {
                            fbt.disableBulkFetch();
                            fbt.doSpecialMaxScan();
                            singleInputRowOptimization = true;
                        }
                    }
                } else if (an.getOperand() instanceof ConstantNode) {
                    singleInputRowOptimization = true;
                }
            }
        }
    }
}
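To make the two cases concrete, here is a minimal JDBC sketch of the query shapes this method targets. The table T1, index IX1, and database name demoDB are assumptions for illustration, not from the Derby source; whether the optimization actually fires also depends on the chosen access path, as the code above shows.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class MinMaxScanDemo {
    public static void main(String[] args) throws Exception {
        // Embedded Derby; the database name is an assumption.
        Connection conn = DriverManager.getConnection("jdbc:derby:demoDB;create=true");
        Statement s = conn.createStatement();
        s.execute("CREATE TABLE T1 (C1 INT, C2 INT)");
        s.execute("CREATE INDEX IX1 ON T1 (C1)"); // ascending index on C1

        // min with an ascending index: the eligible case for the
        // single-input-row optimization when the source is ordered on C1.
        ResultSet rs = s.executeQuery("SELECT MIN(C1) FROM T1");
        rs.next();

        // max with an ascending index: per the comments above, the special
        // last-row scan is only used when the SELECT has no predicates.
        rs = s.executeQuery("SELECT MAX(C1) FROM T1");
        rs.next();

        rs.close();
        s.close();
        conn.close();
    }
}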
Use of org.apache.derby.catalog.IndexDescriptor in project derby by apache.
The class AlterTableConstantAction, method getAffectedIndexes.
/**
 * Get info on the indexes on the table being compressed.
 *
 * @exception StandardException Thrown on error
 */
private void getAffectedIndexes() throws StandardException {
    IndexLister indexLister = td.getIndexLister();

    /* We have to get non-distinct index row generators and conglom numbers
     * here and then compress them to distinct later, because drop column
     * will need to change the index descriptor directly on each index
     * entry in SYSCONGLOMERATES, on duplicate indexes too.
     */
    compressIRGs = indexLister.getIndexRowGenerators();
    numIndexes = compressIRGs.length;
    indexConglomerateNumbers = indexLister.getIndexConglomerateNumbers();

    if (!(compressTable || truncateTable)) { // then it's drop column
        ArrayList<ConstantAction> newCongloms = new ArrayList<ConstantAction>();
        for (int i = 0; i < compressIRGs.length; i++) {
            int[] baseColumnPositions = compressIRGs[i].baseColumnPositions();
            int j;
            for (j = 0; j < baseColumnPositions.length; j++) {
                if (baseColumnPositions[j] == droppedColumnPosition) {
                    break;
                }
            }
            if (j == baseColumnPositions.length) { // not related
                continue;
            }

            if (baseColumnPositions.length == 1
                    || (behavior == StatementType.DROP_CASCADE && compressIRGs[i].isUnique())) {
                numIndexes--;
                /* Get the first conglomerate with this conglom number each
                 * time; each duplicate one will eventually all be dropped.
                 */
                ConglomerateDescriptor cd =
                        td.getConglomerateDescriptor(indexConglomerateNumbers[i]);
                dropConglomerate(cd, td, true, newCongloms, activation,
                                 activation.getLanguageConnectionContext());
                // mark it
                compressIRGs[i] = null;
                continue;
            }
            // a constraint, because constraints have already been handled
            if (compressIRGs[i].isUnique()) {
                ConglomerateDescriptor cd =
                        td.getConglomerateDescriptor(indexConglomerateNumbers[i]);
                throw StandardException.newException(
                        SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT,
                        dm.getActionString(DependencyManager.DROP_COLUMN),
                        columnInfo[0].name, "UNIQUE INDEX",
                        cd.getConglomerateName());
            }
        }

        /* If there are new backing conglomerates which must be
         * created to replace a dropped shared conglomerate
         * (where the shared conglomerate was dropped as part
         * of a "drop conglomerate" call above), then create
         * them now. We do this *after* dropping all dependent
         * conglomerates because we don't want to waste time
         * creating a new conglomerate if it's just going to be
         * dropped again as part of another "drop conglomerate"
         * call.
         */
        createNewBackingCongloms(newCongloms, indexConglomerateNumbers);

        IndexRowGenerator[] newIRGs = new IndexRowGenerator[numIndexes];
        long[] newIndexConglomNumbers = new long[numIndexes];
        collation = new int[numIndexes][];

        for (int i = 0, j = 0; i < numIndexes; i++, j++) {
            while (compressIRGs[j] == null) {
                j++;
            }
            // Set up the collation id array to be passed in on the call to create index.
            collation[i] = compressIRGs[j].getColumnCollationIds(td.getColumnDescriptorList());

            int[] baseColumnPositions = compressIRGs[j].baseColumnPositions();
            newIRGs[i] = compressIRGs[j];
            newIndexConglomNumbers[i] = indexConglomerateNumbers[j];

            boolean[] isAscending = compressIRGs[j].isAscending();
            boolean reMakeArrays = false;
            boolean rewriteBaseColumnPositions = false;
            int size = baseColumnPositions.length;
            for (int k = 0; k < size; k++) {
                if (baseColumnPositions[k] > droppedColumnPosition) {
                    baseColumnPositions[k]--;
                    rewriteBaseColumnPositions = true;
                } else if (baseColumnPositions[k] == droppedColumnPosition) {
                    // mark it
                    baseColumnPositions[k] = 0;
                    reMakeArrays = true;
                }
            }
            if (rewriteBaseColumnPositions) {
                compressIRGs[j].setBaseColumnPositions(baseColumnPositions);
            }
            if (reMakeArrays) {
                size--;
                int[] newBCP = new int[size];
                boolean[] newIsAscending = new boolean[size];
                int[] newCollation = new int[collation[i].length - 1];
                for (int k = 0, step = 0; k < size; k++) {
                    if (step == 0 && baseColumnPositions[k + step] == 0) {
                        step++;
                    }
                    newBCP[k] = baseColumnPositions[k + step];
                    newIsAscending[k] = isAscending[k + step];
                    newCollation[k] = collation[i][k + step];
                }
                IndexDescriptor id = compressIRGs[j].getIndexDescriptor();
                id.setBaseColumnPositions(newBCP);
                id.setIsAscending(newIsAscending);
                id.setNumberOfOrderedColumns(id.numberOfOrderedColumns() - 1);
                collation[i] = newCollation;
            }
        }
        compressIRGs = newIRGs;
        indexConglomerateNumbers = newIndexConglomNumbers;
    } else {
        collation = new int[numIndexes][];
        for (int i = 0; i < numIndexes; i++) {
            collation[i] = compressIRGs[i].getColumnCollationIds(td.getColumnDescriptorList());
        }
    }

    /* Now we are done with updating each index descriptor entry directly
     * in SYSCONGLOMERATES (for duplicate indexes as well). From now on, our
     * work should apply only once for each real conglomerate, so we
     * compress any duplicate indexes now.
     */
    Object[] compressIndexResult =
            compressIndexArrays(indexConglomerateNumbers, compressIRGs);
    if (compressIndexResult != null) {
        indexConglomerateNumbers = (long[]) compressIndexResult[1];
        compressIRGs = (IndexRowGenerator[]) compressIndexResult[2];
        numIndexes = indexConglomerateNumbers.length;
    }

    indexedCols = new FormatableBitSet(
            compressTable || truncateTable ? td.getNumberOfColumns() + 1
                                           : td.getNumberOfColumns());
    for (int index = 0; index < numIndexes; index++) {
        int[] colIds = compressIRGs[index].getIndexDescriptor().baseColumnPositions();
        for (int index2 = 0; index2 < colIds.length; index2++) {
            indexedCols.set(colIds[index2]);
        }
    }
}
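The position-rewriting loop above is easier to follow in isolation. Below is a hypothetical standalone version of that adjustment (adjustForDrop is an invented helper, not a Derby API): key columns after the dropped column shift down by one, and the dropped column itself is removed from the key.

import java.util.Arrays;

public class DropColumnPositionDemo {
    /** Hypothetical standalone version of the rewrite done above:
     *  positions after the dropped column shift down by one, and the
     *  dropped column itself is removed from the key. */
    static int[] adjustForDrop(int[] baseColumnPositions, int droppedColumnPosition) {
        int[] result = new int[baseColumnPositions.length];
        int n = 0;
        for (int pos : baseColumnPositions) {
            if (pos == droppedColumnPosition) {
                continue; // dropped column: remove it from the key
            }
            result[n++] = (pos > droppedColumnPosition) ? pos - 1 : pos;
        }
        return Arrays.copyOf(result, n);
    }

    public static void main(String[] args) {
        // Index on columns (2, 4); column 3 is dropped: column 4 becomes 3.
        System.out.println(Arrays.toString(adjustForDrop(new int[] {2, 4}, 3))); // [2, 3]
        // Index on columns (1, 3); column 3 is dropped: it disappears from the key.
        System.out.println(Arrays.toString(adjustForDrop(new int[] {1, 3}, 3))); // [1]
    }
}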
Use of org.apache.derby.catalog.IndexDescriptor in project derby by apache.
The class FromBaseTable, method supersetOfUniqueIndex.
/**
 * Determine whether or not the columns marked as true in
 * the passed in array are a superset of any unique index
 * on this table.
 * This is useful for subquery flattening and distinct elimination
 * based on a uniqueness condition.
 *
 * @param eqCols The columns to consider
 *
 * @return Whether or not the columns marked as true are a superset
 */
protected boolean supersetOfUniqueIndex(boolean[] eqCols) throws StandardException {
    ConglomerateDescriptor[] cds = tableDescriptor.getConglomerateDescriptors();

    /* Cycle through the ConglomerateDescriptors */
    for (int index = 0; index < cds.length; index++) {
        ConglomerateDescriptor cd = cds[index];
        if (!cd.isIndex()) {
            continue;
        }
        IndexDescriptor id = cd.getIndexDescriptor();
        if (!id.isUnique()) {
            continue;
        }
        int[] keyColumns = id.baseColumnPositions();
        int inner = 0;
        for (; inner < keyColumns.length; inner++) {
            if (!eqCols[keyColumns[inner]]) {
                break;
            }
        }
        /* Did we get a full match? */
        if (inner == keyColumns.length) {
            return true;
        }
    }
    return false;
}
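A minimal self-contained sketch of the same check, using plain arrays in place of Derby's descriptor classes (supersetOfUniqueIndex here is a hypothetical free-standing rewrite): the columns marked true are a superset exactly when every key column of some unique index is marked.

public class SupersetCheckDemo {
    /** Hypothetical standalone version of the check above: true if every
     *  key column of some unique index is marked true in eqCols. */
    static boolean supersetOfUniqueIndex(boolean[] eqCols, int[][] uniqueIndexKeyColumns) {
        for (int[] keyColumns : uniqueIndexKeyColumns) {
            int inner = 0;
            for (; inner < keyColumns.length; inner++) {
                if (!eqCols[keyColumns[inner]]) {
                    break;
                }
            }
            if (inner == keyColumns.length) {
                return true; // full match on this unique index
            }
        }
        return false;
    }

    public static void main(String[] args) {
        // Column positions are 1-based, as in Derby's baseColumnPositions().
        boolean[] eqCols = new boolean[6];
        eqCols[1] = true;
        eqCols[2] = true;
        eqCols[5] = true;
        int[][] uniqueIndexes = { {1, 2}, {3, 4} };
        // The unique index on (1, 2) is fully covered by eqCols: prints true.
        System.out.println(supersetOfUniqueIndex(eqCols, uniqueIndexes));
    }
}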
Use of org.apache.derby.catalog.IndexDescriptor in project derby by apache.
The class FromBaseTable, method supersetOfUniqueIndex.
/**
 * Determine whether or not the columns marked as true in
 * the passed in join table matrix are a superset of any single column
 * unique index on this table.
 * This is useful for distinct elimination
 * based on a uniqueness condition.
 *
 * @param tableColMap The columns to consider
 *
 * @return Whether or not the columns marked as true for at least
 *         one table are a superset
 */
protected boolean supersetOfUniqueIndex(JBitSet[] tableColMap) throws StandardException {
    ConglomerateDescriptor[] cds = tableDescriptor.getConglomerateDescriptors();

    /* Cycle through the ConglomerateDescriptors */
    for (int index = 0; index < cds.length; index++) {
        ConglomerateDescriptor cd = cds[index];
        if (!cd.isIndex()) {
            continue;
        }
        IndexDescriptor id = cd.getIndexDescriptor();
        if (!id.isUnique()) {
            continue;
        }
        int[] keyColumns = id.baseColumnPositions();
        int numBits = tableColMap[0].size();
        JBitSet keyMap = new JBitSet(numBits);
        JBitSet resMap = new JBitSet(numBits);

        int inner = 0;
        for (; inner < keyColumns.length; inner++) {
            keyMap.set(keyColumns[inner]);
        }
        int table = 0;
        for (; table < tableColMap.length; table++) {
            resMap.setTo(tableColMap[table]);
            resMap.and(keyMap);
            if (keyMap.equals(resMap)) {
                tableColMap[table].set(0);
                return true;
            }
        }
    }
    return false;
}
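The JBitSet variant expresses the same subset test with bit operations: AND the candidate columns with the index's key map, and if the result still equals the key map, every key column is covered. JBitSet is Derby-internal, so this rough equivalent uses java.util.BitSet instead.

import java.util.BitSet;

public class BitSetSupersetDemo {
    public static void main(String[] args) {
        // keyMap: bits for a unique index on columns 1 and 2 (1-based).
        BitSet keyMap = new BitSet();
        keyMap.set(1);
        keyMap.set(2);

        // One tableColMap entry: columns with equality conditions for a table.
        BitSet tableCols = new BitSet();
        tableCols.set(1);
        tableCols.set(2);
        tableCols.set(5);

        // resMap = tableCols AND keyMap; if it equals keyMap, every key
        // column is covered, i.e. tableCols is a superset of the index.
        BitSet resMap = (BitSet) tableCols.clone();
        resMap.and(keyMap);
        System.out.println(resMap.equals(keyMap)); // true
    }
}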
Use of org.apache.derby.catalog.IndexDescriptor in project derby by apache.
The class SYSCONGLOMERATESRowFactory, method buildDescriptor.
// /////////////////////////////////////////////////////////////////////////
//
// ABSTRACT METHODS TO BE IMPLEMENTED BY CHILDREN OF CatalogRowFactory
//
// /////////////////////////////////////////////////////////////////////////
/**
 * @param row a SYSCONGLOMERATES row
 * @param parentTupleDescriptor Null for this kind of descriptor.
 * @param dd dataDictionary
 *
 * @return a conglomerate descriptor equivalent to a SYSCONGLOMERATES row
 *
 * @exception StandardException thrown on failure
 */
public TupleDescriptor buildDescriptor(ExecRow row, TupleDescriptor parentTupleDescriptor, DataDictionary dd) throws StandardException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(row.nColumns() == SYSCONGLOMERATES_COLUMN_COUNT,
                "Wrong number of columns for a SYSCONGLOMERATES row");
    }
    DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
    long conglomerateNumber;
    String name;
    boolean isConstraint;
    boolean isIndex;
    IndexRowGenerator indexRowGenerator;
    DataValueDescriptor col;
    ConglomerateDescriptor conglomerateDesc;
    String conglomUUIDString;
    UUID conglomUUID;
    String schemaUUIDString;
    UUID schemaUUID;
    String tableUUIDString;
    UUID tableUUID;

    /* 1st column is SCHEMAID (UUID - char(36)) */
    col = row.getColumn(1);
    schemaUUIDString = col.getString();
    schemaUUID = getUUIDFactory().recreateUUID(schemaUUIDString);

    /* 2nd column is TABLEID (UUID - char(36)) */
    col = row.getColumn(2);
    tableUUIDString = col.getString();
    tableUUID = getUUIDFactory().recreateUUID(tableUUIDString);

    /* 3rd column is CONGLOMERATENUMBER (long) */
    col = row.getColumn(3);
    conglomerateNumber = col.getLong();

    /* 4th column is CONGLOMERATENAME (varchar(128)) */
    col = row.getColumn(4);
    name = col.getString();

    /* 5th column is ISINDEX (boolean) */
    col = row.getColumn(5);
    isIndex = col.getBoolean();

    /* 6th column is DESCRIPTOR */
    col = row.getColumn(6);
    indexRowGenerator = new IndexRowGenerator((IndexDescriptor) col.getObject());

    /* 7th column is ISCONSTRAINT (boolean) */
    col = row.getColumn(7);
    isConstraint = col.getBoolean();

    /* 8th column is CONGLOMERATEID (UUID - char(36)) */
    col = row.getColumn(8);
    conglomUUIDString = col.getString();
    conglomUUID = getUUIDFactory().recreateUUID(conglomUUIDString);

    /* now build and return the descriptor */
    conglomerateDesc = ddg.newConglomerateDescriptor(conglomerateNumber, name,
            isIndex, indexRowGenerator, isConstraint, conglomUUID, tableUUID, schemaUUID);
    return conglomerateDesc;
}
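The row layout this factory decodes is also visible from SQL. A small sketch querying SYS.SYSCONGLOMERATES (the database name is an assumption for illustration; the column names come from the comments above):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SysConglomeratesDemo {
    public static void main(String[] args) throws Exception {
        // The database name is an assumption for illustration.
        Connection conn = DriverManager.getConnection("jdbc:derby:demoDB");
        Statement s = conn.createStatement();
        // Columns 3, 4, 5 and 7 of the layout decoded above.
        ResultSet rs = s.executeQuery(
                "SELECT CONGLOMERATENUMBER, CONGLOMERATENAME, ISINDEX, ISCONSTRAINT"
                + " FROM SYS.SYSCONGLOMERATES");
        while (rs.next()) {
            System.out.println(rs.getLong(1) + " " + rs.getString(2)
                    + " isIndex=" + rs.getBoolean(3)
                    + " isConstraint=" + rs.getBoolean(4));
        }
        rs.close();
        s.close();
        conn.close();
    }
}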