Use of org.apache.derby.iapi.sql.dictionary.IndexRowGenerator in project derby by apache.
Class FromBaseTable, method nextAccessPath.
/*
* Optimizable interface.
*/
/**
* @see org.apache.derby.iapi.sql.compile.Optimizable#nextAccessPath
*
* @exception StandardException Thrown on error
*/
@Override
public boolean nextAccessPath(Optimizer optimizer, OptimizablePredicateList predList, RowOrdering rowOrdering) throws StandardException {
String userSpecifiedIndexName = getUserSpecifiedIndexName();
AccessPath ap = getCurrentAccessPath();
ConglomerateDescriptor currentConglomerateDescriptor = ap.getConglomerateDescriptor();
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceNextAccessPath(getExposedName(), ((predList == null) ? 0 : predList.size()));
}
/*
** Remove the ordering of the current conglomerate descriptor,
** if any.
*/
rowOrdering.removeOptimizable(getTableNumber());
if (userSpecifiedIndexName != null) {
/*
** User specified an index name, so we should look at only one
** index. If there is a current conglomerate descriptor, and there
** are no more join strategies, we've already looked at the index,
** so go back to null.
*/
if (currentConglomerateDescriptor != null) {
if (!super.nextAccessPath(optimizer, predList, rowOrdering)) {
currentConglomerateDescriptor = null;
}
} else {
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceLookingForSpecifiedIndex(userSpecifiedIndexName, tableNumber);
}
if (StringUtil.SQLToUpperCase(userSpecifiedIndexName).equals("NULL")) {
/* Special case - user-specified table scan */
currentConglomerateDescriptor = tableDescriptor.getConglomerateDescriptor(tableDescriptor.getHeapConglomerateId());
} else {
/* User-specified index name */
getConglomDescs();
for (int index = 0; index < conglomDescs.length; index++) {
currentConglomerateDescriptor = conglomDescs[index];
String conglomerateName = currentConglomerateDescriptor.getConglomerateName();
if (conglomerateName != null) {
/* Have we found the desired index? */
if (conglomerateName.equals(userSpecifiedIndexName)) {
break;
}
}
}
/* We should always find a match */
if (SanityManager.DEBUG) {
if (currentConglomerateDescriptor == null) {
SanityManager.THROWASSERT("Expected to find match for forced index " + userSpecifiedIndexName);
}
}
}
if (!super.nextAccessPath(optimizer, predList, rowOrdering)) {
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("No join strategy found");
}
}
}
} else {
if (currentConglomerateDescriptor != null) {
/*
** Once we have a conglomerate descriptor, cycle through
** the join strategies (done in parent).
*/
if (!super.nextAccessPath(optimizer, predList, rowOrdering)) {
/*
** When we're out of join strategies, go to the next
** conglomerate descriptor.
*/
currentConglomerateDescriptor = getNextConglom(currentConglomerateDescriptor);
/*
** New conglomerate, so step through join strategies
** again.
*/
resetJoinStrategies(optimizer);
if (!super.nextAccessPath(optimizer, predList, rowOrdering)) {
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("No join strategy found");
}
}
}
} else {
/* Get the first conglomerate descriptor */
currentConglomerateDescriptor = getFirstConglom();
if (!super.nextAccessPath(optimizer, predList, rowOrdering)) {
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("No join strategy found");
}
}
}
}
if (currentConglomerateDescriptor == null) {
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceNoMoreConglomerates(tableNumber);
}
} else {
currentConglomerateDescriptor.setColumnNames(columnNames);
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceConsideringConglomerate(currentConglomerateDescriptor, tableNumber);
}
}
/*
** Tell the rowOrdering what the ordering of this conglomerate is
*/
if (currentConglomerateDescriptor != null) {
if (!currentConglomerateDescriptor.isIndex()) {
/* If we are scanning the heap, but there
* is a full match on a unique key, then
* we can say that the table IS NOT unordered.
* (We can't currently say what the ordering is
* though.)
*/
if (!isOneRowResultSet(predList)) {
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceAddingUnorderedOptimizable(((predList == null) ? 0 : predList.size()));
}
rowOrdering.addUnorderedOptimizable(this);
} else {
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceScanningHeapWithUniqueKey();
}
}
} else {
IndexRowGenerator irg = currentConglomerateDescriptor.getIndexDescriptor();
int[] baseColumnPositions = irg.baseColumnPositions();
boolean[] isAscending = irg.isAscending();
for (int i = 0; i < baseColumnPositions.length; i++) {
/*
** Don't add the column to the ordering if it's already
** an ordered column. This can happen in the following
** case:
**
** create index ti on t(x, y);
** select * from t where x = 1 order by y;
**
** Column x is always ordered, so we want to avoid the
** sort when using index ti. This is accomplished by
** making column y appear as the first ordered column
** in the list.
*/
if (!rowOrdering.orderedOnColumn(isAscending[i] ? RowOrdering.ASCENDING : RowOrdering.DESCENDING, getTableNumber(), baseColumnPositions[i])) {
rowOrdering.nextOrderPosition(isAscending[i] ? RowOrdering.ASCENDING : RowOrdering.DESCENDING);
rowOrdering.addOrderedColumn(isAscending[i] ? RowOrdering.ASCENDING : RowOrdering.DESCENDING, getTableNumber(), baseColumnPositions[i]);
}
}
}
}
ap.setConglomerateDescriptor(currentConglomerateDescriptor);
return currentConglomerateDescriptor != null;
}
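The ordering-registration step at the end of nextAccessPath is the part that actually consumes the IndexRowGenerator. A minimal standalone sketch of that pattern follows; registerIndexOrdering is a hypothetical helper, not Derby API, while RowOrdering, IndexRowGenerator, and StandardException are the Derby types used above.

// Hypothetical helper: translate an index's column metadata into
// RowOrdering entries, mirroring the loop at the end of nextAccessPath.
static void registerIndexOrdering(RowOrdering rowOrdering, IndexRowGenerator irg, int tableNumber) throws StandardException {
    int[] positions = irg.baseColumnPositions(); // 1-based base-table column positions
    boolean[] ascending = irg.isAscending();     // per-column sort direction
    for (int i = 0; i < positions.length; i++) {
        int direction = ascending[i] ? RowOrdering.ASCENDING : RowOrdering.DESCENDING;
        // Skip columns that are already ordered (e.g. bound by an equality
        // predicate), so later index columns can satisfy an ORDER BY without a sort.
        if (!rowOrdering.orderedOnColumn(direction, tableNumber, positions[i])) {
            rowOrdering.nextOrderPosition(direction);
            rowOrdering.addOrderedColumn(direction, tableNumber, positions[i]);
        }
    }
}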
Use of org.apache.derby.iapi.sql.dictionary.IndexRowGenerator in project derby by apache.
Class DefaultOptTrace, method reportConglomerateDescriptor.
private String reportConglomerateDescriptor(ConglomerateDescriptor cd) {
if (SanityManager.DEBUG) {
return cd.toString();
}
String keyString = "";
String[] columnNames = cd.getColumnNames();
if (cd.isIndex() && columnNames != null) {
IndexRowGenerator irg = cd.getIndexDescriptor();
int[] keyColumns = irg.baseColumnPositions();
keyString = ", key columns = {" + columnNames[keyColumns[0] - 1];
for (int index = 1; index < keyColumns.length; index++) {
keyString = keyString + ", " + columnNames[keyColumns[index] - 1];
}
keyString = keyString + "}";
}
return "CD: conglomerateNumber = " + cd.getConglomerateNumber() + " name = " + cd.getConglomerateName() + " uuid = " + cd.getUUID() + " indexable = " + cd.isIndex() + keyString;
}
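In the non-DEBUG branch, an index on columns (X, Y) would be reported along these lines; all values here are illustrative, not taken from a real database:

CD: conglomerateNumber = 961 name = TI uuid = 632f1c48-... indexable = true, key columns = {X, Y}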
Use of org.apache.derby.iapi.sql.dictionary.IndexRowGenerator in project derby by apache.
Class InsertResultSet, method emptyIndexes.
/**
* Empty the indexes after doing a bulk insert replace
* where the table has 0 rows after the replace.
* RESOLVE: This method is ugly! Prior to 2.0, we simply
* scanned back across the table to build the indexes. We
* changed this in 2.0 to populate the sorters via a callback
* as we populate the table. Doing a 0-row replace into a
* table with indexes is a degenerate case, hence we allow
* ugly and unoptimized code.
*
* @exception StandardException Thrown on failure
*/
private void emptyIndexes(long newHeapConglom, InsertConstantAction constants, TableDescriptor td, DataDictionary dd, ExecRow fullTemplate) throws StandardException {
int numIndexes = constants.irgs.length;
ExecIndexRow[] idxRows = new ExecIndexRow[numIndexes];
ExecRow baseRows;
ColumnOrdering[][] order = new ColumnOrdering[numIndexes][];
int numColumns = td.getNumberOfColumns();
collation = new int[numIndexes][];
// Create the BitSet for mapping the partial row to the full row
FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
// Need to check each index for referenced columns
int numReferencedColumns = 0;
for (int index = 0; index < numIndexes; index++) {
int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
for (int bcp = 0; bcp < baseColumnPositions.length; bcp++) {
if (!bitSet.get(baseColumnPositions[bcp])) {
bitSet.set(baseColumnPositions[bcp]);
numReferencedColumns++;
}
}
}
// We can finally create the partial base row
baseRows = activation.getExecutionFactory().getValueRow(numReferencedColumns);
// Fill in each base row with nulls of the correct data type
int colNumber = 0;
for (int index = 0; index < numColumns; index++) {
if (bitSet.get(index + 1)) {
colNumber++;
// NOTE: 1-based column numbers
baseRows.setColumn(colNumber, fullTemplate.getColumn(index + 1).cloneValue(false));
}
}
needToDropSort = new boolean[numIndexes];
sortIds = new long[numIndexes];
/* Do the initial set up before scanning the heap.
* For each index, build a single index row and a sorter.
*/
for (int index = 0; index < numIndexes; index++) {
// create a single index row template for each index
idxRows[index] = constants.irgs[index].getIndexRowTemplate();
// Get an index row based on the base row
// (This call is only necessary here because we need to pass a
// template to the sorter.)
constants.irgs[index].getIndexRow(baseRows, rl, idxRows[index], bitSet);
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate keys.
*/
ConglomerateDescriptor cd;
// Get the ConglomerateDescriptor for the index
cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
boolean[] isAscending = constants.irgs[index].isAscending();
int numColumnOrderings;
SortObserver sortObserver;
final IndexRowGenerator indDes = cd.getIndexDescriptor();
if (indDes.isUnique() || indDes.isUniqueDeferrable()) {
numColumnOrderings = indDes.isUnique() ? baseColumnPositions.length : baseColumnPositions.length + 1;
String indexOrConstraintName = cd.getConglomerateName();
boolean deferred = false;
boolean uniqueDeferrable = false;
UUID uniqueDeferrableConstraintId = null;
if (cd.isConstraint()) {
// so, the index is backing up a constraint
ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
deferred = lcc.isEffectivelyDeferred(lcc.getCurrentSQLSessionContext(activation), conDesc.getUUID());
uniqueDeferrable = conDesc.deferrable();
uniqueDeferrableConstraintId = conDesc.getUUID();
}
sortObserver = new UniqueIndexSortObserver(lcc, uniqueDeferrableConstraintId, false /* don't clone rows */, uniqueDeferrable, deferred, indexOrConstraintName, idxRows[index], true, td.getName());
} else {
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(false, false, idxRows[index], true);
}
order[index] = new ColumnOrdering[numColumnOrderings];
for (int ii = 0; ii < isAscending.length; ii++) {
order[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
}
if (numColumnOrderings > isAscending.length) {
order[index][isAscending.length] = new IndexColumnOrder(isAscending.length);
}
// create the sorters
sortIds[index] = tc.createSort((Properties) null, idxRows[index].getRowArrayClone(), order[index], sortObserver, false /* not in order */, rowCount /* est rows */, -1 /* est row size, -1 means no idea */);
needToDropSort[index] = true;
}
// Populate sorters and get the output of each sorter into a row
// source. The sorters have the indexed columns only and the columns
// are in the correct order.
rowSources = new RowLocationRetRowSource[numIndexes];
// Fill in the RowSources
SortController[] sorter = new SortController[numIndexes];
for (int index = 0; index < numIndexes; index++) {
sorter[index] = tc.openSort(sortIds[index]);
sorter[index].completedInserts();
rowSources[index] = tc.openSortRowSource(sortIds[index]);
}
long[] newIndexCongloms = new long[numIndexes];
// Populate each index
for (int index = 0; index < numIndexes; index++) {
ConglomerateController indexCC;
Properties properties = new Properties();
ConglomerateDescriptor cd;
// Get the ConglomerateDescriptor for the index
cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
// Build the properties list for the new conglomerate
indexCC = tc.openCompiledConglomerate(false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE, constants.indexSCOCIs[index], indexDCOCIs[index]);
// Get the properties on the old index
indexCC.getInternalTablePropertySet(properties);
/* Create the properties that language supplies when creating
 * the index. (The store doesn't preserve these.)
 */
int indexRowLength = idxRows[index].nColumns();
properties.put("baseConglomerateId", Long.toString(newHeapConglom));
if (cd.getIndexDescriptor().isUnique()) {
properties.put("nUniqueColumns", Integer.toString(indexRowLength - 1));
} else {
properties.put("nUniqueColumns", Integer.toString(indexRowLength));
}
if (cd.getIndexDescriptor().isUniqueWithDuplicateNulls() && !cd.getIndexDescriptor().hasDeferrableChecking()) {
properties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
}
properties.put("rowLocationColumn", Integer.toString(indexRowLength - 1));
properties.put("nKeyFields", Integer.toString(indexRowLength));
indexCC.close();
collation[index] = constants.irgs[index].getColumnCollationIds(td.getColumnDescriptorList());
// We can finally drain the sorter and rebuild the index
// Populate the index.
newIndexCongloms[index] = tc.createAndLoadConglomerate("BTREE", idxRows[index].getRowArray(), null /* default column sort order */, collation[index], properties, TransactionController.IS_DEFAULT, rowSources[index], (long[]) null);
/* Update the DataDictionary
*
* Update sys.sysconglomerates with new conglomerate #, if the
* conglomerate is shared by duplicate indexes, all the descriptors
* for those indexes need to be updated with the new number.
*/
dd.updateConglomerateDescriptor(td.getConglomerateDescriptors(constants.indexCIDS[index]), newIndexCongloms[index], tc);
// Drop the old conglomerate
tc.dropConglomerate(constants.indexCIDS[index]);
}
}
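The bit-set setup at the top of emptyIndexes is the piece that decides which base-table columns the partial row must carry. A minimal sketch of just that step; referencedIndexColumns is a hypothetical helper, not Derby API, while FormatableBitSet and IndexRowGenerator are the Derby types used above.

// Hypothetical helper: compute which base-table columns (1-based) are
// referenced by any index, as in the bit-set loop in emptyIndexes.
static FormatableBitSet referencedIndexColumns(IndexRowGenerator[] irgs, int numColumns) {
    FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1); // bit 0 unused; columns are 1-based
    for (IndexRowGenerator irg : irgs) {
        for (int pos : irg.baseColumnPositions()) {
            bitSet.set(pos);
        }
    }
    return bitSet;
}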
Use of org.apache.derby.iapi.sql.dictionary.IndexRowGenerator in project derby by apache.
Class AlterTableConstantAction, method truncateTable.
/*
 * TRUNCATE TABLE TABLENAME; (quickly removes all the rows from the table
 * and its corresponding indexes).
 * Truncate is implemented by dropping the existing conglomerates (heap,
 * indexes) and recreating new ones with the properties of the dropped
 * conglomerates. The Store does not currently support truncating existing
 * conglomerates; until it does, this is the only way to do it.
 * Error cases: Truncate error cases are the same as for other DDL
 * statements, except:
 * 1) Truncate is not allowed when the table is referenced by another table.
 * 2) Truncate is not allowed when there are enabled delete triggers on the table.
 * Note: Because the conglomerate number changes during the recreate
 * process, all statements are marked invalid and recompiled internally on
 * their next execution. This is okay: since truncate reduces the row count
 * to zero, existing plans are likely to be wrong anyway. Recompilation is
 * done internally by Derby; the user does not need to do anything.
 */
private void truncateTable() throws StandardException {
ExecRow emptyHeapRow;
long newHeapConglom;
Properties properties = new Properties();
RowLocation rl;
if (SanityManager.DEBUG) {
if (lockGranularity != '\0') {
SanityManager.THROWASSERT("lockGranularity expected to be '\0', not " + lockGranularity);
}
SanityManager.ASSERT(columnInfo == null, "columnInfo expected to be null");
SanityManager.ASSERT(constraintActions == null, "constraintActions expected to be null");
}
// Truncate is not allowed when the table is referenced by an enabled,
// non-self-referencing foreign key on another table.
for (ConstraintDescriptor cd : dd.getConstraintDescriptors(td)) {
if (cd instanceof ReferencedKeyConstraintDescriptor) {
final ReferencedKeyConstraintDescriptor rfcd = (ReferencedKeyConstraintDescriptor) cd;
for (ConstraintDescriptor fkcd : rfcd.getNonSelfReferencingFK(ConstraintDescriptor.ENABLED)) {
final ForeignKeyConstraintDescriptor fk = (ForeignKeyConstraintDescriptor) fkcd;
throw StandardException.newException(SQLState.LANG_NO_TRUNCATE_ON_FK_REFERENCE_TABLE, td.getName());
}
}
}
// truncate is not allowed when there are enabled DELETE triggers
for (TriggerDescriptor trd : dd.getTriggerDescriptors(td)) {
if (trd.listensForEvent(TriggerDescriptor.TRIGGER_EVENT_DELETE) && trd.isEnabled()) {
throw StandardException.newException(SQLState.LANG_NO_TRUNCATE_ON_ENABLED_DELETE_TRIGGERS, td.getName(), trd.getName());
}
}
// gather information from the existing conglomerate to create new one.
emptyHeapRow = td.getEmptyExecRow();
compressHeapCC = tc.openConglomerate(td.getHeapConglomerateId(), false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
rl = compressHeapCC.newRowLocationTemplate();
// Get the properties on the old heap
compressHeapCC.getInternalTablePropertySet(properties);
compressHeapCC.close();
compressHeapCC = null;
// create new conglomerate
newHeapConglom = tc.createConglomerate("heap", emptyHeapRow.getRowArray(), null /* column sort order - not required for heap */, td.getColumnCollationIds(), properties, TransactionController.IS_DEFAULT);
/* Set up index info to perform truncate on them */
getAffectedIndexes();
if (numIndexes > 0) {
indexRows = new ExecIndexRow[numIndexes];
ordering = new ColumnOrdering[numIndexes][];
collation = new int[numIndexes][];
for (int index = 0; index < numIndexes; index++) {
IndexRowGenerator curIndex = compressIRGs[index];
// create a single index row template for each index
indexRows[index] = curIndex.getIndexRowTemplate();
curIndex.getIndexRow(emptyHeapRow, rl, indexRows[index], (FormatableBitSet) null);
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* No need to try to enforce uniqueness here as
* index should be valid.
*/
int[] baseColumnPositions = curIndex.baseColumnPositions();
boolean[] isAscending = curIndex.isAscending();
int numColumnOrderings;
numColumnOrderings = baseColumnPositions.length + 1;
ordering[index] = new ColumnOrdering[numColumnOrderings];
collation[index] = curIndex.getColumnCollationIds(td.getColumnDescriptorList());
for (int ii = 0; ii < numColumnOrderings - 1; ii++) {
ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
}
ordering[index][numColumnOrderings - 1] = new IndexColumnOrder(numColumnOrderings - 1);
}
}
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
// truncate all indexes
if (numIndexes > 0) {
long[] newIndexCongloms = new long[numIndexes];
for (int index = 0; index < numIndexes; index++) {
updateIndex(newHeapConglom, dd, index, newIndexCongloms);
}
}
// Update the DataDictionary
// Get the ConglomerateDescriptor for the heap
long oldHeapConglom = td.getHeapConglomerateId();
ConglomerateDescriptor cd = td.getConglomerateDescriptor(oldHeapConglom);
// Update sys.sysconglomerates with new conglomerate #
dd.updateConglomerateDescriptor(cd, newHeapConglom, tc);
// Now that the updated information is available in the system tables,
// we should invalidate all statements that use the old conglomerates
dm.invalidateFor(td, DependencyManager.TRUNCATE_TABLE, lcc);
// Drop the old conglomerate
tc.dropConglomerate(oldHeapConglom);
cleanUp();
}
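The per-index ordering setup inside truncateTable follows a fixed recipe: every key column in its declared direction, then the RID as a trailing tie-breaker column. A sketch of that recipe in isolation; indexOrdering is a hypothetical helper, not Derby API, while ColumnOrdering and IndexColumnOrder are the Derby types used above.

// Hypothetical helper: build the sort ordering used when recreating a
// non-unique index - all key columns in their declared direction, plus
// the RID as a final column with the default (ascending) order.
static ColumnOrdering[] indexOrdering(IndexRowGenerator irg) {
    boolean[] ascending = irg.isAscending();
    int n = ascending.length + 1; // +1 for the row location (RID)
    ColumnOrdering[] order = new ColumnOrdering[n];
    for (int i = 0; i < ascending.length; i++) {
        order[i] = new IndexColumnOrder(i, ascending[i]);
    }
    order[n - 1] = new IndexColumnOrder(n - 1); // RID column
    return order;
}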
Use of org.apache.derby.iapi.sql.dictionary.IndexRowGenerator in project derby by apache.
Class AlterTableConstantAction, method getAffectedIndexes.
/**
* Get info on the indexes on the table being compressed.
*
* @exception StandardException Thrown on error
*/
private void getAffectedIndexes() throws StandardException {
IndexLister indexLister = td.getIndexLister();
/* We have to get non-distinct index row generators and conglomerate
 * numbers here and compress them to distinct ones later, because drop
 * column will need to change the index descriptor directly on each index
 * entry in SYSCONGLOMERATES, on duplicate indexes too.
 */
compressIRGs = indexLister.getIndexRowGenerators();
numIndexes = compressIRGs.length;
indexConglomerateNumbers = indexLister.getIndexConglomerateNumbers();
// neither compress nor truncate, so this is drop column
if (!(compressTable || truncateTable)) {
ArrayList<ConstantAction> newCongloms = new ArrayList<ConstantAction>();
for (int i = 0; i < compressIRGs.length; i++) {
int[] baseColumnPositions = compressIRGs[i].baseColumnPositions();
int j;
for (j = 0; j < baseColumnPositions.length; j++) {
if (baseColumnPositions[j] == droppedColumnPosition) break;
}
// this index does not involve the dropped column
if (j == baseColumnPositions.length) continue;
if (baseColumnPositions.length == 1 || (behavior == StatementType.DROP_CASCADE && compressIRGs[i].isUnique())) {
numIndexes--;
/* Get the first conglomerate with this conglomerate number each
 * time; each duplicate will eventually be dropped as well.
 */
ConglomerateDescriptor cd = td.getConglomerateDescriptor(indexConglomerateNumbers[i]);
dropConglomerate(cd, td, true, newCongloms, activation, activation.getLanguageConnectionContext());
// mark it
compressIRGs[i] = null;
continue;
}
// Give an error for a unique index that includes the dropped column;
// such an index is not backing a constraint, because constraints have
// already been handled above.
if (compressIRGs[i].isUnique()) {
ConglomerateDescriptor cd = td.getConglomerateDescriptor(indexConglomerateNumbers[i]);
throw StandardException.newException(SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT, dm.getActionString(DependencyManager.DROP_COLUMN), columnInfo[0].name, "UNIQUE INDEX", cd.getConglomerateName());
}
}
/* If there are new backing conglomerates which must be
* created to replace a dropped shared conglomerate
* (where the shared conglomerate was dropped as part
* of a "drop conglomerate" call above), then create
* them now. We do this *after* dropping all dependent
* conglomerates because we don't want to waste time
* creating a new conglomerate if it's just going to be
* dropped again as part of another "drop conglomerate"
* call.
*/
createNewBackingCongloms(newCongloms, indexConglomerateNumbers);
IndexRowGenerator[] newIRGs = new IndexRowGenerator[numIndexes];
long[] newIndexConglomNumbers = new long[numIndexes];
collation = new int[numIndexes][];
for (int i = 0, j = 0; i < numIndexes; i++, j++) {
while (compressIRGs[j] == null) j++;
// Setup collation id array to be passed in on call to create index.
collation[i] = compressIRGs[j].getColumnCollationIds(td.getColumnDescriptorList());
int[] baseColumnPositions = compressIRGs[j].baseColumnPositions();
newIRGs[i] = compressIRGs[j];
newIndexConglomNumbers[i] = indexConglomerateNumbers[j];
boolean[] isAscending = compressIRGs[j].isAscending();
boolean reMakeArrays = false;
boolean rewriteBaseColumnPositions = false;
int size = baseColumnPositions.length;
for (int k = 0; k < size; k++) {
if (baseColumnPositions[k] > droppedColumnPosition) {
baseColumnPositions[k]--;
rewriteBaseColumnPositions = true;
} else if (baseColumnPositions[k] == droppedColumnPosition) {
// mark it
baseColumnPositions[k] = 0;
reMakeArrays = true;
}
}
if (rewriteBaseColumnPositions) {
compressIRGs[j].setBaseColumnPositions(baseColumnPositions);
}
if (reMakeArrays) {
size--;
int[] newBCP = new int[size];
boolean[] newIsAscending = new boolean[size];
int[] newCollation = new int[collation[i].length - 1];
for (int k = 0, step = 0; k < size; k++) {
if (step == 0 && baseColumnPositions[k + step] == 0)
step++;
newBCP[k] = baseColumnPositions[k + step];
newIsAscending[k] = isAscending[k + step];
newCollation[k] = collation[i][k + step];
}
IndexDescriptor id = compressIRGs[j].getIndexDescriptor();
id.setBaseColumnPositions(newBCP);
id.setIsAscending(newIsAscending);
id.setNumberOfOrderedColumns(id.numberOfOrderedColumns() - 1);
collation[i] = newCollation;
}
}
compressIRGs = newIRGs;
indexConglomerateNumbers = newIndexConglomNumbers;
} else {
collation = new int[numIndexes][];
for (int i = 0; i < numIndexes; i++) {
collation[i] = compressIRGs[i].getColumnCollationIds(td.getColumnDescriptorList());
}
}
/* Now we are done with updating each index descriptor entry directly
* in SYSCONGLOMERATES (for duplicate index as well), from now on, our
* work should apply ONLY once for each real conglomerate, so we
* compress any duplicate indexes now.
*/
Object[] compressIndexResult = compressIndexArrays(indexConglomerateNumbers, compressIRGs);
if (compressIndexResult != null) {
indexConglomerateNumbers = (long[]) compressIndexResult[1];
compressIRGs = (IndexRowGenerator[]) compressIndexResult[2];
numIndexes = indexConglomerateNumbers.length;
}
indexedCols = new FormatableBitSet(compressTable || truncateTable ? td.getNumberOfColumns() + 1 : td.getNumberOfColumns());
for (int index = 0; index < numIndexes; index++) {
int[] colIds = compressIRGs[index].getIndexDescriptor().baseColumnPositions();
for (int index2 = 0; index2 < colIds.length; index2++) {
indexedCols.set(colIds[index2]);
}
}
}
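The drop-column branch above rewrites each surviving index's base column positions in place. A sketch of that adjustment on its own; rewritePositions is a hypothetical helper, not Derby API.

// Hypothetical helper: adjust 1-based index column positions after
// dropping a column. Positions above the dropped column shift down by
// one; a position equal to it is marked 0 so the caller can rebuild the
// descriptor arrays without it, as getAffectedIndexes does.
static boolean rewritePositions(int[] baseColumnPositions, int droppedColumnPosition) {
    boolean columnRemoved = false;
    for (int k = 0; k < baseColumnPositions.length; k++) {
        if (baseColumnPositions[k] > droppedColumnPosition) {
            baseColumnPositions[k]--;
        } else if (baseColumnPositions[k] == droppedColumnPosition) {
            baseColumnPositions[k] = 0; // mark for removal
            columnRemoved = true;
        }
    }
    return columnRemoved;
}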