use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class DDLSingleTableConstantAction method executeConglomReplacement.
/**
* Execute the received ConstantAction, which will create a
* new physical conglomerate (or find an existing physical
* conglomerate that is "sharable") to replace some dropped
* physical conglomerate. Then find any conglomerate descriptors
* which still reference the dropped physical conglomerate and
* update them all to have a conglomerate number that points
* to the conglomerate created by the ConstantAction.
*
* This method is called as part of DROP processing to handle
* cases where a physical conglomerate that was shared by
* multiple descriptors is dropped--in which case a new physical
* conglomerate must be created to support the remaining
* descriptors.
*
* @param replaceConglom Constant action which, when executed,
* will either create a new conglomerate or find an existing
* one that satisfies the ConstantAction's requirements.
* @param activation Activation used when creating the conglom
*/
void executeConglomReplacement(ConstantAction replaceConglom, Activation activation) throws StandardException {
CreateIndexConstantAction replaceConglomAction = (CreateIndexConstantAction) replaceConglom;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
// Create the new (replacement) backing conglomerate...
replaceConglomAction.executeConstantAction(activation);
/* Find all conglomerate descriptors that referenced the
* old backing conglomerate and update them to have the
* conglomerate number for the new backing conglomerate.
*/
ConglomerateDescriptor[] congDescs = dd.getConglomerateDescriptors(replaceConglomAction.getReplacedConglomNumber());
if (SanityManager.DEBUG) {
/* There should be at least one descriptor requiring
* an updated conglomerate number--namely, the one
* corresponding to "srcCD" for which the constant
* action was created (see getConglomReplacementAction()
* above). There may be others, as well.
*/
if (congDescs.length < 1) {
SanityManager.THROWASSERT("Should have found at least one conglomerate " + "descriptor that needs an updated conglomerate " + "number (due to a dropped index), but only " + "found " + congDescs.length);
}
}
dd.updateConglomerateDescriptor(congDescs, replaceConglomAction.getCreatedConglomNumber(), lcc.getTransactionExecute());
return;
}
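For intuition, here is a minimal plain-Java sketch (not Derby API) of what the final dd.updateConglomerateDescriptor call amounts to: every descriptor still pointing at the dropped conglomerate number is repointed to the replacement. All names and numbers are illustrative.
import java.util.Map;
import java.util.TreeMap;

// Toy model of the descriptor repointing performed above; in Derby the
// real work happens in dd.updateConglomerateDescriptor(...).
public class ConglomRepointSketch {
    public static void main(String[] args) {
        // descriptor name -> backing conglomerate number
        Map<String, Long> descriptors = new TreeMap<>();
        descriptors.put("IDX_A", 101L); // being dropped
        descriptors.put("IDX_B", 101L); // shares conglomerate 101 with IDX_A
        descriptors.put("IDX_C", 202L); // unrelated

        long dropped = 101L; // physical conglomerate removed by DROP INDEX IDX_A
        long created = 303L; // replacement built by the ConstantAction

        descriptors.remove("IDX_A"); // the dropped index loses its descriptor
        // Repoint every surviving descriptor that still references the
        // dropped conglomerate number.
        descriptors.replaceAll((name, num) -> num == dropped ? created : num);

        System.out.println(descriptors); // {IDX_B=303, IDX_C=202}
    }
}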
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class IndexChanger method getUniqueConstraintId.
// Return the id of the corresponding unique or primary key
// constraint. Note: this only works because deferrable constraints
// do not share an index with other constraints and explicit indexes, so the
// mapping back from index conglomerate to constraint is one-to-one.
private UUID getUniqueConstraintId() throws StandardException {
if (uniqueConstraintId == null) {
DataDictionary dd = lcc.getDataDictionary();
ConglomerateDescriptor cd = dd.getConglomerateDescriptor(indexCID);
uniqueConstraintId = dd.getConstraintDescriptor(dd.getTableDescriptor(cd.getTableID()), cd.getUUID()).getUUID();
}
return uniqueConstraintId;
}
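The chained lookup above compresses several dictionary hops into one statement; unpacked for readability, using only the calls visible in the method (the helper name lookupConstraintId is illustrative):
// Hypothetical helper: the same lookup chain as above, one hop per line.
private UUID lookupConstraintId(DataDictionary dd, long indexCID)
        throws StandardException {
    ConglomerateDescriptor cd = dd.getConglomerateDescriptor(indexCID);
    TableDescriptor td = dd.getTableDescriptor(cd.getTableID()); // owning table
    ConstraintDescriptor con = dd.getConstraintDescriptor(td, cd.getUUID());
    // One-to-one only because deferrable constraints don't share an
    // index with other constraints or explicit indexes (see note above).
    return con.getUUID();
}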
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class InsertResultSet method emptyIndexes.
/**
* Empty the indexes after doing a bulk insert replace
* where the table has 0 rows after the replace.
* RESOLVE: This method is ugly! Prior to 2.0, we simply
* scanned back across the table to build the indexes. We
* changed this in 2.0 to populate the sorters via a call back
* as we populated the table. Doing a 0 row replace into a
* table with indexes is a degenerate case, hence we allow
* ugly and unoptimized code.
*
* @exception StandardException Thrown on failure
*/
private void emptyIndexes(long newHeapConglom, InsertConstantAction constants, TableDescriptor td, DataDictionary dd, ExecRow fullTemplate) throws StandardException {
int numIndexes = constants.irgs.length;
ExecIndexRow[] idxRows = new ExecIndexRow[numIndexes];
ExecRow baseRows;
ColumnOrdering[][] order = new ColumnOrdering[numIndexes][];
int numColumns = td.getNumberOfColumns();
collation = new int[numIndexes][];
// Create the BitSet for mapping the partial row to the full row
FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
// Need to check each index for referenced columns
int numReferencedColumns = 0;
for (int index = 0; index < numIndexes; index++) {
int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
for (int bcp = 0; bcp < baseColumnPositions.length; bcp++) {
if (!bitSet.get(baseColumnPositions[bcp])) {
bitSet.set(baseColumnPositions[bcp]);
numReferencedColumns++;
}
}
}
// We can finally create the partial base row
baseRows = activation.getExecutionFactory().getValueRow(numReferencedColumns);
// Fill in each base row with nulls of the correct data type
int colNumber = 0;
for (int index = 0; index < numColumns; index++) {
if (bitSet.get(index + 1)) {
colNumber++;
// NOTE: 1-based column numbers
baseRows.setColumn(colNumber, fullTemplate.getColumn(index + 1).cloneValue(false));
}
}
needToDropSort = new boolean[numIndexes];
sortIds = new long[numIndexes];
/* Do the initial set up before scanning the heap.
* For each index, build a single index row and a sorter.
*/
for (int index = 0; index < numIndexes; index++) {
// create a single index row template for each index
idxRows[index] = constants.irgs[index].getIndexRowTemplate();
// Get an index row based on the base row
// (This call is only necessary here because we need to pass a
// template to the sorter.)
constants.irgs[index].getIndexRow(baseRows, rl, idxRows[index], bitSet);
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate keys.
*/
ConglomerateDescriptor cd;
// Get the ConglomerateDescriptor for the index
cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
boolean[] isAscending = constants.irgs[index].isAscending();
int numColumnOrderings;
SortObserver sortObserver;
final IndexRowGenerator indDes = cd.getIndexDescriptor();
if (indDes.isUnique() || indDes.isUniqueDeferrable()) {
numColumnOrderings = indDes.isUnique() ? baseColumnPositions.length : baseColumnPositions.length + 1;
String indexOrConstraintName = cd.getConglomerateName();
boolean deferred = false;
boolean uniqueDeferrable = false;
UUID uniqueDeferrableConstraintId = null;
if (cd.isConstraint()) {
// so, the index is backing up a constraint
ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
deferred = lcc.isEffectivelyDeferred(lcc.getCurrentSQLSessionContext(activation), conDesc.getUUID());
uniqueDeferrable = conDesc.deferrable();
uniqueDeferrableConstraintId = conDesc.getUUID();
}
sortObserver = new UniqueIndexSortObserver(lcc, uniqueDeferrableConstraintId,
        false, // don't clone rows
        uniqueDeferrable, deferred, indexOrConstraintName,
        idxRows[index], true, td.getName());
} else {
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(false, false, idxRows[index], true);
}
order[index] = new ColumnOrdering[numColumnOrderings];
for (int ii = 0; ii < isAscending.length; ii++) {
order[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
}
if (numColumnOrderings > isAscending.length) {
order[index][isAscending.length] = new IndexColumnOrder(isAscending.length);
}
// create the sorters
sortIds[index] = tc.createSort((Properties) null, idxRows[index].getRowArrayClone(),
        order[index], sortObserver,
        false,    // not in order
        rowCount, // est rows
        -1);      // est row size, -1 means no idea
needToDropSort[index] = true;
}
// Populate sorters and get the output of each sorter into a row
// source. The sorters have the indexed columns only and the columns
// are in the correct order.
rowSources = new RowLocationRetRowSource[numIndexes];
// Fill in the RowSources
SortController[] sorter = new SortController[numIndexes];
for (int index = 0; index < numIndexes; index++) {
sorter[index] = tc.openSort(sortIds[index]);
sorter[index].completedInserts();
rowSources[index] = tc.openSortRowSource(sortIds[index]);
}
long[] newIndexCongloms = new long[numIndexes];
// Populate each index
for (int index = 0; index < numIndexes; index++) {
ConglomerateController indexCC;
Properties properties = new Properties();
ConglomerateDescriptor cd;
// Get the ConglomerateDescriptor for the index
cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
// Build the properties list for the new conglomerate
indexCC = tc.openCompiledConglomerate(false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE, constants.indexSCOCIs[index], indexDCOCIs[index]);
// Get the properties on the old index
indexCC.getInternalTablePropertySet(properties);
/* Create the properties that language supplies when creating
 * the index. (The store doesn't preserve these.)
*/
int indexRowLength = idxRows[index].nColumns();
properties.put("baseConglomerateId", Long.toString(newHeapConglom));
if (cd.getIndexDescriptor().isUnique()) {
properties.put("nUniqueColumns", Integer.toString(indexRowLength - 1));
} else {
properties.put("nUniqueColumns", Integer.toString(indexRowLength));
}
if (cd.getIndexDescriptor().isUniqueWithDuplicateNulls() && !cd.getIndexDescriptor().hasDeferrableChecking()) {
properties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
}
properties.put("rowLocationColumn", Integer.toString(indexRowLength - 1));
properties.put("nKeyFields", Integer.toString(indexRowLength));
indexCC.close();
collation[index] = constants.irgs[index].getColumnCollationIds(td.getColumnDescriptorList());
// We can finally drain the sorter and rebuild the index
// Populate the index.
newIndexCongloms[index] = tc.createAndLoadConglomerate("BTREE", idxRows[index].getRowArray(),
        null, // default column sort order
        collation[index], properties, TransactionController.IS_DEFAULT,
        rowSources[index], (long[]) null);
/* Update the DataDictionary
*
* Update sys.sysconglomerates with new conglomerate #, if the
* conglomerate is shared by duplicate indexes, all the descriptors
* for those indexes need to be updated with the new number.
*/
dd.updateConglomerateDescriptor(td.getConglomerateDescriptors(constants.indexCIDS[index]), newIndexCongloms[index], tc);
// Drop the old conglomerate
tc.dropConglomerate(constants.indexCIDS[index]);
}
}
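The column-mapping prologue of emptyIndexes (take the union of all base column positions referenced by any index, then size the partial row accordingly) can be modeled with a runnable toy that substitutes java.util.BitSet for Derby's FormatableBitSet; positions are 1-based as in the real code:
import java.util.BitSet;

// Toy model of the partial-row mapping: collect the distinct 1-based base
// column positions referenced by any index, counting them to size the row.
public class PartialRowSketch {
    public static void main(String[] args) {
        int[][] baseColumnPositions = {
            {2, 4}, // index 0 covers base columns 2 and 4
            {4, 5}  // index 1 covers base columns 4 and 5
        };
        BitSet referenced = new BitSet();
        int numReferencedColumns = 0;
        for (int[] bcps : baseColumnPositions) {
            for (int bcp : bcps) {
                if (!referenced.get(bcp)) {
                    referenced.set(bcp);
                    numReferencedColumns++;
                }
            }
        }
        // The partial row gets one slot per distinct referenced column.
        System.out.println(numReferencedColumns + " columns: " + referenced);
        // prints "3 columns: {2, 4, 5}"
    }
}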
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class DropIndexConstantAction method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for DROP INDEX.
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
TableDescriptor td;
ConglomerateDescriptor cd;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
TransactionController tc = lcc.getTransactionExecute();
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
// older version (or target) has to get td first, potential deadlock
if (tableConglomerateId == 0) {
td = dd.getTableDescriptor(tableId);
if (td == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tableName);
}
tableConglomerateId = td.getHeapConglomerateId();
}
lockTableForDDL(tc, tableConglomerateId, true);
td = dd.getTableDescriptor(tableId);
if (td == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tableName);
}
/*
** If the schema descriptor is null, then
** we must have just read ourselves in.
** So we will get the corresponding schema
** descriptor from the data dictionary.
*/
SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, tc, true);
/* Get the conglomerate descriptor for the index, along
* with an exclusive row lock on the row in sys.sysconglomerates
* in order to ensure that no one else compiles against the
* index.
*/
cd = dd.getConglomerateDescriptor(indexName, sd, true);
if (cd == null) {
throw StandardException.newException(SQLState.LANG_INDEX_NOT_FOUND_DURING_EXECUTION, fullIndexName);
}
/* Since we support the sharing of conglomerates across
* multiple indexes, dropping the physical conglomerate
* for the index might affect other indexes/constraints
* which share the conglomerate. The following call will
* deal with that situation by creating a new physical
* conglomerate to replace the dropped one, if necessary.
*/
dropConglomerate(cd, td, activation, lcc);
return;
}
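For context, this constant action is what a plain DROP INDEX statement ultimately executes. A minimal embedded-Derby JDBC example (assuming derby.jar is on the classpath; the database and object names are illustrative):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

// Issues the statement whose execution-time logic is shown above.
public class DropIndexExample {
    public static void main(String[] args) throws Exception {
        try (Connection c = DriverManager.getConnection("jdbc:derby:demoDB;create=true");
             Statement s = c.createStatement()) {
            s.executeUpdate("CREATE TABLE T(ID INT, V INT)");
            s.executeUpdate("CREATE INDEX T_V ON T(V)");
            s.executeUpdate("DROP INDEX T_V"); // drives DropIndexConstantAction
        }
    }
}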
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class DropTableConstantAction method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for DROP TABLE.
*
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
TableDescriptor td;
UUID tableID;
ConglomerateDescriptor[] cds;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
if ((sd != null) && sd.getSchemaName().equals(SchemaDescriptor.STD_DECLARED_GLOBAL_TEMPORARY_TABLES_SCHEMA_NAME)) {
// check if this is a temp table before checking data dictionary
td = lcc.getTableDescriptorForDeclaredGlobalTempTable(tableName);
// td == null here means it is not a temporary table; look for the table in the physical SESSION schema
if (td == null)
    td = dd.getTableDescriptor(tableName, sd, tc);
// td == null here means tableName is neither a temp table nor a physical table in the SESSION schema
if (td == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, fullTableName);
}
if (td.getTableType() == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {
dm.invalidateFor(td, DependencyManager.DROP_TABLE, lcc);
tc.dropConglomerate(td.getHeapConglomerateId());
lcc.dropDeclaredGlobalTempTable(tableName);
return;
}
}
/* Lock the table before we access the data dictionary
* to prevent deadlocks.
*
* Note that for DROP TABLE replayed at Targets during REFRESH,
* the conglomerateNumber will be 0. That's ok. During REFRESH,
* we don't need to lock the conglomerate.
*/
if (conglomerateNumber != 0) {
lockTableForDDL(tc, conglomerateNumber, true);
}
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
/* Get the table descriptor. */
td = dd.getTableDescriptor(tableId);
if (td == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, fullTableName);
}
/* Get an exclusive table lock on the table. */
long heapId = td.getHeapConglomerateId();
lockTableForDDL(tc, heapId, true);
/* Drop the triggers */
for (TriggerDescriptor trd : dd.getTriggerDescriptors(td)) {
trd.drop(lcc);
}
/* Drop all defaults */
ColumnDescriptorList cdl = td.getColumnDescriptorList();
for (ColumnDescriptor cd : cdl) {
// Drop the sequence generator backing an identity column (10.11 and later)
if (cd.isAutoincrement() && dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_11, null)) {
dropIdentitySequence(dd, td, activation);
}
// If the column has a default, clear any dependencies on it
if (cd.getDefaultInfo() != null) {
DefaultDescriptor defaultDesc = cd.getDefaultDescriptor(dd);
dm.clearDependencies(lcc, defaultDesc);
}
}
/* Drop the columns */
dd.dropAllColumnDescriptors(tableId, tc);
/* Drop all table and column permission descriptors */
dd.dropAllTableAndColPermDescriptors(tableId, tc);
/* Drop the constraints */
dropAllConstraintDescriptors(td, activation);
/*
** Drop all the conglomerates. Drop the heap last, because the
** store needs it for locking the indexes when they are dropped.
*/
cds = td.getConglomerateDescriptors();
long[] dropped = new long[cds.length - 1];
int numDropped = 0;
for (int index = 0; index < cds.length; index++) {
ConglomerateDescriptor cd = cds[index];
/* If it's for an index: since similar indexes can share one
 * conglomerate, we drop each conglomerate only once.
 */
if (cd.getConglomerateNumber() != heapId) {
long thisConglom = cd.getConglomerateNumber();
int i;
for (i = 0; i < numDropped; i++) {
if (dropped[i] == thisConglom)
break;
}
if (i == numDropped) { // not dropped yet
dropped[numDropped++] = thisConglom;
tc.dropConglomerate(thisConglom);
dd.dropStatisticsDescriptors(td.getUUID(), cd.getUUID(), tc);
}
}
}
/* Prepare all dependents to invalidate. (This is their chance
* to say that they can't be invalidated. For example, an open
* cursor referencing a table/view that the user is attempting to
* drop.) If no one objects, then invalidate any dependent objects.
* We check for invalidation before we drop the table descriptor
* since the table descriptor may be looked up as part of
* decoding tuples in SYSDEPENDS.
*/
dm.invalidateFor(td, DependencyManager.DROP_TABLE, lcc);
//
// The table itself can depend on the user defined types of its columns.
// Drop all of those dependencies now.
//
adjustUDTDependencies(lcc, dd, td, null, true);
/* Drop the table */
dd.dropTableDescriptor(td, sd, tc);
/* Drop the conglomerate descriptors */
dd.dropAllConglomerateDescriptors(td, tc);
/* Drop the store element at last, to prevent dangling reference
* for open cursor, beetle 4393.
*/
tc.dropConglomerate(heapId);
}
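The dedup loop over conglomerates above (a linear scan of the dropped[] array) exists because similar indexes may share one physical conglomerate. A runnable toy showing the same invariant with a HashSet, with the heap dropped last as in the real code:
import java.util.HashSet;
import java.util.Set;

// Toy model of the drop loop: each distinct conglomerate number is
// dropped at most once, and the heap is skipped until the end.
public class DropConglomsSketch {
    public static void main(String[] args) {
        long heapId = 10L;
        long[] conglomNumbers = {10L, 42L, 42L, 77L}; // heap + shared index + index
        Set<Long> dropped = new HashSet<>();
        for (long num : conglomNumbers) {
            // Set.add returns false for numbers we already dropped.
            if (num != heapId && dropped.add(num)) {
                System.out.println("dropConglomerate(" + num + ")");
            }
        }
        System.out.println("dropConglomerate(" + heapId + ") // heap last");
    }
}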