use of org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList in project derby by apache.
the class DropTableConstantAction method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for DROP TABLE.
*
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
TableDescriptor td;
UUID tableID;
ConglomerateDescriptor[] cds;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
if ((sd != null) && sd.getSchemaName().equals(SchemaDescriptor.STD_DECLARED_GLOBAL_TEMPORARY_TABLES_SCHEMA_NAME)) {
// check if this is a temp table before checking data dictionary
td = lcc.getTableDescriptorForDeclaredGlobalTempTable(tableName);
// td null here means it is not a temporary table; look for the table in the physical SESSION schema
if (td == null)
td = dd.getTableDescriptor(tableName, sd, tc);
// td still null means tableName is neither a temp table nor a physical table in the SESSION schema
if (td == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, fullTableName);
}
if (td.getTableType() == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {
dm.invalidateFor(td, DependencyManager.DROP_TABLE, lcc);
tc.dropConglomerate(td.getHeapConglomerateId());
lcc.dropDeclaredGlobalTempTable(tableName);
return;
}
}
/* Lock the table before we access the data dictionary
* to prevent deadlocks.
*
* Note that for DROP TABLE replayed at Targets during REFRESH,
* the conglomerateNumber will be 0. That's ok. During REFRESH,
* we don't need to lock the conglomerate.
*/
if (conglomerateNumber != 0) {
lockTableForDDL(tc, conglomerateNumber, true);
}
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
/* Get the table descriptor. */
td = dd.getTableDescriptor(tableId);
if (td == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, fullTableName);
}
/* Get an exclusive table lock on the table. */
long heapId = td.getHeapConglomerateId();
lockTableForDDL(tc, heapId, true);
/* Drop the triggers */
for (TriggerDescriptor trd : dd.getTriggerDescriptors(td)) {
trd.drop(lcc);
}
/* Drop all defaults */
ColumnDescriptorList cdl = td.getColumnDescriptorList();
for (ColumnDescriptor cd : cdl) {
// identity columns are backed by a system sequence in data dictionaries at level 10.11 and higher; drop that sequence
if (cd.isAutoincrement() && dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_11, null)) {
dropIdentitySequence(dd, td, activation);
}
// if the column has a default, clear any dependencies on the default
if (cd.getDefaultInfo() != null) {
DefaultDescriptor defaultDesc = cd.getDefaultDescriptor(dd);
dm.clearDependencies(lcc, defaultDesc);
}
}
/* Drop the columns */
dd.dropAllColumnDescriptors(tableId, tc);
/* Drop all table and column permission descriptors */
dd.dropAllTableAndColPermDescriptors(tableId, tc);
/* Drop the constraints */
dropAllConstraintDescriptors(td, activation);
/*
** Drop all the conglomerates. Drop the heap last, because the
** store needs it for locking the indexes when they are dropped.
*/
cds = td.getConglomerateDescriptors();
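// At most cds.length - 1 of these descriptors can be indexes, since one of them is the heap itself.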
long[] dropped = new long[cds.length - 1];
int numDropped = 0;
for (int index = 0; index < cds.length; index++) {
ConglomerateDescriptor cd = cds[index];
/* if it's for an index, since similar indexes share one
* conglomerate, we only drop the conglomerate once
*/
if (cd.getConglomerateNumber() != heapId) {
long thisConglom = cd.getConglomerateNumber();
int i;
for (i = 0; i < numDropped; i++) {
if (dropped[i] == thisConglom)
break;
}
// not dropped yet
if (i == numDropped) {
dropped[numDropped++] = thisConglom;
tc.dropConglomerate(thisConglom);
dd.dropStatisticsDescriptors(td.getUUID(), cd.getUUID(), tc);
}
}
}
/* Prepare all dependents to invalidate. (This is their chance
* to say that they can't be invalidated. For example, an open
* cursor referencing a table/view that the user is attempting to
* drop.) If no one objects, then invalidate any dependent objects.
* We check for invalidation before we drop the table descriptor
* since the table descriptor may be looked up as part of
* decoding tuples in SYSDEPENDS.
*/
dm.invalidateFor(td, DependencyManager.DROP_TABLE, lcc);
//
// The table itself can depend on the user defined types of its columns.
// Drop all of those dependencies now.
//
adjustUDTDependencies(lcc, dd, td, null, true);
/* Drop the table */
dd.dropTableDescriptor(td, sd, tc);
/* Drop the conglomerate descriptors */
dd.dropAllConglomerateDescriptors(td, tc);
/* Drop the store element at last, to prevent dangling reference
* for open cursor, beetle 4393.
*/
tc.dropConglomerate(heapId);
}
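The dedup loop above is the subtle part: several index descriptors may share one physical conglomerate, so each conglomerate number must be dropped exactly once, and the heap must be skipped. A minimal standalone sketch of that pattern (plain Java with illustrative names, not Derby API):
import java.util.Arrays;

final class DropOnceSketch {
// Mirrors the loop in executeConstantAction above: skip the heap, remember
// each conglomerate number already dropped, and drop every index
// conglomerate exactly once.
static long[] dropEachIndexOnce(long[] conglomIds, long heapId) {
long[] dropped = new long[conglomIds.length];
int numDropped = 0;
for (long id : conglomIds) {
if (id == heapId) {
// the heap is dropped last, after all indexes
continue;
}
boolean alreadyDropped = false;
for (int i = 0; i < numDropped; i++) {
if (dropped[i] == id) {
alreadyDropped = true;
break;
}
}
if (!alreadyDropped) {
dropped[numDropped++] = id;
// in Derby's version, tc.dropConglomerate(id) happens here
}
}
return Arrays.copyOf(dropped, numDropped);
}
}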
use of org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList in project derby by apache.
the class UpdateResultSet method initializeAIcache.
/*
* The implementation of this method is slightly different from the one
* in InsertResultSet. This code was originally written for insert, but
* with DERBY-6414 we have started supporting update of auto-generated
* columns with the keyword DEFAULT. The reason for the different
* implementation is that the array used in InsertResultSet's
* implementation of this method, the ColumnDescriptors in
* resultDescription, holds different entries for the insert and update
* cases. For insert, the array holds the column descriptors of all the
* columns in the table, because all the columns in the table are going
* to get some value whether or not they were named directly in the
* INSERT statement. The second array, rla, has a slot for each of the
* columns in the table, with a non-null value for each auto-generated
* column. In the update case, however, resultDescription does not
* include all the columns in the table. It only has the columns being
* touched by the UPDATE statement (the rest of the columns in the table
* retain their original values), and for each touched column it has a
* duplicate entry, so as to carry both the before and after values of
* the changed column. Lastly, it has row location information for the
* row being updated. This difference in the contents of
* resultDescription requires separate implementations of this method
* for insert and update.
*/
protected void initializeAIcache(RowLocation[] rla) throws StandardException {
if (rla != null) {
aiCache = new DataValueDescriptor[rla.length];
ColumnDescriptorList columns = lcc.getDataDictionary().getTableDescriptor(constants.targetUUID).getColumnDescriptorList();
for (int i = 0; i < columns.size(); i++) {
if (rla[i] == null)
continue;
aiCache[i] = columns.elementAt(i).getType().getNull();
}
}
}
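To make the layout difference concrete: given T(A INT, B INT GENERATED ALWAYS AS IDENTITY, C INT), an INSERT's resultDescription covers A, B and C, while UPDATE T SET B = DEFAULT produces a resultDescription holding only B twice (before and after images) plus the row location. A sketch of the parallel-array initialization itself, with plain Java strings standing in for Derby's DataValueDescriptor (illustrative only; assumes rla and the type list have one slot per table column):
// rla is non-null only in the slots of auto-generated columns; the cache
// gets a type-correct placeholder (standing in for getType().getNull())
// in exactly those slots.
static String[] buildAutoIncCache(Object[] rla, String[] columnTypeNames) {
if (rla == null) {
return null;
}
String[] aiCache = new String[rla.length];
for (int i = 0; i < columnTypeNames.length; i++) {
if (rla[i] == null) {
// not an auto-generated column
continue;
}
aiCache[i] = "NULL:" + columnTypeNames[i];
}
return aiCache;
}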
use of org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList in project derby by apache.
the class T_ConsistencyChecker method getHeapRowOfNulls.
/* Get a heap row full of nulls */
private ExecRow getHeapRowOfNulls() throws StandardException {
ConglomerateController baseCC;
ExecRow baseRow;
/* Open the heap for reading */
baseCC = tc.openConglomerate(td.getHeapConglomerateId(), false, 0, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
/* Get a row template for the base table */
baseRow = lcc.getLanguageConnectionFactory().getExecutionFactory().getValueRow(td.getNumberOfColumns());
/* Fill the row with nulls of the correct type */
ColumnDescriptorList cdl = td.getColumnDescriptorList();
int cdlSize = cdl.size();
for (int index = 0; index < cdlSize; index++) {
ColumnDescriptor cd = (ColumnDescriptor) cdl.elementAt(index);
DataTypeDescriptor dts = cd.getType();
baseRow.setColumn(cd.getPosition(), dts.getNull());
}
baseCC.close();
return baseRow;
}
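The only subtlety here is that Derby row positions are 1-based (note setColumn(cd.getPosition(), ...)). A stripped-down version of the same typed-null template idea, with a string standing in for DataTypeDescriptor.getNull() (illustrative, not Derby API):
// Build a template row of SQL NULLs whose slots line up with 1-based
// column positions; slot 0 is unused in this sketch.
static Object[] rowOfNulls(String[] columnTypeNames) {
Object[] row = new Object[columnTypeNames.length + 1];
for (int pos = 1; pos <= columnTypeNames.length; pos++) {
row[pos] = "NULL:" + columnTypeNames[pos - 1];
}
return row;
}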
use of org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList in project derby by apache.
the class AlterTableConstantAction method dropColumnFromTable.
/**
* Workhorse for dropping a column from a table.
*
* This routine drops a column from a table, taking care
* to properly handle the various related schema objects.
*
* The syntax which gets you here is:
*
* ALTER TABLE tbl DROP [COLUMN] col [CASCADE|RESTRICT]
*
* The keyword COLUMN is optional, and if you don't
* specify CASCADE or RESTRICT, the default is CASCADE
* (the default is chosen in the parser, not here).
*
* If you specify RESTRICT, then the column drop should be
* rejected if it would cause a dependent schema object
* to become invalid.
*
* If you specify CASCADE, then the column drop should
* additionally drop other schema objects which have
* become invalid.
*
* You may not drop the last (only) column in a table.
*
* Schema objects of interest include:
* - views
* - triggers
* - constraints
* - check constraints
* - primary key constraints
* - foreign key constraints
* - unique key constraints
* - not null constraints
* - privileges
* - indexes
* - default values
*
* Dropping a column may also change the column position
* numbers of other columns in the table, which may require
* fixup of schema objects (such as triggers and column
* privileges) which refer to columns by column position number.
*
* Indexes are a bit interesting. The official SQL spec
* doesn't talk about indexes; they are considered to be
* an implementation-specific performance optimization.
* The current Derby behavior is that:
* - CASCADE/RESTRICT doesn't matter for indexes
* - when a column is dropped, it is removed from any indexes
* which contain it.
* - if that column was the only column in the index, the
* entire index is dropped.
*
* @param columnName the name of the column specification in the ALTER
* statement -- currently we allow only one.
* @exception StandardException thrown on failure.
*/
private void dropColumnFromTable(String columnName) throws StandardException {
boolean cascade = (behavior == StatementType.DROP_CASCADE);
// drop any generated columns which reference this column
ColumnDescriptorList generatedColumnList = td.getGeneratedColumns();
int generatedColumnCount = generatedColumnList.size();
ArrayList<String> cascadedDroppedColumns = new ArrayList<String>();
for (int i = 0; i < generatedColumnCount; i++) {
ColumnDescriptor generatedColumn = generatedColumnList.elementAt(i);
String[] referencedColumnNames = generatedColumn.getDefaultInfo().getReferencedColumnNames();
int referencedColumnCount = referencedColumnNames.length;
for (int j = 0; j < referencedColumnCount; j++) {
if (columnName.equals(referencedColumnNames[j])) {
String generatedColumnName = generatedColumn.getColumnName();
// this generated column references the column we're trying to drop
if (!cascade) {
// RESTRICT: fail, since a generated column depends on the column being dropped
throw StandardException.newException(SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT, dm.getActionString(DependencyManager.DROP_COLUMN), columnName, "GENERATED COLUMN", generatedColumnName);
} else {
cascadedDroppedColumns.add(generatedColumnName);
}
}
}
}
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
int cascadedDrops = cascadedDroppedColumns.size();
int sizeAfterCascadedDrops = td.getColumnDescriptorList().size() - cascadedDrops;
// can NOT drop a column if it is the only one in the table
if (sizeAfterCascadedDrops == 1) {
throw StandardException.newException(SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT, dm.getActionString(DependencyManager.DROP_COLUMN), "THE *LAST* COLUMN " + columnName, "TABLE", td.getQualifiedName());
}
// now drop dependent generated columns
for (int i = 0; i < cascadedDrops; i++) {
String generatedColumnName = cascadedDroppedColumns.get(i);
activation.addWarning(StandardException.newWarning(SQLState.LANG_GEN_COL_DROPPED, generatedColumnName, td.getName()));
//
// We can only recurse 2 levels since a generation clause cannot
// refer to other generated columns.
//
dropColumnFromTable(generatedColumnName);
}
/*
* Cascaded drops of dependent generated columns may require us to
* rebuild the table descriptor.
*/
td = dd.getTableDescriptor(tableId);
ColumnDescriptor columnDescriptor = td.getColumnDescriptor(columnName);
// We already verified this in bind, but do it again
if (columnDescriptor == null) {
throw StandardException.newException(SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE, columnName, td.getQualifiedName());
}
int size = td.getColumnDescriptorList().size();
droppedColumnPosition = columnDescriptor.getPosition();
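// Record only the dropped column in the descriptor's referenced-column map, so the invalidation below is scoped to dependents of that column.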
FormatableBitSet toDrop = new FormatableBitSet(size + 1);
toDrop.set(droppedColumnPosition);
td.setReferencedColumnMap(toDrop);
dm.invalidateFor(td, (cascade ? DependencyManager.DROP_COLUMN : DependencyManager.DROP_COLUMN_RESTRICT), lcc);
// If column has a default we drop the default and any dependencies
if (columnDescriptor.getDefaultInfo() != null) {
dm.clearDependencies(lcc, columnDescriptor.getDefaultDescriptor(dd));
}
// if this is an identity column, we need to drop the system-generated sequence backing it
if (columnDescriptor.isAutoincrement() && dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_11, null)) {
DropTableConstantAction.dropIdentitySequence(dd, td, activation);
}
// Now check the triggers on this table: the dropped column may be a trigger column, or a trigger action column used through the REFERENCING clause
for (TriggerDescriptor trd : dd.getTriggerDescriptors(td)) {
// If we find that the trigger is dependent on the column being
// dropped because column is part of trigger columns list, then
// we will give a warning or drop the trigger based on whether
// ALTER TABLE DROP COLUMN is RESTRICT or CASCADE. In such a
// case, no need to check if the trigger action columns referenced
// through REFERENCING clause also used the column being dropped.
boolean triggerDroppedAlready = false;
int[] referencedCols = trd.getReferencedCols();
if (referencedCols != null) {
int refColLen = referencedCols.length, j;
boolean changed = false;
for (j = 0; j < refColLen; j++) {
if (referencedCols[j] > droppedColumnPosition) {
// Trigger is not defined on the column being dropped,
// but the column position of the trigger column is changing
// because the position of the column being dropped is
// before the trigger column
changed = true;
} else if (referencedCols[j] == droppedColumnPosition) {
// the trigger is defined on the column being dropped
if (cascade) {
trd.drop(lcc);
triggerDroppedAlready = true;
activation.addWarning(StandardException.newWarning(SQLState.LANG_TRIGGER_DROPPED, trd.getName(), td.getName()));
} else {
// we'd better give an error if we don't drop it, otherwise there would be unexpected behavior
throw StandardException.newException(SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT, dm.getActionString(DependencyManager.DROP_COLUMN), columnName, "TRIGGER", trd.getName());
}
break;
}
}
// The dropped column is not a trigger column, but trigger column positions changed because of the drop column; save the renumbered positions.
if (j == refColLen && changed) {
dd.dropTriggerDescriptor(trd, tc);
for (j = 0; j < refColLen; j++) {
if (referencedCols[j] > droppedColumnPosition)
referencedCols[j]--;
}
trd.setReferencedCols(referencedCols);
dd.addDescriptor(trd, sd, DataDictionary.SYSTRIGGERS_CATALOG_NUM, false, tc);
}
}
// if the trigger was dropped in the loop above, move on to the next trigger
if (triggerDroppedAlready)
continue;
// Column being dropped is not one of trigger columns. Check if
// that column is getting used inside the trigger action through
// REFERENCING clause. This can be tracked only for triggers
// created in 10.7 and higher releases. Derby releases prior to
// that did not keep track of trigger action columns used
// through the REFERENCING clause.
int[] referencedColsInTriggerAction = trd.getReferencedColsInTriggerAction();
if (referencedColsInTriggerAction != null) {
int refColInTriggerActionLen = referencedColsInTriggerAction.length, j;
boolean changedColPositionInTriggerAction = false;
for (j = 0; j < refColInTriggerActionLen; j++) {
if (referencedColsInTriggerAction[j] > droppedColumnPosition) {
changedColPositionInTriggerAction = true;
} else if (referencedColsInTriggerAction[j] == droppedColumnPosition) {
if (cascade) {
trd.drop(lcc);
triggerDroppedAlready = true;
activation.addWarning(StandardException.newWarning(SQLState.LANG_TRIGGER_DROPPED, trd.getName(), td.getName()));
} else {
// we'd better give an error if we don't drop it,
throw StandardException.newException(SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT, dm.getActionString(DependencyManager.DROP_COLUMN), columnName, "TRIGGER", trd.getName());
}
break;
}
}
// The dropped column is not used in the trigger action, but trigger action column positions changed because of the drop column; save the renumbered positions.
if (j == refColInTriggerActionLen && changedColPositionInTriggerAction) {
dd.dropTriggerDescriptor(trd, tc);
for (j = 0; j < refColInTriggerActionLen; j++) {
if (referencedColsInTriggerAction[j] > droppedColumnPosition)
referencedColsInTriggerAction[j]--;
}
trd.setReferencedColsInTriggerAction(referencedColsInTriggerAction);
dd.addDescriptor(trd, sd, DataDictionary.SYSTRIGGERS_CATALOG_NUM, false, tc);
}
}
}
ConstraintDescriptorList csdl = dd.getConstraintDescriptors(td);
int csdl_size = csdl.size();
ArrayList<ConstantAction> newCongloms = new ArrayList<ConstantAction>();
// we want to remove referenced primary/unique keys in the second
// round. This will ensure that self-referential constraints will
// work OK.
int tbr_size = 0;
ConstraintDescriptor[] toBeRemoved = new ConstraintDescriptor[csdl_size];
// iterate downwards, so that removals don't disturb the entries we haven't visited yet
for (int i = csdl_size - 1; i >= 0; i--) {
ConstraintDescriptor cd = csdl.elementAt(i);
int[] referencedColumns = cd.getReferencedColumns();
int numRefCols = referencedColumns.length, j;
boolean changed = false;
for (j = 0; j < numRefCols; j++) {
if (referencedColumns[j] > droppedColumnPosition)
changed = true;
if (referencedColumns[j] == droppedColumnPosition)
break;
}
// column not referenced
if (j == numRefCols) {
if ((cd instanceof CheckConstraintDescriptor) && changed) {
dd.dropConstraintDescriptor(cd, tc);
for (j = 0; j < numRefCols; j++) {
if (referencedColumns[j] > droppedColumnPosition)
referencedColumns[j]--;
}
((CheckConstraintDescriptor) cd).setReferencedColumnsDescriptor(new ReferencedColumnsDescriptorImpl(referencedColumns));
dd.addConstraintDescriptor(cd, tc);
}
continue;
}
if (!cascade) {
// RESTRICT: fail, since a constraint references the column being dropped
throw StandardException.newException(SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT, dm.getActionString(DependencyManager.DROP_COLUMN), columnName, "CONSTRAINT", cd.getConstraintName());
}
if (cd instanceof ReferencedKeyConstraintDescriptor) {
// restrict will raise an error in invalidate if referenced
toBeRemoved[tbr_size++] = cd;
continue;
}
// drop now in all other cases
dm.invalidateFor(cd, DependencyManager.DROP_CONSTRAINT, lcc);
dropConstraint(cd, td, newCongloms, activation, lcc, true);
activation.addWarning(StandardException.newWarning(SQLState.LANG_CONSTRAINT_DROPPED, cd.getConstraintName(), td.getName()));
}
for (int i = tbr_size - 1; i >= 0; i--) {
ConstraintDescriptor cd = toBeRemoved[i];
dropConstraint(cd, td, newCongloms, activation, lcc, false);
activation.addWarning(StandardException.newWarning(SQLState.LANG_CONSTRAINT_DROPPED, cd.getConstraintName(), td.getName()));
if (cascade) {
ConstraintDescriptorList fkcdl = dd.getForeignKeys(cd.getUUID());
for (ConstraintDescriptor fkcd : fkcdl) {
dm.invalidateFor(fkcd, DependencyManager.DROP_CONSTRAINT, lcc);
dropConstraint(fkcd, td, newCongloms, activation, lcc, true);
activation.addWarning(StandardException.newWarning(SQLState.LANG_CONSTRAINT_DROPPED, fkcd.getConstraintName(), fkcd.getTableDescriptor().getName()));
}
}
dm.invalidateFor(cd, DependencyManager.DROP_CONSTRAINT, lcc);
dm.clearDependencies(lcc, cd);
}
/* If there are new backing conglomerates which must be
* created to replace a dropped shared conglomerate
* (where the shared conglomerate was dropped as part
* of a "drop constraint" call above), then create them
* now. We do this *after* dropping all dependent
* constraints because we don't want to waste time
* creating a new conglomerate if it's just going to be
* dropped again as part of another "drop constraint".
*/
createNewBackingCongloms(newCongloms, (long[]) null);
/*
* The work we've done above, specifically the possible
* dropping of primary key, foreign key, and unique constraints
* and their underlying indexes, may have affected the table
* descriptor. By re-reading the table descriptor here, we
* ensure that the compressTable code is working with an
* accurate table descriptor. Without this line, we may get
* conglomerate-not-found errors and the like due to our
* stale table descriptor.
*/
td = dd.getTableDescriptor(tableId);
compressTable();
ColumnDescriptorList tab_cdl = td.getColumnDescriptorList();
// drop the column from syscolumns
dd.dropColumnDescriptor(td.getUUID(), columnName, tc);
ColumnDescriptor[] cdlArray = new ColumnDescriptor[size - columnDescriptor.getPosition()];
// For every column after the dropped one, remove its descriptor and re-add it with its position shifted down by one.
for (int i = columnDescriptor.getPosition(), j = 0; i < size; i++, j++) {
ColumnDescriptor cd = tab_cdl.elementAt(i);
dd.dropColumnDescriptor(td.getUUID(), cd.getColumnName(), tc);
cd.setPosition(i);
if (cd.isAutoincrement()) {
cd.setAutoinc_create_or_modify_Start_Increment(ColumnDefinitionNode.CREATE_AUTOINCREMENT);
}
cdlArray[j] = cd;
}
dd.addDescriptorArray(cdlArray, td, DataDictionary.SYSCOLUMNS_CATALOG_NUM, false, tc);
// By this time, the column has been removed from the table descriptor.
// Now, go through all the triggers and regenerate their trigger action
// SPS and rebind the generated trigger action sql. If the trigger
// action is using the dropped column, it will get detected here. If
// not, then we will have generated the internal trigger action sql
// which matches the trigger action sql provided by the user.
//
// eg of positive test case
// create table atdc_16_tab1 (a1 integer, b1 integer, c1 integer);
// create table atdc_16_tab2 (a2 integer, b2 integer, c2 integer);
// create trigger atdc_16_trigger_1
// after update of b1 on atdc_16_tab1
// REFERENCING NEW AS newt
// for each row
// update atdc_16_tab2 set c2 = newt.c1
// The internal representation for the trigger action before the column
// is dropped is as follows
// update atdc_16_tab2 set c2 =
// org.apache.derby.iapi.db.Factory::getTriggerExecutionContext().
// getONewRow().getInt(3)
// After the drop column shown below
// alter table atdc_16_tab1 drop column a1
// the above internal representation of trigger action sql is not
// correct anymore, because the column position of c1 in atdc_16_tab1
// has now changed from 3 to 2. The following loop will regenerate it
// and change it to as follows
// update atdc_16_tab2 set c2 =
// org.apache.derby.iapi.db.Factory::getTriggerExecutionContext().
// getONewRow().getInt(2)
//
// We could not do this before the actual column drop, because the
// rebind would have still found the column being dropped in the
// table descriptor and hence use of such a column in the trigger
// action rebind would not have been caught.
// For the table on which ALTER TABLE is getting performed, find out
// all the SPSDescriptors that use that table as a provider. We are
// looking for SPSDescriptors that have been created internally for
// trigger action SPSes. Through those SPSDescriptors, we will be
// able to get to the triggers dependent on the table being altered
// Following will get all the dependent objects that are using
// ALTER TABLE table as provider
List<DependencyDescriptor> depsOnAlterTableList = dd.getProvidersDescriptorList(td.getObjectID().toString());
for (DependencyDescriptor depOnAT : depsOnAlterTableList) {
// Go through all the dependent objects on the table being altered
DependableFinder dependent = depOnAT.getDependentFinder();
// We are only interested in dependents that are stored prepared statements.
if (dependent.getSQLObjectType().equals(Dependable.STORED_PREPARED_STATEMENT)) {
// Look for all the dependent objects that are using this
// stored prepared statement as provider. We are only
// interested in dependents that are triggers.
List<DependencyDescriptor> depsTrigger = dd.getProvidersDescriptorList(depOnAT.getUUID().toString());
for (DependencyDescriptor depsTriggerDesc : depsTrigger) {
DependableFinder providerIsTrigger = depsTriggerDesc.getDependentFinder();
// it is a trigger
if (providerIsTrigger.getSQLObjectType().equals(Dependable.TRIGGER)) {
// Drop and recreate the trigger after regenerating
// it's trigger action plan. If the trigger action
// depends on the column being dropped, it will be
// caught here.
TriggerDescriptor trdToBeDropped = dd.getTriggerDescriptor(depsTriggerDesc.getUUID());
// First check for dependencies in the trigger's WHEN
// clause, if there is one.
UUID whenClauseId = trdToBeDropped.getWhenClauseId();
boolean gotDropped = false;
if (whenClauseId != null) {
gotDropped = columnDroppedAndTriggerDependencies(trdToBeDropped, whenClauseId, true, cascade, columnName);
}
// If the trigger was not dropped via its WHEN clause, check the trigger action's dependencies.
if (!gotDropped) {
columnDroppedAndTriggerDependencies(trdToBeDropped, trdToBeDropped.getActionId(), false, cascade, columnName);
}
}
}
}
}
// Adjust the column permissions rows in SYSCOLPERMS to reflect the
// changed column positions due to the dropped column:
dd.updateSYSCOLPERMSforDropColumn(td.getUUID(), tc, columnDescriptor);
// remove column descriptor from table descriptor. this fixes up the
// list in case we were called recursively in order to cascade-drop a
// dependent generated column.
tab_cdl.remove(td.getColumnDescriptor(columnName));
}
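The same renumbering idiom appears three times above, for trigger columns, trigger action columns, and check constraint columns. Distilled into one helper (an illustrative sketch, not a Derby method):
// References above the dropped 1-based position shift down by one. A
// reference equal to the dropped position means the dependent object itself
// is affected; the callers above handle that case first (drop the object
// under CASCADE, raise an error under RESTRICT) before renumbering.
// Returns true if any position moved, i.e. the descriptor must be re-stored.
static boolean renumberAfterColumnDrop(int[] referencedCols, int droppedColumnPosition) {
boolean changed = false;
for (int j = 0; j < referencedCols.length; j++) {
if (referencedCols[j] > droppedColumnPosition) {
referencedCols[j]--;
changed = true;
}
}
return changed;
}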
use of org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList in project derby by apache.
the class CreateIndexConstantAction method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for
* creating an index.
*
* <P>
* An index is represented as:
* <UL>
* <LI> ConglomerateDescriptor.
* </UL>
* No dependencies are created.
*
* @see ConglomerateDescriptor
* @see SchemaDescriptor
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
TableDescriptor td;
UUID toid;
ColumnDescriptor columnDescriptor;
int[] baseColumnPositions;
IndexRowGenerator indexRowGenerator = null;
ExecRow[] baseRows;
ExecIndexRow[] indexRows;
ExecRow[] compactBaseRows;
GroupFetchScanController scan;
RowLocationRetRowSource rowSource;
long sortId;
int maxBaseColumnPosition = -1;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
/*
** If the schema descriptor is null, then
** we must have just read ourselves in.
** So we will get the corresponding schema
** descriptor from the data dictionary.
*/
SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, tc, true);
/* Get the table descriptor. */
/* See if we can get the TableDescriptor
* from the Activation. (Will be there
* for backing indexes.)
*/
td = activation.getDDLTableDescriptor();
if (td == null) {
/* tableId will be non-null if adding an index to
* an existing table (as opposed to creating a
* table with a constraint with a backing index).
*/
if (tableId != null) {
td = dd.getTableDescriptor(tableId);
} else {
td = dd.getTableDescriptor(tableName, sd, tc);
}
}
if (td == null) {
throw StandardException.newException(SQLState.LANG_CREATE_INDEX_NO_TABLE, indexName, tableName);
}
if (td.getTableType() == TableDescriptor.SYSTEM_TABLE_TYPE) {
throw StandardException.newException(SQLState.LANG_CREATE_SYSTEM_INDEX_ATTEMPTED, indexName, tableName);
}
/* Get a shared table lock on the table. We need to lock table before
* invalidate dependents, otherwise, we may interfere with the
* compilation/re-compilation of DML/DDL. See beetle 4325 and $WS/
* docs/language/SolutionsToConcurrencyIssues.txt (point f).
*/
lockTableForDDL(tc, td.getHeapConglomerateId(), false);
// invalidate any prepared statements that depended on this table (including this one)
if (!forCreateTable) {
dm.invalidateFor(td, DependencyManager.CREATE_INDEX, lcc);
}
// Translate the base column names to column positions
baseColumnPositions = new int[columnNames.length];
for (int i = 0; i < columnNames.length; i++) {
// Look up the column in the data dictionary
columnDescriptor = td.getColumnDescriptor(columnNames[i]);
if (columnDescriptor == null) {
throw StandardException.newException(SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE, columnNames[i], tableName);
}
TypeId typeId = columnDescriptor.getType().getTypeId();
// Don't allow an index column to be created on a non-orderable type
ClassFactory cf = lcc.getLanguageConnectionFactory().getClassFactory();
boolean isIndexable = typeId.orderable(cf);
if (isIndexable && typeId.userType()) {
String userClass = typeId.getCorrespondingJavaTypeName();
// Don't allow indexes to be created on classes that are loaded
// from the database, because recovery won't be able to see the
// class when it needs it to run the compare method.
try {
if (cf.isApplicationClass(cf.loadApplicationClass(userClass)))
isIndexable = false;
} catch (ClassNotFoundException cnfe) {
// shouldn't happen, as we just checked that the class is orderable
isIndexable = false;
}
}
if (!isIndexable) {
throw StandardException.newException(SQLState.LANG_COLUMN_NOT_ORDERABLE_DURING_EXECUTION, typeId.getSQLTypeName());
}
// Remember the position in the base table of each column
baseColumnPositions[i] = columnDescriptor.getPosition();
if (maxBaseColumnPosition < baseColumnPositions[i])
maxBaseColumnPosition = baseColumnPositions[i];
}
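// maxBaseColumnPosition is used below to size the partial base-row templates for the scan.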
/* The code below tries to determine if the index that we're about
* to create can "share" a conglomerate with an existing index.
* If so, we will use a single physical conglomerate--namely, the
* one that already exists--to support both indexes. I.e. we will
* *not* create a new conglomerate as part of this constant action.
*
* Deferrable constraints are backed by indexes that are *not* shared
* since they use physically non-unique indexes and as such are
* different from indexes used to represent non-deferrable
* constraints.
*/
// check if we have similar indices already for this table
ConglomerateDescriptor[] congDescs = td.getConglomerateDescriptors();
boolean shareExisting = false;
for (int i = 0; i < congDescs.length; i++) {
ConglomerateDescriptor cd = congDescs[i];
if (!cd.isIndex())
continue;
if (droppedConglomNum == cd.getConglomerateNumber()) {
/* We can't share with any conglomerate descriptor
* whose conglomerate number matches the dropped
* conglomerate number, because that descriptor's
* backing conglomerate was dropped, as well. If
* we're going to share, we have to share with a
* descriptor whose backing physical conglomerate
* is still around.
*/
continue;
}
IndexRowGenerator irg = cd.getIndexDescriptor();
int[] bcps = irg.baseColumnPositions();
boolean[] ia = irg.isAscending();
int j = 0;
/* The conditions which allow an index to share an existing
* conglomerate are as follows:
*
* 1. the set of columns (both key and include columns) and their
* order in the index is the same as that of an existing index AND
*
* 2. the ordering attributes are the same AND
*
* 3. one of the following is true:
* a) the existing index is unique, OR
* b) the existing index is non-unique with uniqueWhenNotNulls
* set to TRUE and the index being created is non-unique, OR
* c) both the existing index and the one being created are
* non-unique and have uniqueWithDuplicateNulls set to FALSE.
*
* 4. hasDeferrableChecking is FALSE.
*/
boolean possibleShare = (irg.isUnique() || !unique) && (bcps.length == baseColumnPositions.length) && !hasDeferrableChecking;
// check if the existing index is non-unique and uniqueWithDuplicateNulls is set to true (backing index for a unique constraint)
if (possibleShare && !irg.isUnique()) {
/* If the existing index has uniqueWithDuplicateNulls set to
* TRUE it can be shared by other non-unique indexes; otherwise
* the existing non-unique index has uniqueWithDuplicateNulls
* set to FALSE, which means the new non-unique conglomerate
* can only share if it has uniqueWithDuplicateNulls set to
* FALSE, as well.
*/
possibleShare = (irg.isUniqueWithDuplicateNulls() || !uniqueWithDuplicateNulls);
}
if (possibleShare && indexType.equals(irg.indexType())) {
for (; j < bcps.length; j++) {
if ((bcps[j] != baseColumnPositions[j]) || (ia[j] != isAscending[j]))
break;
}
}
// all column positions and orderings matched -- share the conglomerate
if (j == baseColumnPositions.length) {
/*
* Don't allow users to create a duplicate index. Allow if being done internally
* for a constraint
*/
if (!isConstraint) {
activation.addWarning(StandardException.newWarning(SQLState.LANG_INDEX_DUPLICATE, indexName, cd.getConglomerateName()));
return;
}
/* Sharing indexes share the physical conglomerate
* underneath, so pull the conglomerate number from
* the existing conglomerate descriptor.
*/
conglomId = cd.getConglomerateNumber();
/* We create a new IndexRowGenerator because certain
* attributes--esp. uniqueness--may be different between
* the index we're creating and the conglomerate that
* already exists. I.e. even though we're sharing a
* conglomerate, the new index is not necessarily
* identical to the existing conglomerate. We have to
* keep track of that info so that if we later drop
* the shared physical conglomerate, we can figure out
* what this index (the one we're creating now) is
* really supposed to look like.
*/
indexRowGenerator = new IndexRowGenerator(indexType, unique, uniqueWithDuplicateNulls,
false, // uniqueDeferrable
false, // hasDeferrableChecking: deferrable indexes are not shared
baseColumnPositions, isAscending, baseColumnPositions.length);
// DERBY-655 and DERBY-1343
// Sharing indexes will have unique logical conglomerate UUIDs.
conglomerateUUID = dd.getUUIDFactory().createUUID();
shareExisting = true;
break;
}
}
/* If we have a droppedConglomNum then the index we're about to
* "create" already exists--i.e. it has an index descriptor and
* the corresponding information is already in the system catalogs.
* The only thing we're missing, then, is the physical conglomerate
* to back the index (because the old conglomerate was dropped).
*/
boolean alreadyHaveConglomDescriptor = (droppedConglomNum > -1L);
/* If this index already has an essentially same one, we share the
* conglomerate with the old one, and just simply add a descriptor
* entry into SYSCONGLOMERATES--unless we already have a descriptor,
* in which case we don't even need to do that.
*/
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
if (shareExisting && !alreadyHaveConglomDescriptor) {
ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
cdl.add(cgd);
// can't just return yet, need to get member "indexTemplateRow"
// because create constraint may use it
}
// Describe the properties of the index to the store using Properties
// RESOLVE: The following properties assume a BTREE index.
Properties indexProperties;
if (properties != null) {
indexProperties = properties;
} else {
indexProperties = new Properties();
}
// Tell it the conglomerate id of the base table
indexProperties.put("baseConglomerateId", Long.toString(td.getHeapConglomerateId()));
if (uniqueWithDuplicateNulls && !hasDeferrableChecking) {
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
indexProperties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
} else {
// Older data dictionary versions have no "unique with duplicate nulls" index; create a unique index instead.
if (uniqueWithDuplicateNulls) {
unique = true;
}
}
}
// All indexes are unique because they contain the RowLocation.
// The number of uniqueness columns must include the RowLocation
// if the user did not specify a unique index.
indexProperties.put("nUniqueColumns", Integer.toString(unique ? baseColumnPositions.length : baseColumnPositions.length + 1));
// By convention, the row location column is the last column
indexProperties.put("rowLocationColumn", Integer.toString(baseColumnPositions.length));
// For now, all columns are key fields, including the RowLocation
indexProperties.put("nKeyFields", Integer.toString(baseColumnPositions.length + 1));
// For now, assume that all index columns are ordered columns
if (!shareExisting) {
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
indexRowGenerator = new IndexRowGenerator(indexType, unique, uniqueWithDuplicateNulls, uniqueDeferrable, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), baseColumnPositions, isAscending, baseColumnPositions.length);
} else {
indexRowGenerator = new IndexRowGenerator(indexType, unique, false, false, false, baseColumnPositions, isAscending, baseColumnPositions.length);
}
}
/* Now add the rows from the base table to the conglomerate.
* We do this by scanning the base table and inserting the
* rows into a sorter before inserting from the sorter
* into the index. This gives us better performance
* and a more compact index.
*/
rowSource = null;
sortId = 0;
// set to true once the sorter is created
boolean needToDropSort = false;
/* bulkFetchSize will be 16 (for now) unless
* we are creating the table in which case it
* will be 1. Too hard to remove scan when
* creating index on new table, so minimize
* work where we can.
*/
int bulkFetchSize = (forCreateTable) ? 1 : 16;
int numColumns = td.getNumberOfColumns();
int approximateRowSize = 0;
// Create the FormatableBitSet for mapping the partial to full base row
FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
for (int index = 0; index < baseColumnPositions.length; index++) {
bitSet.set(baseColumnPositions[index]);
}
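// Column positions are 1-based, but the scan below addresses fields with 0-based indexes, hence the shift.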
FormatableBitSet zeroBasedBitSet = RowUtil.shift(bitSet, 1);
// Start by opening a full scan on the base table.
scan = tc.openGroupFetchScan(td.getHeapConglomerateId(),
false, // hold
0, // open base table read only
TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE,
zeroBasedBitSet, // all fields as objects
(DataValueDescriptor[]) null, // startKeyValue
0, // not used when giving null start posn.
null, // qualifier
(DataValueDescriptor[]) null, // stopKeyValue
0); // not used when giving null stop posn.
// Create an array to put base row template
baseRows = new ExecRow[bulkFetchSize];
indexRows = new ExecIndexRow[bulkFetchSize];
compactBaseRows = new ExecRow[bulkFetchSize];
try {
// Create the array of base row template
for (int i = 0; i < bulkFetchSize; i++) {
// create a base row template
baseRows[i] = activation.getExecutionFactory().getValueRow(maxBaseColumnPosition);
// create an index row template
indexRows[i] = indexRowGenerator.getIndexRowTemplate();
// create a compact base row template
compactBaseRows[i] = activation.getExecutionFactory().getValueRow(baseColumnPositions.length);
}
indexTemplateRow = indexRows[0];
// Fill the partial row with nulls of the correct type
ColumnDescriptorList cdl = td.getColumnDescriptorList();
int cdlSize = cdl.size();
for (int index = 0, numSet = 0; index < cdlSize; index++) {
if (!zeroBasedBitSet.get(index)) {
continue;
}
numSet++;
ColumnDescriptor cd = cdl.elementAt(index);
DataTypeDescriptor dts = cd.getType();
for (int i = 0; i < bulkFetchSize; i++) {
// Put the column in both the compact and sparse base rows
baseRows[i].setColumn(index + 1, dts.getNull());
compactBaseRows[i].setColumn(numSet, baseRows[i].getColumn(index + 1));
}
// Calculate the approximate row size for the index row
approximateRowSize += dts.getTypeId().getApproximateLengthInBytes(dts);
}
// Get an array of RowLocation template
RowLocation[] rl = new RowLocation[bulkFetchSize];
for (int i = 0; i < bulkFetchSize; i++) {
rl[i] = scan.newRowLocationTemplate();
// Get an index row based on the base row
indexRowGenerator.getIndexRow(compactBaseRows[i], rl[i], indexRows[i], bitSet);
}
/* now that we have indexTemplateRow, we are done in the case of a shared index
*/
if (shareExisting)
return;
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate key.
* We create a basic sort observer for non-unique indexes
* so that we can reuse the wrappers during an external
* sort.
*/
int numColumnOrderings;
SortObserver sortObserver;
Properties sortProperties = null;
if (unique || uniqueWithDuplicateNulls || uniqueDeferrable) {
// if the index is a constraint, use constraintname in
// possible error message
String indexOrConstraintName = indexName;
if (conglomerateUUID != null) {
ConglomerateDescriptor cd = dd.getConglomerateDescriptor(conglomerateUUID);
if ((isConstraint) && (cd != null && cd.getUUID() != null && td != null)) {
ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
}
}
if (unique || uniqueDeferrable) {
numColumnOrderings = unique ? baseColumnPositions.length : baseColumnPositions.length + 1;
sortObserver = new UniqueIndexSortObserver(lcc, constraintID, true, uniqueDeferrable, initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
} else {
// unique with duplicate nulls allowed.
numColumnOrderings = baseColumnPositions.length + 1;
// tell transaction controller to use the unique with
// duplicate nulls sorter, when making createSort() call.
sortProperties = new Properties();
sortProperties.put(AccessFactoryGlobals.IMPL_TYPE, AccessFactoryGlobals.SORT_UNIQUEWITHDUPLICATENULLS_EXTERNAL);
// use sort operator which treats nulls unequal
sortObserver = new UniqueWithDuplicateNullsIndexSortObserver(lcc, constraintID, true, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
}
} else {
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(true, false, indexTemplateRow, true);
}
ColumnOrdering[] order = new ColumnOrdering[numColumnOrderings];
for (int i = 0; i < numColumnOrderings; i++) {
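// key columns keep their declared direction; for non-unique indexes the trailing RID column is always ascending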
order[i] = new IndexColumnOrder(i, unique || i < numColumnOrderings - 1 ? isAscending[i] : true);
}
// create the sorter
sortId = tc.createSort(sortProperties, indexTemplateRow.getRowArrayClone(), order, sortObserver,
false, // not in order
scan.getEstimatedRowCount(),
approximateRowSize); // est row size, -1 means no idea
needToDropSort = true;
// Populate sorter and get the output of the sorter into a row
// source. The sorter has the indexed columns only and the columns
// are in the correct order.
rowSource = loadSorter(baseRows, indexRows, tc, scan, sortId, rl);
conglomId = tc.createAndLoadConglomerate(indexType,
indexTemplateRow.getRowArray(), // index row template
order, // columns sort order
indexRowGenerator.getColumnCollationIds(td.getColumnDescriptorList()),
indexProperties,
TransactionController.IS_DEFAULT, // not temporary
rowSource, (long[]) null);
} finally {
/* close the table scan */
if (scan != null)
scan.close();
/* close the sorter row source before throwing exception */
if (rowSource != null)
rowSource.closeRowSource();
/*
** drop the sort so that intermediate external sort run can be
** removed from disk
*/
if (needToDropSort)
tc.dropSort(sortId);
}
ConglomerateController indexController = tc.openConglomerate(conglomId, false, 0, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
// Check to make sure that the conglomerate can be used as an index
if (!indexController.isKeyed()) {
indexController.close();
throw StandardException.newException(SQLState.LANG_NON_KEYED_INDEX, indexName, indexType);
}
indexController.close();
// If we don't already have a conglomerate descriptor for this index, create one now.
if (!alreadyHaveConglomDescriptor) {
ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
cdl.add(cgd);
/* Since we created a new conglomerate descriptor, load
* its UUID into the corresponding field, to ensure that
* it is properly set in the StatisticsDescriptor created
* below.
*/
conglomerateUUID = cgd.getUUID();
}
CardinalityCounter cCount = (CardinalityCounter) rowSource;
long numRows = cCount.getRowCount();
if (addStatistics(dd, indexRowGenerator, numRows)) {
long[] c = cCount.getCardinality();
for (int i = 0; i < c.length; i++) {
StatisticsDescriptor statDesc = new StatisticsDescriptor(dd, dd.getUUIDFactory().createUUID(), conglomerateUUID, td.getUUID(), "I", new StatisticsImpl(numRows, c[i]), i + 1);
dd.addDescriptor(statDesc, null, DataDictionary.SYSSTATISTICS_CATALOG_NUM, true, tc);
}
}
}
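The share-eligibility test in executeConstantAction is spread across several statements; collapsed into a single predicate it reads as below. This is an illustrative helper, not Derby API; the index type (BTREE etc.) must also match, which is omitted here for brevity.
// Conditions 1-4 from the comment block in executeConstantAction, in one place.
static boolean canShareConglomerate(int[] existingCols, boolean[] existingAsc,
boolean existingUnique, boolean existingUniqueWithDupNulls,
int[] newCols, boolean[] newAsc, boolean newUnique,
boolean newUniqueWithDupNulls, boolean hasDeferrableChecking) {
if (hasDeferrableChecking) {
// condition 4: deferrable indexes are never shared
return false;
}
if (!existingUnique && newUnique) {
// condition 3a fails: a unique index can't share a non-unique conglomerate
return false;
}
if (existingCols.length != newCols.length) {
// condition 1: same number of columns
return false;
}
if (!existingUnique && !existingUniqueWithDupNulls && newUniqueWithDupNulls) {
// conditions 3b/3c: duplicate-null semantics must line up
return false;
}
for (int i = 0; i < newCols.length; i++) {
if (existingCols[i] != newCols[i] || existingAsc[i] != newAsc[i]) {
// conditions 1 and 2: column positions and ordering attributes match
return false;
}
}
return true;
}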