Use of org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor in project derby by apache.
The class RenameConstantAction, method execGutsRenameTable.
// Do the necessary work for RENAME TABLE at execute time.
// (sd and newTableName are fields of the enclosing constant action.)
private void execGutsRenameTable(TableDescriptor td, Activation activation) throws StandardException {
    ConstraintDescriptorList constraintDescriptorList;
    ConstraintDescriptor constraintDescriptor;
    LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    DataDictionary dd = lcc.getDataDictionary();
    DependencyManager dm = dd.getDependencyManager();
    TransactionController tc = lcc.getTransactionExecute();
    dm.invalidateFor(td, DependencyManager.RENAME, lcc);
    /* Look for foreign key dependencies on the table. If any are found,
     * use the dependency manager to pass the rename action on to the
     * dependents.
     */
    constraintDescriptorList = dd.getConstraintDescriptors(td);
    for (int index = 0; index < constraintDescriptorList.size(); index++) {
        constraintDescriptor = constraintDescriptorList.elementAt(index);
        if (constraintDescriptor instanceof ReferencedKeyConstraintDescriptor) {
            dm.invalidateFor(constraintDescriptor, DependencyManager.RENAME, lcc);
        }
    }
    // Drop the old table descriptor...
    dd.dropTableDescriptor(td, sd, tc);
    // ...change the table name on it...
    td.setTableName(newTableName);
    // ...and re-add the descriptor under the new name.
    dd.addDescriptor(td, sd, DataDictionary.SYSTABLES_CATALOG_NUM, false, tc);
}
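For context, a minimal, hypothetical JDBC session of the kind that drives this constant action. The in-memory connection URL and the table names are assumptions for illustration, not part of the Derby source above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class RenameDemo {
    public static void main(String[] args) throws Exception {
        // RENAME TABLE is what ultimately reaches execGutsRenameTable: the
        // table descriptor is dropped, renamed, and re-added to SYSTABLES,
        // with dependents invalidated along the way.
        try (Connection c = DriverManager.getConnection("jdbc:derby:memory:demo;create=true");
             Statement s = c.createStatement()) {
            s.executeUpdate("CREATE TABLE t (id INT NOT NULL PRIMARY KEY)");
            s.executeUpdate("RENAME TABLE t TO t2");
        }
    }
}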
Use of org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor in project derby by apache.
The class InsertResultSet, method emptyIndexes.
/**
 * Empty the indexes after doing a bulk insert replace
 * where the table has 0 rows after the replace.
 * RESOLVE: This method is ugly! Prior to 2.0, we simply
 * scanned back across the table to build the indexes. We
 * changed this in 2.0 to populate the sorters via a callback
 * as we populated the table. Doing a 0 row replace into a
 * table with indexes is a degenerate case, hence we allow
 * ugly and unoptimized code.
 *
 * @exception StandardException Thrown on failure
 */
private void emptyIndexes(long newHeapConglom, InsertConstantAction constants, TableDescriptor td, DataDictionary dd, ExecRow fullTemplate) throws StandardException {
    // Note: activation, lcc, tc, rl, rowCount, indexDCOCIs, collation,
    // needToDropSort, sortIds, and rowSources are fields of the enclosing
    // InsertResultSet.
    int numIndexes = constants.irgs.length;
    ExecIndexRow[] idxRows = new ExecIndexRow[numIndexes];
    ExecRow baseRows;
    ColumnOrdering[][] order = new ColumnOrdering[numIndexes][];
    int numColumns = td.getNumberOfColumns();
    collation = new int[numIndexes][];
    // Create the BitSet for mapping the partial row to the full row
    FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
    // Need to check each index for referenced columns
    int numReferencedColumns = 0;
    for (int index = 0; index < numIndexes; index++) {
        int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
        for (int bcp = 0; bcp < baseColumnPositions.length; bcp++) {
            if (!bitSet.get(baseColumnPositions[bcp])) {
                bitSet.set(baseColumnPositions[bcp]);
                numReferencedColumns++;
            }
        }
    }
    // We can finally create the partial base row
    baseRows = activation.getExecutionFactory().getValueRow(numReferencedColumns);
    // Fill in the partial base row with nulls of the correct data type
    int colNumber = 0;
    for (int index = 0; index < numColumns; index++) {
        if (bitSet.get(index + 1)) {
            colNumber++;
            // NOTE: 1-based column numbers
            baseRows.setColumn(colNumber, fullTemplate.getColumn(index + 1).cloneValue(false));
        }
    }
    needToDropSort = new boolean[numIndexes];
    sortIds = new long[numIndexes];
    /* Do the initial setup before scanning the heap.
     * For each index, build a single index row and a sorter.
     */
    for (int index = 0; index < numIndexes; index++) {
        // Create a single index row template for each index
        idxRows[index] = constants.irgs[index].getIndexRowTemplate();
        // Get an index row based on the base row
        // (This call is only necessary here because we need to pass a
        // template to the sorter.)
        constants.irgs[index].getIndexRow(baseRows, rl, idxRows[index], bitSet);
        /* For non-unique indexes, we order by all columns + the RID.
         * For unique indexes, we just order by the columns.
         * We create a unique index observer for unique indexes
         * so that we can catch duplicate keys.
         */
        ConglomerateDescriptor cd;
        // Get the ConglomerateDescriptor for the index
        cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
        int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
        boolean[] isAscending = constants.irgs[index].isAscending();
        int numColumnOrderings;
        SortObserver sortObserver;
        final IndexRowGenerator indDes = cd.getIndexDescriptor();
        if (indDes.isUnique() || indDes.isUniqueDeferrable()) {
            numColumnOrderings = indDes.isUnique() ? baseColumnPositions.length : baseColumnPositions.length + 1;
            String indexOrConstraintName = cd.getConglomerateName();
            boolean deferred = false;
            boolean uniqueDeferrable = false;
            UUID uniqueDeferrableConstraintId = null;
            if (cd.isConstraint()) {
                // The index is backing a constraint
                ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
                indexOrConstraintName = conDesc.getConstraintName();
                deferred = lcc.isEffectivelyDeferred(lcc.getCurrentSQLSessionContext(activation), conDesc.getUUID());
                uniqueDeferrable = conDesc.deferrable();
                uniqueDeferrableConstraintId = conDesc.getUUID();
            }
            sortObserver = new UniqueIndexSortObserver(
                    lcc,
                    uniqueDeferrableConstraintId,
                    false,              // don't clone rows
                    uniqueDeferrable,
                    deferred,
                    indexOrConstraintName,
                    idxRows[index],
                    true,
                    td.getName());
        } else {
            numColumnOrderings = baseColumnPositions.length + 1;
            sortObserver = new BasicSortObserver(false, false, idxRows[index], true);
        }
        order[index] = new ColumnOrdering[numColumnOrderings];
        for (int ii = 0; ii < isAscending.length; ii++) {
            order[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
        }
        if (numColumnOrderings > isAscending.length) {
            // The RID is the last ordering column
            order[index][isAscending.length] = new IndexColumnOrder(isAscending.length);
        }
        // Create the sorters
        sortIds[index] = tc.createSort(
                (Properties) null,
                idxRows[index].getRowArrayClone(),
                order[index],
                sortObserver,
                false,              // not in order
                rowCount,           // estimated rows
                -1);                // estimated row size; -1 means no idea
        needToDropSort[index] = true;
    }
    // Populate sorters and get the output of each sorter into a row
    // source. The sorters have the indexed columns only and the columns
    // are in the correct order.
    rowSources = new RowLocationRetRowSource[numIndexes];
    // Fill in the RowSources
    SortController[] sorter = new SortController[numIndexes];
    for (int index = 0; index < numIndexes; index++) {
        sorter[index] = tc.openSort(sortIds[index]);
        sorter[index].completedInserts();
        rowSources[index] = tc.openSortRowSource(sortIds[index]);
    }
    long[] newIndexCongloms = new long[numIndexes];
    // Populate each index
    for (int index = 0; index < numIndexes; index++) {
        ConglomerateController indexCC;
        Properties properties = new Properties();
        ConglomerateDescriptor cd;
        // Get the ConglomerateDescriptor for the index
        cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
        // Build the properties list for the new conglomerate
        indexCC = tc.openCompiledConglomerate(false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE, constants.indexSCOCIs[index], indexDCOCIs[index]);
        // Get the properties on the old index
        indexCC.getInternalTablePropertySet(properties);
        /* Create the properties that language supplies when creating the
         * index. (The store doesn't preserve these.)
         */
        int indexRowLength = idxRows[index].nColumns();
        properties.put("baseConglomerateId", Long.toString(newHeapConglom));
        if (cd.getIndexDescriptor().isUnique()) {
            properties.put("nUniqueColumns", Integer.toString(indexRowLength - 1));
        } else {
            properties.put("nUniqueColumns", Integer.toString(indexRowLength));
        }
        if (cd.getIndexDescriptor().isUniqueWithDuplicateNulls() && !cd.getIndexDescriptor().hasDeferrableChecking()) {
            properties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
        }
        properties.put("rowLocationColumn", Integer.toString(indexRowLength - 1));
        properties.put("nKeyFields", Integer.toString(indexRowLength));
        indexCC.close();
        collation[index] = constants.irgs[index].getColumnCollationIds(td.getColumnDescriptorList());
        // We can finally drain the sorter and rebuild the index:
        // populate the new conglomerate from the sorted row source.
        newIndexCongloms[index] = tc.createAndLoadConglomerate(
                "BTREE",
                idxRows[index].getRowArray(),
                null,               // default column sort order
                collation[index],
                properties,
                TransactionController.IS_DEFAULT,
                rowSources[index],
                (long[]) null);
        /* Update the DataDictionary:
         * update SYS.SYSCONGLOMERATES with the new conglomerate number. If
         * the conglomerate is shared by duplicate indexes, all the
         * descriptors for those indexes need to be updated with the new
         * number.
         */
        dd.updateConglomerateDescriptor(td.getConglomerateDescriptors(constants.indexCIDS[index]), newIndexCongloms[index], tc);
        // Drop the old conglomerate
        tc.dropConglomerate(constants.indexCIDS[index]);
    }
}
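The ordering logic above has a simple invariant: fully unique indexes sort on their key columns only, while non-unique and deferrable-unique indexes append the row location (RID) as a final ordering column, so duplicate keys still have a total order. Below is a plain-Java sketch of that idea, using a hypothetical one-column key instead of Derby's ExecIndexRow and ColumnOrdering types (requires Java 16+ for records):

import java.util.Arrays;
import java.util.Comparator;

public class IndexOrderingSketch {
    // A fake index entry: one key column plus a row location (RID).
    record Entry(int key, long rid) {}

    public static void main(String[] args) {
        Entry[] entries = { new Entry(2, 11L), new Entry(1, 12L), new Entry(2, 10L) };
        // Non-unique case: order by the key columns, then by RID, mirroring
        // the "all columns + RID" ColumnOrdering array built in emptyIndexes().
        Arrays.sort(entries, Comparator.comparingInt(Entry::key)
                                       .thenComparingLong(Entry::rid));
        // Prints the two key=2 entries in RID order after the key=1 entry.
        System.out.println(Arrays.toString(entries));
    }
}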
Use of org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor in project derby by apache.
The class DropTableConstantAction, method dropAllConstraintDescriptors.
private void dropAllConstraintDescriptors(TableDescriptor td, Activation activation) throws StandardException {
    ConstraintDescriptor cd;
    ConstraintDescriptorList cdl;
    ConstraintDescriptor fkcd;
    ConstraintDescriptorList fkcdl;
    LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    DataDictionary dd = lcc.getDataDictionary();
    DependencyManager dm = dd.getDependencyManager();
    TransactionController tc = lcc.getTransactionExecute();
    cdl = dd.getConstraintDescriptors(td);
    /* The current element will be deleted out from under the loop,
     * so we only increment the counter when skipping an element. (HACK!)
     */
    for (int index = 0; index < cdl.size(); ) {
        cd = cdl.elementAt(index);
        if (cd instanceof ReferencedKeyConstraintDescriptor) {
            index++;
            continue;
        }
        dm.invalidateFor(cd, DependencyManager.DROP_CONSTRAINT, lcc);
        dropConstraint(cd, td, activation, lcc, true);
    }
    /* The current element will be deleted out from under the loop. (HACK!)
     */
    while (cdl.size() > 0) {
        cd = cdl.elementAt(0);
        if (SanityManager.DEBUG) {
            if (!(cd instanceof ReferencedKeyConstraintDescriptor)) {
                SanityManager.THROWASSERT("Constraint descriptor not an instance of " + "ReferencedKeyConstraintDescriptor as expected. Is a " + cd.getClass().getName());
            }
        }
        /*
        ** Drop the referenced constraint (after we got
        ** the primary keys) now. Do this prior to
        ** dropping the referencing foreign keys to avoid performing
        ** a lot of extra work updating the REFERENCECOUNT
        ** column of SYS.SYSCONSTRAINTS.
        **
        ** Pass in false to dropConstraint so it
        ** doesn't clear dependencies; we'll do that ourselves.
        */
        dropConstraint(cd, td, activation, lcc, false);
        /*
        ** If we are going to cascade, get all the
        ** referencing foreign keys and zap them first.
        */
        if (cascade) {
            /*
            ** Go to the system tables to get the foreign keys,
            ** to be safe.
            */
            fkcdl = dd.getForeignKeys(cd.getUUID());
            /*
            ** For each FK that references this key, drop it.
            */
            for (int inner = 0; inner < fkcdl.size(); inner++) {
                fkcd = (ConstraintDescriptor) fkcdl.elementAt(inner);
                dm.invalidateFor(fkcd, DependencyManager.DROP_CONSTRAINT, lcc);
                dropConstraint(fkcd, td, activation, lcc, true);
                activation.addWarning(StandardException.newWarning(SQLState.LANG_CONSTRAINT_DROPPED, fkcd.getConstraintName(), fkcd.getTableDescriptor().getName()));
            }
        }
        /*
        ** Now that we have gotten rid of the FKs (if we were cascading),
        ** it is OK to do the invalidateFor.
        */
        dm.invalidateFor(cd, DependencyManager.DROP_CONSTRAINT, lcc);
        dm.clearDependencies(lcc, cd);
    }
}
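Both loops rely on the list shrinking as constraints are dropped out from under them. A standalone sketch of the same "delete under the loop" pattern on a plain java.util.List, with invented constraint names for illustration:

import java.util.ArrayList;
import java.util.List;

public class DropLoopSketch {
    public static void main(String[] args) {
        List<String> live = new ArrayList<>(List.of("fk_a", "pk_main", "chk_b"));
        // Pass 1: drop everything that is not a referenced key. Only advance
        // the index when skipping, because a drop removes the current element.
        int index = 0;
        while (index < live.size()) {
            if (live.get(index).startsWith("pk_")) {
                index++;            // skip referenced keys for now
                continue;
            }
            live.remove(index);     // "drop" shrinks the live list; don't advance
        }
        // Pass 2: only referenced keys remain; drain the list from the front.
        while (!live.isEmpty()) {
            live.remove(0);
        }
    }
}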
Use of org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor in project derby by apache.
The class SetConstraintsConstantAction, method executeConstantAction.
/**
 * This is the guts of the execution-time logic for SET CONSTRAINTS.
 *
 * @param activation
 *
 * @see ConstantAction#executeConstantAction
 *
 * @exception StandardException Thrown on failure
 */
public void executeConstantAction(Activation activation) throws StandardException {
    // Note: constraints and deferred are fields of the enclosing constant action.
    final LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    final DataDictionary dd = lcc.getDataDictionary();
    final List<String> boundConstraints = new ArrayList<String>();
    if (constraints != null) {
        for (TableName c : constraints) {
            final SchemaDescriptor sd = dd.getSchemaDescriptor(c.getSchemaName(), lcc.getTransactionExecute(), true);
            final ConstraintDescriptor cd = dd.getConstraintDescriptor(c.getTableName(), sd.getUUID());
            if (cd == null) {
                throw StandardException.newException(SQLState.LANG_OBJECT_NOT_FOUND, "CONSTRAINT", c.getFullSQLName());
            }
            final String bound = IdUtil.normalToDelimited(sd.getSchemaName()) + "." + IdUtil.normalToDelimited(cd.getConstraintName());
            if (boundConstraints.contains(bound)) {
                throw StandardException.newException(SQLState.LANG_DB2_DUPLICATE_NAMES, cd.getConstraintName(), bound);
            } else {
                boundConstraints.add(bound);
            }
            if (deferred && !cd.deferrable()) {
                throw StandardException.newException(SQLState.LANG_SET_CONSTRAINT_NOT_DEFERRABLE, cd.getConstraintName());
            }
            lcc.setConstraintDeferred(activation, cd, deferred);
        }
    } else {
        lcc.setDeferredAll(activation, deferred);
    }
}
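For context, a hypothetical use of the statement this action executes. The constraint name is an assumption, and a named constraint must have been declared DEFERRABLE for the DEFERRED form to succeed (deferrable constraints arrived in Derby 10.11):

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class SetConstraintsDemo {
    static void toggle(Connection conn) throws SQLException {
        try (Statement s = conn.createStatement()) {
            // Defer one named constraint: checking waits until commit.
            s.execute("SET CONSTRAINTS orders_fk DEFERRED");
            // Or flip every deferrable constraint back to immediate checking.
            s.execute("SET CONSTRAINTS ALL IMMEDIATE");
        }
    }
}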
Use of org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor in project derby by apache.
The class AlterConstraintConstantAction, method executeConstantAction.
/**
 * This is the guts of the execution-time logic for ALTER CONSTRAINT.
 *
 * @see ConstantAction#executeConstantAction
 *
 * @exception StandardException Thrown on failure
 */
public void executeConstantAction(Activation activation) throws StandardException {
    // Note: tableId, tableName, constraintSchemaName, constraintName,
    // constraintType, and characteristics are fields of the enclosing
    // constant action.
    final LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    final DataDictionary dd = lcc.getDataDictionary();
    final DependencyManager dm = dd.getDependencyManager();
    final TransactionController tc = lcc.getTransactionExecute();
    /*
    ** Inform the data dictionary that we are about to write to it.
    ** There are several calls to data dictionary "get" methods here
    ** that might be done in "read" mode in the data dictionary, but
    ** it seemed safer to do this whole operation in "write" mode.
    **
    ** We tell the data dictionary we're done writing at the end of
    ** the transaction.
    */
    dd.startWriting(lcc);
    final TableDescriptor td = dd.getTableDescriptor(tableId);
    if (td == null) {
        throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tableName);
    }
    /* Table gets locked in AlterTableConstantAction */
    /*
    ** If the schema descriptor is null, then we must have just read
    ** ourselves in. So we will get the corresponding schema descriptor
    ** from the data dictionary.
    */
    SchemaDescriptor tdSd = td.getSchemaDescriptor();
    SchemaDescriptor constraintSd = constraintSchemaName == null ? tdSd : dd.getSchemaDescriptor(constraintSchemaName, tc, true);
    /* Get the constraint descriptor for the index, along
     * with an exclusive row lock on the row in SYS.SYSCONSTRAINTS,
     * in order to ensure that no one else compiles against the index.
     */
    final ConstraintDescriptor conDesc = dd.getConstraintDescriptorByName(td, constraintSd, constraintName, true);
    if (conDesc == null) {
        throw StandardException.newException(SQLState.LANG_DROP_OR_ALTER_NON_EXISTING_CONSTRAINT, constraintSd.getSchemaName() + "." + constraintName, td.getQualifiedName());
    }
    if (characteristics[2] != ConstraintDefinitionNode.ENFORCED_DEFAULT) {
        dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_11, "DEFERRED CONSTRAINTS");
        if (constraintType == DataDictionary.FOREIGNKEY_CONSTRAINT
                || constraintType == DataDictionary.NOTNULL_CONSTRAINT
                || !characteristics[2] /* not enforced */) {
            // Remove when feature DERBY-532 is completed
            if (!PropertyUtil.getSystemProperty("derby.constraintsTesting", "false").equals("true")) {
                throw StandardException.newException(SQLState.NOT_IMPLEMENTED, "non-default enforcement");
            }
        }
    }
    // The first two characteristics are unused during ALTER CONSTRAINT;
    // only enforcement can change.
    conDesc.setEnforced(characteristics[2]);
    int[] colsToSet = new int[1];
    colsToSet[0] = SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_STATE;
    dd.updateConstraintDescriptor(conDesc, conDesc.getUUID(), colsToSet, tc);
}
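And a hypothetical statement of the kind that reaches this action; the table and constraint names are illustrative. Note the guard above: switching a constraint to non-default enforcement raises a not-implemented error unless the derby.constraintsTesting system property is set to true (pending DERBY-532).

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class AlterConstraintDemo {
    static void disableEnforcement(Connection conn) throws SQLException {
        try (Statement s = conn.createStatement()) {
            // Reaches AlterConstraintConstantAction.executeConstantAction and,
            // absent derby.constraintsTesting=true, throws NOT_IMPLEMENTED.
            s.execute("ALTER TABLE orders ALTER CONSTRAINT orders_fk NOT ENFORCED");
        }
    }
}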