Use of org.apache.derby.iapi.sql.dictionary.TableDescriptor in project derby by apache.
The class AlterConstraintConstantAction, method executeConstantAction.
/**
* This is the guts of the Execution-time logic for ALTER CONSTRAINT.
*
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
final LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
final DataDictionary dd = lcc.getDataDictionary();
final DependencyManager dm = dd.getDependencyManager();
final TransactionController tc = lcc.getTransactionExecute();
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
final TableDescriptor td = dd.getTableDescriptor(tableId);
if (td == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tableName);
}
/* Table gets locked in AlterTableConstantAction */
/*
** If the schema descriptor is null, then
** we must have just read ourselves in.
** So we will get the corresponding schema
** descriptor from the data dictionary.
*/
SchemaDescriptor tdSd = td.getSchemaDescriptor();
SchemaDescriptor constraintSd = constraintSchemaName == null ? tdSd : dd.getSchemaDescriptor(constraintSchemaName, tc, true);
/* Get the constraint descriptor for the index, along
* with an exclusive row lock on the row in sys.sysconstraints
* in order to ensure that no one else compiles against the
* index.
*/
final ConstraintDescriptor conDesc = dd.getConstraintDescriptorByName(td, constraintSd, constraintName, true);
if (conDesc == null) {
throw StandardException.newException(SQLState.LANG_DROP_OR_ALTER_NON_EXISTING_CONSTRAINT, constraintSd.getSchemaName() + "." + constraintName, td.getQualifiedName());
}
if (characteristics[2] != ConstraintDefinitionNode.ENFORCED_DEFAULT) {
dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_11, "DEFERRED CONSTRAINTS");
if (constraintType == DataDictionary.FOREIGNKEY_CONSTRAINT || constraintType == DataDictionary.NOTNULL_CONSTRAINT || !characteristics[2]) /* not enforced */
{
// Remove when feature DERBY-532 is completed
if (!PropertyUtil.getSystemProperty("derby.constraintsTesting", "false").equals("true")) {
throw StandardException.newException(SQLState.NOT_IMPLEMENTED, "non-default enforcement");
}
}
}
// The first two characteristics are unused during ALTER CONSTRAINT; only
// enforcement can change.
conDesc.setEnforced(characteristics[2]);
int[] colsToSet = new int[1];
colsToSet[0] = SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_STATE;
dd.updateConstraintDescriptor(conDesc, conDesc.getUUID(), colsToSet, tc);
}
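Condensed, the pattern above is: switch the data dictionary to write mode, resolve the TableDescriptor, resolve the ConstraintDescriptor under an exclusive row lock, mutate it, and write back a single column of its SYS.SYSCONSTRAINTS row. Below is a minimal sketch of that pattern against the same API; the helper name setConstraintEnforced and its parameter list are hypothetical, and it assumes the same imports as the Derby file above (exact package paths vary by Derby version).

// Minimal sketch (not Derby code): flip enforcement on a named constraint,
// reusing only the dictionary calls shown above.
private void setConstraintEnforced(Activation activation, UUID tableId,
        String tableName, String constraintName, boolean enforced)
        throws StandardException {
    LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    DataDictionary dd = lcc.getDataDictionary();
    TransactionController tc = lcc.getTransactionExecute();

    // Tell the dictionary we are about to write, as in the action above.
    dd.startWriting(lcc);

    TableDescriptor td = dd.getTableDescriptor(tableId);
    if (td == null) {
        throw StandardException.newException(
                SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tableName);
    }

    // forUpdate = true: take the exclusive row lock on SYS.SYSCONSTRAINTS.
    ConstraintDescriptor cd = dd.getConstraintDescriptorByName(
            td, td.getSchemaDescriptor(), constraintName, true);
    if (cd == null) {
        throw StandardException.newException(
                SQLState.LANG_DROP_OR_ALTER_NON_EXISTING_CONSTRAINT,
                constraintName, td.getQualifiedName());
    }

    cd.setEnforced(enforced);

    // Persist only the STATE column of the SYSCONSTRAINTS row.
    int[] colsToSet = { SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_STATE };
    dd.updateConstraintDescriptor(cd, cd.getUUID(), colsToSet, tc);
}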
Use of org.apache.derby.iapi.sql.dictionary.TableDescriptor in project derby by apache.
The class GenericLanguageConnectionContext, method tempTablesXApostCommit.
private void tempTablesXApostCommit() throws StandardException {
TransactionController tc = getTransactionExecute();
// Drop all global temp tables declared in this transaction at the XA
// commit transaction boundary.
// Iterate from the end: remove(i) shifts later elements left, so a
// forward loop combined with remove(i) would skip every other entry.
for (int i = allDeclaredGlobalTempTables.size() - 1; i >= 0; i--) {
// remove all temp tables from this context.
TableDescriptor td = allDeclaredGlobalTempTables.get(i).getTableDescriptor();
// remove the conglomerate created for this temp table
tc.dropConglomerate(td.getHeapConglomerateId());
// remove it from the list of temp tables
allDeclaredGlobalTempTables.remove(i);
}
tc.commit();
}
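The loop above drains the list from the end on purpose: ArrayList.remove(i) shifts the remaining elements left, so a forward loop that also increments i skips every other entry. A self-contained illustration with a plain ArrayList:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class DrainDemo {
    public static void main(String[] args) {
        List<String> tables = Arrays.asList("t1", "t2", "t3", "t4");

        // Buggy pattern: remove(i) shifts elements left, then i++ skips one.
        List<String> buggy = new ArrayList<>(tables);
        for (int i = 0; i < buggy.size(); i++) {
            buggy.remove(i);
        }
        System.out.println(buggy);   // [t2, t4]: half the entries survive

        // Safe pattern: iterate from the end; indices below i are
        // unaffected by each removal.
        List<String> safe = new ArrayList<>(tables);
        for (int i = safe.size() - 1; i >= 0; i--) {
            safe.remove(i);
        }
        System.out.println(safe);    // []
    }
}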
Use of org.apache.derby.iapi.sql.dictionary.TableDescriptor in project derby by apache.
The class GenericLanguageConnectionContext, method tempTablesAndRollback.
/**
 * Do the necessary work at rollback time for temporary tables:
 * 1) If a temp table was declared in the UOW, then drop it and remove it
 *    from the list of temporary tables.
 * 2) If a temp table was declared and dropped in the UOW, then remove it
 *    from the list of temporary tables.
 * 3) If an existing temp table was dropped in the UOW, then recreate it
 *    with no data.
 * 4) If an existing temp table was modified in the UOW, then get rid of
 *    all the rows from the table.
 */
private void tempTablesAndRollback() throws StandardException {
for (int i = allDeclaredGlobalTempTables.size() - 1; i >= 0; i--) {
TempTableInfo tempTableInfo = allDeclaredGlobalTempTables.get(i);
if (tempTableInfo.getDeclaredInSavepointLevel() >= currentSavepointLevel) {
if (tempTableInfo.getDroppedInSavepointLevel() == -1) {
// the table was declared but not dropped in the unit of
// work getting rolled back and hence we will remove it
// from valid list of temporary tables and drop the
// conglomerate associated with it
TableDescriptor td = tempTableInfo.getTableDescriptor();
invalidateCleanupDroppedTable(td);
// remove the conglomerate created for this temp table
tran.dropConglomerate(td.getHeapConglomerateId());
// remove it from the list of temp tables
allDeclaredGlobalTempTables.remove(i);
} else if (tempTableInfo.getDroppedInSavepointLevel() >= currentSavepointLevel) {
// the table was declared and dropped in the unit of work
// getting rolled back
allDeclaredGlobalTempTables.remove(i);
}
} else if (tempTableInfo.getDroppedInSavepointLevel() >= currentSavepointLevel) {
// The table was declared in an earlier savepoint unit / transaction
// and then dropped in the current UOW. Restore the old definition of
// the temp table because the drop is being rolled back.
TableDescriptor td = tempTableInfo.getTableDescriptor();
td = cleanupTempTableOnCommitOrRollback(td, false);
// Replace the existing table descriptor with the old one, which
// carries the old conglomerate information for the temp table.
tempTableInfo.setTableDescriptor(td);
tempTableInfo.setDroppedInSavepointLevel(-1);
// The following marks the table as not modified, because the table
// data has been deleted as part of the current rollback.
tempTableInfo.setModifiedInSavepointLevel(-1);
allDeclaredGlobalTempTables.set(i, tempTableInfo);
} else if (tempTableInfo.getModifiedInSavepointLevel() >= currentSavepointLevel) {
// The table was declared in an earlier savepoint unit / transaction
// and modified in the current UOW. The following marks the table as
// not modified, because the table data will be deleted as part of
// the current rollback.
tempTableInfo.setModifiedInSavepointLevel(-1);
TableDescriptor td = tempTableInfo.getTableDescriptor();
invalidateCleanupDroppedTable(td);
}
// There is no else here because no special processing is required
// for temp tables declared in an earlier unit of work / transaction
// and not modified in the current one.
}
if (allDeclaredGlobalTempTables.isEmpty()) {
allDeclaredGlobalTempTables = null;
}
}
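The four cases in the javadoc reduce to comparing three recorded savepoint levels against the level being rolled back. Here is a minimal, self-contained sketch of that decision; the class, enum, and helper are hypothetical (not Derby code), and -1 mirrors the "never dropped / never modified" sentinel used by TempTableInfo above.

// Hypothetical sketch (not Derby code) of the rollback decision above.
public class TempTableRollbackDecision {

    enum Action { DROP_AND_FORGET, FORGET, RESTORE_DEFINITION, DELETE_ROWS, NONE }

    static Action decide(int declaredLevel, int droppedLevel,
                         int modifiedLevel, int rollbackLevel) {
        if (declaredLevel >= rollbackLevel) {
            if (droppedLevel == -1) {
                return Action.DROP_AND_FORGET;   // case 1: drop conglomerate, forget table
            }
            if (droppedLevel >= rollbackLevel) {
                return Action.FORGET;            // case 2: just forget the table
            }
            return Action.NONE; // unreachable in practice: a drop cannot precede its declare
        }
        if (droppedLevel >= rollbackLevel) {
            return Action.RESTORE_DEFINITION;    // case 3: undo the drop, table has no data
        }
        if (modifiedLevel >= rollbackLevel) {
            return Action.DELETE_ROWS;           // case 4: discard the modified rows
        }
        return Action.NONE;                      // untouched in the rolled-back unit of work
    }
}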
Use of org.apache.derby.iapi.sql.dictionary.TableDescriptor in project derby by apache.
The class BasicDependencyManager, method coreInvalidateFor.
/**
* A version of invalidateFor that does not provide synchronization among
* invalidators.
*
* @param p the provider
* @param action the action causing the invalidation
* @param lcc language connection context
*
* @throws StandardException if something goes wrong
*/
private void coreInvalidateFor(Provider p, int action, LanguageConnectionContext lcc) throws StandardException {
List<Dependency> list = getDependents(p);
if (list.isEmpty()) {
return;
}
// affectedCols is passed in from a table descriptor provider to
// indicate which columns it cares about; subsetCols is the
// intersection of affectedCols with the column bitmap found in the
// provider half of the SYSDEPENDS row, and identifies the columns
// that really matter. If the SYSDEPENDS row's dependent is a view
// (or possibly something else), the provider is a table, yet it may
// lack a column bitmap because the view was created by a server
// version that predates column dependencies. We really want the
// bitmap in cases such as DROP COLUMN, so older code generated the
// bitmaps on the fly and updated SYSDEPENDS.
//
// Note: Since the "previous version of server" mentioned above must
// be a version that predates Derby, and we don't support upgrade
// from those versions, we no longer have code to generate the column
// dependency list on the fly. Instead, an assert has been added to
// verify that we always have a column bitmap in SYSDEPENDS if the
// affectedCols bitmap is non-null.
FormatableBitSet affectedCols = null, subsetCols = null;
if (p instanceof TableDescriptor) {
affectedCols = ((TableDescriptor) p).getReferencedColumnMap();
if (affectedCols != null)
subsetCols = new FormatableBitSet(affectedCols.getLength());
}
{
StandardException noInvalidate = null;
// Iterate backwards: invalidations can remove entries from this
// list, which is why the index is re-checked against size() below.
for (int ei = list.size() - 1; ei >= 0; ei--) {
if (ei >= list.size())
continue;
Dependency dependency = list.get(ei);
Dependent dep = dependency.getDependent();
if (affectedCols != null) {
TableDescriptor td = (TableDescriptor) dependency.getProvider();
FormatableBitSet providingCols = td.getReferencedColumnMap();
if (providingCols == null) {
if (dep instanceof ViewDescriptor) {
// this code was removed as part of DERBY-6169.
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("Expected view to " + "have referenced column bitmap");
}
} else {
    // dep is not a ViewDescriptor: clear the provider's column map
    ((TableDescriptor) p).setReferencedColumnMap(null);
}
} else {
    // providingCols != null: intersect with the affected columns
subsetCols.copyFrom(affectedCols);
subsetCols.and(providingCols);
if (subsetCols.anySetBit() == -1)
continue;
((TableDescriptor) p).setReferencedColumnMap(subsetCols);
}
}
// Invalidate the dependent. Collect exceptions from invalidations
// that fail instead of throwing immediately, so the remaining
// dependents are still processed; the chained exception is thrown
// after the loop.
try {
dep.prepareToInvalidate(p, action, lcc);
} catch (StandardException sqle) {
if (noInvalidate == null) {
noInvalidate = sqle;
} else {
try {
sqle.initCause(noInvalidate);
noInvalidate = sqle;
} catch (IllegalStateException ise) {
// We weren't able to chain the exceptions. That's
// OK, since we always have the first exception we
// caught. Just skip the current exception.
}
}
}
if (noInvalidate == null) {
if (affectedCols != null)
((TableDescriptor) p).setReferencedColumnMap(affectedCols);
// REVISIT: future impl will want to mark the individual
// dependency as invalid as well as the dependent...
dep.makeInvalid(action, lcc);
}
}
if (noInvalidate != null)
throw noInvalidate;
}
}
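The column-pruning step in the middle of this method is just a bitmap intersection: a dependent is invalidated only if the columns the provider reports as affected overlap the columns the dependent actually references. A self-contained illustration using java.util.BitSet in place of Derby's FormatableBitSet:

import java.util.BitSet;

public class ColumnOverlapDemo {
    public static void main(String[] args) {
        BitSet affectedCols = new BitSet();   // columns being altered (provider side)
        affectedCols.set(2);                  // e.g. ALTER TABLE ... DROP COLUMN on column 2

        BitSet providingCols = new BitSet();  // columns this dependent references
        providingCols.set(0);
        providingCols.set(1);

        BitSet subset = (BitSet) affectedCols.clone();
        subset.and(providingCols);            // mirrors subsetCols.and(providingCols)

        if (subset.isEmpty()) {
            // mirrors "if (subsetCols.anySetBit() == -1) continue;"
            System.out.println("no overlap, dependent is not invalidated");
        } else {
            System.out.println("overlap on columns " + subset + ": invalidate");
        }
    }
}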
Use of org.apache.derby.iapi.sql.dictionary.TableDescriptor in project derby by apache.
The class PredicateList, method selectivity.
/**
* @see OptimizablePredicateList#selectivity
*/
public double selectivity(Optimizable optTable) throws StandardException {
TableDescriptor td = optTable.getTableDescriptor();
ConglomerateDescriptor[] conglomerates = td.getConglomerateDescriptors();
int numPredicates = size();
int numConglomerates = conglomerates.length;
if (numConglomerates == 1)
// one conglomerate; must be heap.
return -1.0d;
if (numPredicates == 0)
// no predicates, why bother?
return -1.0d;
boolean nothingYet = true;
/* Before we start, let's select non-redundant predicates into a
 * working list; we'll work with the workingPredicates list from now
 * on in this routine.
 */
PredicateList workingPredicates = new PredicateList(getContextManager());
for (int i = 0; i < numPredicates; i++) {
if (isRedundantPredicate(i))
continue;
/* to workingPredicates only add useful predicates... */
workingPredicates.addOptPredicate(elementAt(i));
}
int numWorkingPredicates = workingPredicates.size();
/*--------------------------------------------------------------------
* In the first phase, the routine initializes an array of
* predicateWrapperLists-- one list for each conglomerate that has
* statistics.
*
* predsForConglomerates is an array of pwList. For each conglomerate we
* keep a pwList of predicates that have an equals predicate on a column
* in the conglomerate.
*
* As an example consider a table T, with indices on
* T(c1)-->conglom_one, T(c2,c1)-->conglom_two.
*
* if we have the following predicates:
* T.c1=T1.x (p1) and T.c1=T2.y (p2) and T.c2=T1.z (p3), then after
* the first loop is done, we'll have the following setup:
*
* conglom_one: pwList [p1,p2]
* conglom_two: pwList [p1,p2,p3].
*
* Note that although p1,p2 appear on both conglomerates, the
* indexPosition of p1 and p2 on the first list is 0 (first index
* position) while the index position of p1,p2 on the second list is 1
* (second index position).
*
* PredicateWrapper and PredicateWrapperLists are inner classes used
* only in this file.
* -------------------------------------------------------------------- */
PredicateWrapperList[] predsForConglomerates = new PredicateWrapperList[numConglomerates];
for (int i = 0; i < numConglomerates; i++) {
ConglomerateDescriptor cd = conglomerates[i];
if (!cd.isIndex())
continue;
if (!td.statisticsExist(cd))
continue;
int[] baseColumnList = cd.getIndexDescriptor().baseColumnPositions();
for (int j = 0; j < numWorkingPredicates; j++) {
Predicate pred = workingPredicates.elementAt(j);
int ip = pred.hasEqualOnColumnList(baseColumnList, optTable);
if (ip < 0)
// look at the next predicate.
continue;
nothingYet = false;
if (predsForConglomerates[i] == null) {
predsForConglomerates[i] = new PredicateWrapperList(numWorkingPredicates);
}
PredicateWrapper newpw = new PredicateWrapper(ip, pred, j);
predsForConglomerates[i].insert(newpw);
}
// end of for (int j = 0; j < numWorkingPredicates; j++)
}
if (nothingYet) {
return -1.0;
}
/*------------------------------------------------------------------
* In the second phase we,
* walk the predsForConglomerateList again-- if we find
* a break in the indexPositions we remove the predicates
* after the gap. To clarify, if we have equal predicates on the first
* and the third index positions, we can throw away the predicate on
* the 3rd index position-- it doesn't do us any good.
*-------------------------------------------------------------------*/
for (int i = 0; i < numConglomerates; i++) {
if (predsForConglomerates[i] == null)
continue;
predsForConglomerates[i].retainLeadingContiguous();
}
// end of for (int i = 0; i < numConglomerates; i++)
calculateWeight(predsForConglomerates, numWorkingPredicates);
/*-------------------------------------------------------------------
* In the third phase we loop through predsForConglomerates choosing the
* best fit (chooseLongestMatch) of predicates. we use the statistics
* for the set of predicates returned by chooseLongestMatch and then
* loop until we can't find any more statistics or we have exhausted all
* the predicates for which we are trying to find statistics.
*--------------------------------------------------------------------*/
double selectivity = 1.0;
ArrayList<Predicate> maxPreds = new ArrayList<Predicate>();
while (true) {
maxPreds.clear();
int conglomIndex = chooseLongestMatch(predsForConglomerates, maxPreds, numWorkingPredicates);
if (conglomIndex == -1)
// no more stats available.
break;
selectivity *= td.selectivityForConglomerate(conglomerates[conglomIndex], maxPreds.size());
for (int i = 0; i < maxPreds.size(); i++) {
/* remove the predicates that we've calculated the selectivity
* of, from workingPredicates.
*/
Predicate p = maxPreds.get(i);
workingPredicates.removeOptPredicate(p);
}
if (workingPredicates.size() == 0)
break;
}
if (workingPredicates.size() != 0) {
selectivity *= workingPredicates.selectivityNoStatistics(optTable);
}
return selectivity;
}
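The "break in the indexPositions" rule from phase two can be shown in isolation: equality predicates only contribute to index statistics while they form a contiguous leading prefix of the index columns. A self-contained sketch (the helper below is hypothetical, not Derby's PredicateWrapperList.retainLeadingContiguous):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class LeadingPrefixDemo {
    // Keep only index positions forming the contiguous prefix 0,1,2,...
    // This is the idea behind retainLeadingContiguous(): a predicate on
    // an index column after a gap does the statistics no good.
    static List<Integer> retainLeadingContiguous(List<Integer> sortedIndexPositions) {
        List<Integer> kept = new ArrayList<>();
        int expected = 0;
        for (int pos : sortedIndexPositions) {
            if (pos != expected) break;   // gap found: drop the rest
            kept.add(pos);
            expected++;
        }
        return kept;
    }

    public static void main(String[] args) {
        // equality predicates on the 1st and 3rd index columns:
        System.out.println(retainLeadingContiguous(Arrays.asList(0, 2)));    // [0]
        // equality predicates on the 1st, 2nd and 3rd index columns:
        System.out.println(retainLeadingContiguous(Arrays.asList(0, 1, 2))); // [0, 1, 2]
    }
}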