Use of org.apache.derby.iapi.store.access.TransactionController in project derby by apache.
The class DataDictionaryImpl, method getAllDependencyDescriptorsList.
/**
* Build and return an List with DependencyDescriptors for
* all of the stored dependencies.
* This is useful for consistency checking.
*
* @return List List of all DependencyDescriptors.
*
* @exception StandardException Thrown on failure
*/
public List<TupleDescriptor> getAllDependencyDescriptorsList() throws StandardException {
ScanController scanController;
TransactionController tc;
ExecRow outRow;
ExecRow templateRow;
List<TupleDescriptor> ddl = newSList();
TabInfoImpl ti = getNonCoreTI(SYSDEPENDS_CATALOG_NUM);
SYSDEPENDSRowFactory rf = (SYSDEPENDSRowFactory) ti.getCatalogRowFactory();
// Get the current transaction controller
tc = getTransactionCompile();
outRow = rf.makeEmptyRow();
scanController = tc.openScan(
        ti.getHeapConglomerate(),                          // conglomerate to open
        false,                                             // don't hold open across commit
        0,                                                 // for read
        TransactionController.MODE_TABLE,                  // scans entire table
        TransactionController.ISOLATION_REPEATABLE_READ,
        (FormatableBitSet) null,                           // all fields as objects
        null,                                              // start position - first row
        ScanController.GE,                                 // startSearchOperation
        null,                                              // scanQualifier
        null,                                              // stop position - through last row
        ScanController.GT);                                // stopSearchOperation
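// With null start and stop positions the scan runs from the first row through
// the last row, so every row of the SYSDEPENDS heap is visited.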
while (scanController.fetchNext(outRow.getRowArray())) {
DependencyDescriptor dependencyDescriptor;
dependencyDescriptor = (DependencyDescriptor) rf.buildDescriptor(outRow, (TupleDescriptor) null, this);
ddl.add(dependencyDescriptor);
}
scanController.close();
return ddl;
}
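For orientation, here is a hedged sketch of how a caller might consume the returned list for the consistency checking mentioned in the javadoc. The dd handle and the checks performed are assumptions for illustration, not part of the Derby code above.
// Hypothetical caller; dd is a DataDictionary reference obtained elsewhere
// (for example from the LanguageConnectionContext), assuming the method is
// reachable through that reference.
List<TupleDescriptor> deps = dd.getAllDependencyDescriptorsList();
for (TupleDescriptor tuple : deps) {
    // each SYSDEPENDS row was turned into exactly one DependencyDescriptor above
    DependencyDescriptor dep = (DependencyDescriptor) tuple;
    // ... verify that both the dependent and the provider still resolve ...
}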
Use of org.apache.derby.iapi.store.access.TransactionController in project derby by apache.
The class DataDictionaryImpl, method getConstraintDescriptorViaHeap.
/**
* Return a (single or list of) catalog row descriptor(s) from
* SYSCONSTRAINTS through a heap scan
*
* @param scanQualifiers qualifiers
* @param ti The TabInfoImpl to use
* @param parentTupleDescriptor The parentDescriptor, if applicable.
* @param list The list to build, if supplied.
* If null, then caller expects a single descriptor
*
* @return The last matching descriptor
*
* @exception StandardException Thrown on error
*/
protected TupleDescriptor getConstraintDescriptorViaHeap(ScanQualifier[][] scanQualifiers, TabInfoImpl ti, TupleDescriptor parentTupleDescriptor, ConstraintDescriptorList list) throws StandardException {
SYSCONSTRAINTSRowFactory rf = (SYSCONSTRAINTSRowFactory) ti.getCatalogRowFactory();
ExecRow outRow;
ScanController scanController;
TransactionController tc;
ConstraintDescriptor cd = null;
// Get the current transaction controller
tc = getTransactionCompile();
outRow = rf.makeEmptyRow();
/*
** Table scan
*/
scanController = tc.openScan(
        ti.getHeapConglomerate(),                          // conglomerate to open
        false,                                             // don't hold open across commit
        0,                                                 // for read
        TransactionController.MODE_TABLE,
        TransactionController.ISOLATION_REPEATABLE_READ,
        (FormatableBitSet) null,                           // all fields as objects
        (DataValueDescriptor[]) null,                      // start position - first row
        0,                                                 // startSearchOperation - none
        scanQualifiers,                                    // scanQualifier
        (DataValueDescriptor[]) null,                      // stop position - through last row
        0);                                                // stopSearchOperation - none
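// Here the start and stop positions are null as well, so this is again a full
// heap scan; filtering is done purely by the scanQualifiers passed by the caller.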
try {
while (scanController.fetchNext(outRow.getRowArray())) {
SubConstraintDescriptor subCD = null;
switch(rf.getConstraintType(outRow)) {
case DataDictionary.PRIMARYKEY_CONSTRAINT:
case DataDictionary.FOREIGNKEY_CONSTRAINT:
case DataDictionary.UNIQUE_CONSTRAINT:
subCD = getSubKeyConstraint(rf.getConstraintId(outRow), rf.getConstraintType(outRow));
break;
case DataDictionary.CHECK_CONSTRAINT:
subCD = getSubCheckConstraint(rf.getConstraintId(outRow));
break;
default:
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("unexpected value from " + " rf.getConstraintType(outRow) " + rf.getConstraintType(outRow));
}
}
if (SanityManager.DEBUG) {
SanityManager.ASSERT(subCD != null, "subCD is expected to be non-null");
}
cd = (ConstraintDescriptor) rf.buildDescriptor(outRow, subCD, this);
/* If list is null, then the caller only wants a single descriptor - we're done;
 * otherwise just add the current descriptor to the list.
 */
if (list == null) {
break;
} else {
list.add(cd);
}
}
} finally {
scanController.close();
}
return cd;
}
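The list argument decides between the two documented modes: stop at the first qualifying row, or accumulate every match. A hedged caller sketch, written as if from inside DataDictionaryImpl (the catalog-number constant and the meaning of a null qualifier array as "no filtering" are assumptions here):
// Hypothetical: gather every SYSCONSTRAINTS row into a list.
TabInfoImpl consTI = getNonCoreTI(SYSCONSTRAINTS_CATALOG_NUM);
ConstraintDescriptorList allConstraints = new ConstraintDescriptorList();
getConstraintDescriptorViaHeap((ScanQualifier[][]) null, consTI, (TupleDescriptor) null, allConstraints);
// Hypothetical: fetch only the first row matching some previously built qualifiers
// (someQualifiers is a placeholder, not shown here).
ConstraintDescriptor firstMatch = (ConstraintDescriptor)
        getConstraintDescriptorViaHeap(someQualifiers, consTI, (TupleDescriptor) null, null);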
Use of org.apache.derby.iapi.store.access.TransactionController in project derby by apache.
The class DataDictionaryImpl, method getConstraintDescriptorViaIndex.
/**
* Return a (single or list of) ConstraintDescriptor(s) from
* SYSCONSTRAINTS where the access is from the index to the heap.
*
* @param indexId The id of the index (0 to # of indexes on table) to use
* @param keyRow The supplied ExecIndexRow for search
* @param ti The TabInfoImpl to use
* @param td The TableDescriptor, if supplied.
* @param dList The list to build, if supplied. If null, then caller expects
* a single descriptor
* @param forUpdate Whether or not to open scan for update
*
* @return The last matching descriptor
*
* @exception StandardException Thrown on error
*/
protected ConstraintDescriptor getConstraintDescriptorViaIndex(int indexId, ExecIndexRow keyRow, TabInfoImpl ti, TableDescriptor td, ConstraintDescriptorList dList, boolean forUpdate) throws StandardException {
SYSCONSTRAINTSRowFactory rf = (SYSCONSTRAINTSRowFactory) ti.getCatalogRowFactory();
ConglomerateController heapCC;
ConstraintDescriptor cd = null;
ExecIndexRow indexRow1;
ExecRow outRow;
RowLocation baseRowLocation;
ScanController scanController;
TransactionController tc;
// Get the current transaction controller
tc = getTransactionCompile();
outRow = rf.makeEmptyRow();
heapCC = tc.openConglomerate(ti.getHeapConglomerate(), false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_REPEATABLE_READ);
/* Scan the index and go to the data pages for qualifying rows to
* build the column descriptor.
*/
scanController = tc.openScan(
        ti.getIndexConglomerate(indexId),                  // conglomerate to open
        false,                                             // don't hold open across commit
        (forUpdate) ? TransactionController.OPENMODE_FORUPDATE : 0,
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_REPEATABLE_READ,
        (FormatableBitSet) null,                           // all fields as objects
        keyRow.getRowArray(),                              // start position - exact key match
        ScanController.GE,                                 // startSearchOperation
        null,                                              // scanQualifier
        keyRow.getRowArray(),                              // stop position - exact key match
        ScanController.GT);                                // stopSearchOperation
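// Using keyRow as both the start position (GE) and the stop position (GT) makes
// this an exact-match scan: only index rows equal to the key are visited.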
while (scanController.next()) {
SubConstraintDescriptor subCD = null;
// create an index row template
indexRow1 = getIndexRowFromHeapRow(ti.getIndexRowGenerator(indexId), heapCC.newRowLocationTemplate(), outRow);
scanController.fetch(indexRow1.getRowArray());
baseRowLocation = (RowLocation) indexRow1.getColumn(indexRow1.nColumns());
boolean base_row_exists = heapCC.fetch(baseRowLocation, outRow.getRowArray(), (FormatableBitSet) null);
if (SanityManager.DEBUG) {
// it should not be possible for the heap row to disappear while
// holding a scan cursor on the index at ISOLATION_REPEATABLE_READ.
SanityManager.ASSERT(base_row_exists, "base row doesn't exist");
}
switch(rf.getConstraintType(outRow)) {
case DataDictionary.PRIMARYKEY_CONSTRAINT:
case DataDictionary.FOREIGNKEY_CONSTRAINT:
case DataDictionary.UNIQUE_CONSTRAINT:
subCD = getSubKeyConstraint(rf.getConstraintId(outRow), rf.getConstraintType(outRow));
break;
case DataDictionary.CHECK_CONSTRAINT:
subCD = getSubCheckConstraint(rf.getConstraintId(outRow));
break;
default:
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("unexpected value " + "from rf.getConstraintType(outRow)" + rf.getConstraintType(outRow));
}
}
if (SanityManager.DEBUG) {
SanityManager.ASSERT(subCD != null, "subCD is expected to be non-null");
}
/* Cache the TD in the SCD so that
* the row factory doesn't need to go
* out to disk to get it.
*/
subCD.setTableDescriptor(td);
cd = (ConstraintDescriptor) rf.buildDescriptor(outRow, subCD, this);
/* If dList is null, then caller only wants a single descriptor - we're done
* else just add the current descriptor to the list.
*/
if (dList == null) {
break;
} else {
dList.add(cd);
}
}
scanController.close();
heapCC.close();
return cd;
}
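A hedged sketch of a typical call, looking up a single constraint by UUID through the CONSTRAINTID index. The index constant, the exFactory field, the getIDValueAsCHAR helper and the constraintUUID variable are assumed from the surrounding class for illustration and are not part of the snippet above:
// Hypothetical caller inside DataDictionaryImpl.
TabInfoImpl consTI = getNonCoreTI(SYSCONSTRAINTS_CATALOG_NUM);
DataValueDescriptor idOrderable = getIDValueAsCHAR(constraintUUID);
// one-column key: the constraint's UUID string
ExecIndexRow keyRow = exFactory.getIndexableRow(1);
keyRow.setColumn(1, idOrderable);
ConstraintDescriptor single = getConstraintDescriptorViaIndex(
        SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_INDEX1_ID,   // index on CONSTRAINTID (assumed)
        keyRow, consTI, (TableDescriptor) null,
        (ConstraintDescriptorList) null,                     // null list: single descriptor wanted
        false);                                              // not for update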
Use of org.apache.derby.iapi.store.access.TransactionController in project derby by apache.
The class SequenceUpdater, method setIdentity.
/**
* @see Cacheable#setIdentity
*
* @exception StandardException Thrown on error
*/
public Cacheable setIdentity(Object key) throws StandardException {
if (SanityManager.DEBUG) {
if (!(key instanceof String)) {
SanityManager.THROWASSERT("Key for a SequenceUpdater is a " + key.getClass().getName());
}
if ((_uuidString != null) || (_sequenceGenerator != null)) {
SanityManager.THROWASSERT("Identity being changed on a live cacheable. Old uuidString = " + _uuidString);
}
}
_uuidString = (String) key;
if (_sequenceGenerator == null) {
TransactionController executionTC = getLCC().getTransactionExecute();
//
// We lookup information in a read-only subtransaction in order to minimize
// contention. Since this is a read-only subtransaction, there should be
// no conflict with the parent transaction.
//
TransactionController subTransaction = executionTC.startNestedUserTransaction(true, true);
try {
_sequenceGenerator = createSequenceGenerator(subTransaction);
} finally {
// if we failed to get a generator, we have no identity. see DERBY-5389.
if (_sequenceGenerator == null) {
_uuidString = null;
}
subTransaction.commit();
subTransaction.destroy();
}
}
if (_sequenceGenerator != null) {
return this;
} else {
return null;
}
}
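The try/finally above is the general Derby pattern for doing dictionary work in a nested, read-only subtransaction so that the parent transaction does not pick up extra locks. A minimal sketch of that pattern, assuming only what the snippet shows (the meaning of the two boolean flags is taken from the comment above and is otherwise an assumption):
TransactionController parent = getLCC().getTransactionExecute();
// first flag: read-only; second flag: flush the log when the subtransaction ends (assumed)
TransactionController nested = parent.startNestedUserTransaction(true, true);
try {
    // ... read catalog rows using `nested` instead of `parent` ...
} finally {
    // release the subtransaction even if the read above failed
    nested.commit();
    nested.destroy();
}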
Use of org.apache.derby.iapi.store.access.TransactionController in project derby by apache.
The class InsertNode, method bindStatement.
/**
* Bind this InsertNode. This means looking up tables and columns and
* getting their types, and figuring out the result types of all
* expressions, as well as doing view resolution, permissions checking,
* etc.
* <p>
* Binding an insert will also massage the tree so that
* the collist and select column order/number are the
* same as the layout of the table in the store.
*
* @exception StandardException Thrown on error
*/
@Override
public void bindStatement() throws StandardException {
// We just need select privilege on the expressions
getCompilerContext().pushCurrentPrivType(Authorizer.SELECT_PRIV);
FromList fromList = new FromList(getOptimizerFactory().doJoinOrderOptimization(), getContextManager());
/* If any underlying ResultSetNode is a SelectNode, then we
* need to do a full bind(), including the expressions
* (since the fromList may include a FromSubquery).
*/
DataDictionary dataDictionary = getDataDictionary();
super.bindResultSetsWithTables(dataDictionary);
/*
** Get the TableDescriptor for the table we are inserting into
*/
verifyTargetTable();
// Check the validity of the targetProperties, if they exist
if (targetProperties != null) {
verifyTargetProperties(dataDictionary);
}
/*
** Get the resultColumnList representing the columns in the base
** table or VTI. We don't bother adding any permission checks here
** because they are assumed by INSERT permission on the table.
*/
IgnoreFilter ignorePermissions = new IgnoreFilter();
getCompilerContext().addPrivilegeFilter(ignorePermissions);
getResultColumnList();
/* If we have a target column list, then it must have the same # of
* entries as the result set's RCL.
*/
if (targetColumnList != null) {
/*
* Normalize synonym qualifiers for column references.
*/
if (synonymTableName != null) {
normalizeSynonymColumns(targetColumnList, targetTableName);
}
/* Bind the target column list */
getCompilerContext().pushCurrentPrivType(getPrivType());
if (targetTableDescriptor != null) {
targetColumnList.bindResultColumnsByName(targetTableDescriptor, (DMLStatementNode) this);
} else {
targetColumnList.bindResultColumnsByName(targetVTI.getResultColumns(), targetVTI, this);
}
getCompilerContext().popCurrentPrivType();
}
getCompilerContext().removePrivilegeFilter(ignorePermissions);
/* Verify that all underlying ResultSets reclaimed their FromList */
if (SanityManager.DEBUG) {
SanityManager.ASSERT(fromList.size() == 0, "fromList.size() is expected to be 0, not " + fromList.size() + " on return from RS.bindExpressions()");
}
/* Replace any DEFAULTs with the associated tree, or flag DEFAULTs if
* not allowed (inside top level set operator nodes). Subqueries are
* checked for illegal DEFAULTs elsewhere.
*/
boolean isTableConstructor = (resultSet instanceof UnionNode && ((UnionNode) resultSet).tableConstructor()) || resultSet instanceof RowResultSetNode;
//
// For the MERGE statement, DEFAULT expressions in the SELECT node
// may have been replaced with generated expressions already.
//
ResultColumnList tempRCL = resultSet.getResultColumns();
boolean defaultsWereReplaced = false;
for (int i = 0; i < tempRCL.size(); i++) {
ResultColumn rc = tempRCL.getResultColumn(i + 1);
if (rc.wasDefaultColumn()) {
defaultsWereReplaced = true;
}
}
resultSet.replaceOrForbidDefaults(targetTableDescriptor, targetColumnList, isTableConstructor);
/* Bind the expressions now that the result columns are bound
* NOTE: This will be the 2nd time for those underlying ResultSets
* that have tables (no harm done), but it is necessary for those
* that do not have tables. It's too hard/not worth the effort to
* avoid the redundancy.
*/
super.bindExpressions();
//
if (isPrivilegeCollectionRequired()) {
getCompilerContext().pushCurrentPrivType(getPrivType());
getCompilerContext().addRequiredTablePriv(targetTableDescriptor);
getCompilerContext().popCurrentPrivType();
}
// Now stop adding permissions checks.
getCompilerContext().addPrivilegeFilter(ignorePermissions);
/*
** If the result set is a union, it could be a table constructor.
** Bind any nulls in the result columns of the table constructor
** to the types of the table being inserted into.
**
** The types of ? parameters in row constructors and table constructors
** in an INSERT statement come from the result columns.
**
** If there is a target column list, use that instead of the result
** columns for the whole table, since the columns in the result set
** correspond to the target column list.
*/
if (targetColumnList != null) {
if (resultSet.getResultColumns().visibleSize() > targetColumnList.size())
throw StandardException.newException(SQLState.LANG_DB2_INVALID_COLS_SPECIFIED);
resultSet.bindUntypedNullsToResultColumns(targetColumnList);
resultSet.setTableConstructorTypes(targetColumnList);
} else {
if (resultSet.getResultColumns().visibleSize() > resultColumnList.size())
throw StandardException.newException(SQLState.LANG_DB2_INVALID_COLS_SPECIFIED);
resultSet.bindUntypedNullsToResultColumns(resultColumnList);
resultSet.setTableConstructorTypes(resultColumnList);
}
/* Bind the columns of the result set to their expressions */
resultSet.bindResultColumns(fromList);
int resCols = resultSet.getResultColumns().visibleSize();
DataDictionary dd = getDataDictionary();
if (targetColumnList != null) {
if (targetColumnList.size() != resCols)
throw StandardException.newException(SQLState.LANG_DB2_INVALID_COLS_SPECIFIED);
} else {
if (targetTableDescriptor != null && targetTableDescriptor.getNumberOfColumns() != resCols)
throw StandardException.newException(SQLState.LANG_DB2_INVALID_COLS_SPECIFIED);
}
/* See if the ResultSet's RCL needs to be ordered to match the target
* list, or "enhanced" to accommodate defaults. It can only need to
* be ordered if there is a target column list. It needs to be
* enhanced if there are fewer source columns than there are columns
* in the table.
*/
boolean inOrder = true;
int numTableColumns = resultColumnList.size();
/* colMap[] will be the size of the target list, which could be larger
* than the current size of the source list. In that case, the source
* list will be "enhanced" to include defaults.
*/
int[] colMap = new int[numTableColumns];
// set the fields to an unused value
for (int i = 0; i < colMap.length; i++) {
colMap[i] = -1;
}
/* Create the source/target list mapping */
if (targetColumnList != null) {
/*
** There is a target column list, so the result columns might
** need to be ordered. Step through the target column list
** and remember the position in the target table of each column.
** Remember if any of the columns are out of order.
*/
int targetSize = targetColumnList.size();
for (int index = 0; index < targetSize; index++) {
int position = targetColumnList.elementAt(index).getColumnDescriptor().getPosition();
if (index != position - 1) {
inOrder = false;
}
// position is 1-based; colMap indexes and entries are 0-based.
colMap[position - 1] = index;
}
} else {
/*
** There is no target column list, so the result columns in the
** source are presumed to be in the same order as the target
** table.
*/
for (int position = 0; position < resultSet.getResultColumns().visibleSize(); position++) {
colMap[position] = position;
}
}
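/* Worked example (hypothetical, for illustration only): for a three-column table
 * T(C1, C2, C3) and the statement INSERT INTO T(C3, C1) SELECT ..., the loop above
 * records colMap[3 - 1] = 0 (C3 comes from source column 0) and colMap[1 - 1] = 1
 * (C1 comes from source column 1), leaving colMap[1] at -1. The resulting map
 * {1, -1, 0} is not the identity, so inOrder is false, and the -1 entry means C2
 * must later be "enhanced" with its DEFAULT.
 */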
// Bind the ORDER BY columns
if (orderByList != null) {
orderByList.pullUpOrderByColumns(resultSet);
// The select list may have new columns now, make sure to bind
// those.
super.bindExpressions();
orderByList.bindOrderByColumns(resultSet);
}
bindOffsetFetch(offset, fetchFirst);
resultSet = enhanceAndCheckForAutoincrement(resultSet, inOrder, colMap, defaultsWereReplaced);
resultColumnList.checkStorableExpressions(resultSet.getResultColumns());
/* Insert a NormalizeResultSetNode above the source if the source
* and target column types and lengths do not match.
*/
if (!resultColumnList.columnTypesAndLengthsMatch(resultSet.getResultColumns())) {
resultSet = new NormalizeResultSetNode(resultSet, resultColumnList, null, false, getContextManager());
}
if (targetTableDescriptor != null) {
ResultColumnList sourceRCL = resultSet.getResultColumns();
sourceRCL.copyResultColumnNames(resultColumnList);
/* bind all generation clauses for generated columns */
parseAndBindGenerationClauses(dataDictionary, targetTableDescriptor, sourceRCL, resultColumnList, false, null);
/* Get and bind all constraints on the table */
boolean[] hasDCC = new boolean[] { false /* a priori */ };
checkConstraints = bindConstraints(
        dataDictionary, getOptimizerFactory(), targetTableDescriptor,
        null, sourceRCL, (int[]) null, (FormatableBitSet) null,
        true,   // we always include triggers in core language
        hasDCC);
hasDeferrableCheckConstraints = hasDCC[0];
/*
** Deferred if:
** If the target table is also a source table
** Self-referencing foreign key constraint
** trigger
*/
if (resultSet.referencesTarget(targetTableDescriptor.getName(), true) || requiresDeferredProcessing()) {
deferred = true;
/* Disallow bulk insert replace when target table
* is also a source table.
*/
if (bulkInsertReplace && resultSet.referencesTarget(targetTableDescriptor.getName(), true)) {
throw StandardException.newException(SQLState.LANG_INVALID_BULK_INSERT_REPLACE, targetTableDescriptor.getQualifiedName());
}
}
/* Get the list of indexes on the table being inserted into */
getAffectedIndexes(targetTableDescriptor);
TransactionController tc = getLanguageConnectionContext().getTransactionCompile();
autoincRowLocation = dd.computeAutoincRowLocations(tc, targetTableDescriptor);
} else {
deferred = VTIDeferModPolicy.deferIt(DeferModification.INSERT_STATEMENT, targetVTI, null, resultSet);
}
identitySequenceUUIDString = getUUIDofSequenceGenerator();
getCompilerContext().removePrivilegeFilter(ignorePermissions);
getCompilerContext().popCurrentPrivType();
}