Use of org.apache.derby.iapi.sql.dictionary.DataDictionary in the Apache Derby project:
the class CreateTableConstantAction, method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for CREATE TABLE.
*
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
TableDescriptor td;
UUID toid;
SchemaDescriptor schemaDescriptor;
ColumnDescriptor columnDescriptor;
ExecRow template;
// Gather the per-statement execution context objects up front.
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
/* Mark the activation as being for create table */
activation.setForCreateTable();
// setup for create conglomerate call:
// o create row template to tell the store what type of rows this
// table holds.
// o create array of collation id's to tell collation id of each
// column in table.
template = RowUtil.getEmptyValueRow(columnInfo.length, lcc);
int[] collation_ids = new int[columnInfo.length];
for (int ix = 0; ix < columnInfo.length; ix++) {
ColumnInfo col_info = columnInfo[ix];
if (col_info.defaultValue != null) {
/* If there is a default value, use it, otherwise use null */
// Note: store columns are 1-based, hence ix + 1.
template.setColumn(ix + 1, col_info.defaultValue);
} else {
template.setColumn(ix + 1, col_info.dataType.getNull());
}
// get collation info for each column.
collation_ids[ix] = col_info.dataType.getCollationType();
}
/* create the conglomerate to hold the table's rows
 * RESOLVE - If we ever have a conglomerate creator
 * that lets us specify the conglomerate number then
 * we will need to handle it here.
 */
// Temporary tables get IS_TEMPORARY | IS_KEPT so the store keeps the
// conglomerate across commits but treats it as non-persistent.
long conglomId = tc.createConglomerate(// we're requesting a heap conglomerate
"heap", // row template
template.getRowArray(), // column sort order - not required for heap
null, collation_ids, // properties
properties, tableType == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE ? (TransactionController.IS_TEMPORARY | TransactionController.IS_KEPT) : TransactionController.IS_DEFAULT);
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
// Temporary tables are not written to the system catalogs, so no
// write-mode transition is needed for them.
if (tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE)
dd.startWriting(lcc);
SchemaDescriptor sd;
if (tableType == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE)
sd = dd.getSchemaDescriptor(schemaName, tc, true);
else
sd = DDLConstantAction.getSchemaDescriptorForCreate(dd, activation, schemaName);
//
// Create a new table descriptor.
//
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
if (tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {
// Persistent table: the descriptor is stored in SYSTABLES, which
// also assigns it a UUID.
td = ddg.newTableDescriptor(tableName, sd, tableType, lockGranularity);
dd.addDescriptor(td, sd, DataDictionary.SYSTABLES_CATALOG_NUM, false, tc);
} else {
// Temporary table: never cataloged, so generate the UUID manually.
td = ddg.newTableDescriptor(tableName, sd, tableType, onCommitDeleteRows, onRollbackDeleteRows);
td.setUUID(dd.getUUIDFactory().createUUID());
}
toid = td.getUUID();
// Save the TableDescriptor off in the Activation
activation.setDDLTableDescriptor(td);
/* NOTE: We must write the columns out to the system
 * tables before any of the conglomerates, including
 * the heap, since we read the columns before the
 * conglomerates when building a TableDescriptor.
 * This will hopefully reduce the probability of
 * a deadlock involving those system tables.
 */
// for each column, stuff system.column
// Column positions in the descriptors are 1-based.
int index = 1;
ColumnDescriptor[] cdlArray = new ColumnDescriptor[columnInfo.length];
for (int ix = 0; ix < columnInfo.length; ix++) {
UUID defaultUUID = columnInfo[ix].newDefaultUUID;
/* Generate a UUID for the default, if one exists
 * and there is no default id yet.
 */
if (columnInfo[ix].defaultInfo != null && defaultUUID == null) {
defaultUUID = dd.getUUIDFactory().createUUID();
}
if (// dealing with autoinc column
columnInfo[ix].autoincInc != 0) {
columnDescriptor = new ColumnDescriptor(columnInfo[ix].name, index++, columnInfo[ix].dataType, columnInfo[ix].defaultValue, columnInfo[ix].defaultInfo, td, defaultUUID, columnInfo[ix].autoincStart, columnInfo[ix].autoincInc, columnInfo[ix].autoinc_create_or_modify_Start_Increment, columnInfo[ix].autoincCycle);
// On 10.11+ dictionaries, identity columns are backed by a system
// sequence generator, which is created here as a nested action.
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_11, null)) {
CreateSequenceConstantAction csca = makeCSCA(columnInfo[ix], TableDescriptor.makeSequenceName(toid));
csca.executeConstantAction(activation);
}
} else {
columnDescriptor = new ColumnDescriptor(columnInfo[ix].name, index++, columnInfo[ix].dataType, columnInfo[ix].defaultValue, columnInfo[ix].defaultInfo, td, defaultUUID, columnInfo[ix].autoincStart, columnInfo[ix].autoincInc, columnInfo[ix].autoincCycle);
}
cdlArray[ix] = columnDescriptor;
}
if (tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {
dd.addDescriptorArray(cdlArray, td, DataDictionary.SYSCOLUMNS_CATALOG_NUM, false, tc);
}
// now add the column descriptors to the table.
ColumnDescriptorList cdl = td.getColumnDescriptorList();
for (int i = 0; i < cdlArray.length; i++) cdl.add(cdlArray[i]);
//
// Create a conglomerate desciptor with the conglomId filled in and
// add it.
//
// RESOLVE: Get information from the conglomerate descriptor which
// was provided.
//
ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, null, false, null, false, null, toid, sd.getUUID());
if (tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
}
// add the newly added conglomerate to the table descriptor
ConglomerateDescriptorList conglomList = td.getConglomerateDescriptorList();
conglomList.add(cgd);
/* Create any constraints */
if (constraintActions != null) {
/*
** Do everything but FK constraints first,
** then FK constraints on 2nd pass.
*/
// Two passes so that referenced (e.g. primary key) constraints
// exist before any foreign key that might point at them.
for (int conIndex = 0; conIndex < constraintActions.length; conIndex++) {
// skip fks
if (!constraintActions[conIndex].isForeignKeyConstraint()) {
constraintActions[conIndex].executeConstantAction(activation);
}
}
for (int conIndex = 0; conIndex < constraintActions.length; conIndex++) {
// only foreign keys
if (constraintActions[conIndex].isForeignKeyConstraint()) {
constraintActions[conIndex].executeConstantAction(activation);
}
}
}
// Record the dependencies arising from each column (see
// addColumnDependencies for the kinds of providers involved).
for (int ix = 0; ix < columnInfo.length; ix++) {
addColumnDependencies(lcc, dd, td, columnInfo[ix]);
}
//
// The table itself can depend on the user defined types of its columns.
//
adjustUDTDependencies(lcc, dd, td, columnInfo, false);
if (tableType == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {
// Temporary tables live only in the connection context, not the catalogs.
lcc.addDeclaredGlobalTempTable(td);
}
// Indicate that the CREATE TABLE statement itself depends on the
// table it is creating. Normally such statement dependencies are
// added during compilation, but here we have a bootstrapping issue
// because the table doesn't exist until the CREATE TABLE statement
// has been executed, so we had to defer the creation of this
// dependency until now. (DERBY-4479)
dd.getDependencyManager().addDependency(activation.getPreparedStatement(), td, lcc.getContextManager());
}
Use of org.apache.derby.iapi.sql.dictionary.DataDictionary in the Apache Derby project:
the class CreateTriggerConstantAction, method executeConstantAction.
/**
* This is the guts of the Execution-time logic for CREATE TRIGGER.
*
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
SPSDescriptor whenspsd = null;
SPSDescriptor actionspsd;
// Gather the per-statement execution context objects up front.
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
/*
** Indicate that we are about to modify the data dictionary.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
SchemaDescriptor triggerSd = getSchemaDescriptorForCreate(dd, activation, triggerSchemaName);
if (spsCompSchemaId == null) {
SchemaDescriptor def = lcc.getDefaultSchema();
if (def.getUUID() == null) {
// Descriptor for default schema is stale,
// look it up in the dictionary
def = dd.getSchemaDescriptor(def.getDescriptorName(), tc, false);
}
/*
** It is possible for spsCompSchemaId to be null. For instance,
** the current schema may not have been physically created yet but
** it exists "virtually". In this case, its UUID will have the
** value of null meaning that it is not persistent. e.g.:
**
** CONNECT 'db;create=true' user 'ernie';
** CREATE TABLE bert.t1 (i INT);
** CREATE TRIGGER bert.tr1 AFTER INSERT ON bert.t1
** FOR EACH STATEMENT MODE DB2SQL
** SELECT * FROM SYS.SYSTABLES;
**
** Note that in the above case, the trigger action statement have a
** null compilation schema. A compilation schema with null value
** indicates that the trigger action statement text does not have
** any dependencies with the CURRENT SCHEMA. This means:
**
** o It is safe to compile this statement in any schema since
** there is no dependency with the CURRENT SCHEMA. i.e.: All
** relevent identifiers are qualified with a specific schema.
**
** o The statement cache mechanism can utilize this piece of
** information to enable better statement plan sharing across
** connections in different schemas; thus, avoiding unnecessary
** statement compilation.
*/
if (def != null)
spsCompSchemaId = def.getUUID();
}
// Remember a printable table name for error messages; fall back to
// the UUID when the compile-time descriptor is unavailable.
String tabName;
if (triggerTable != null) {
triggerTableId = triggerTable.getUUID();
tabName = triggerTable.getName();
} else
tabName = "with UUID " + triggerTableId;
/* We need to get table descriptor again. We simply can't trust the
 * one we got at compile time, the lock on system table was released
 * when compile was done, and the table might well have been dropped.
 */
triggerTable = dd.getTableDescriptor(triggerTableId);
if (triggerTable == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tabName);
}
/* Lock the table for DDL. Otherwise during our execution, the table
 * might be changed, even dropped. Beetle 4269
 */
lockTableForDDL(tc, triggerTable.getHeapConglomerateId(), true);
/* get triggerTable again for correctness, in case it's changed before
 * the lock is aquired
 */
triggerTable = dd.getTableDescriptor(triggerTableId);
if (triggerTable == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tabName);
}
/*
** Send an invalidate on the table from which
** the triggering event emanates. This it
** to make sure that DML statements on this table
** will be recompiled. Do this before we create
** our trigger spses lest we invalidate them just
** after creating them.
*/
dm.invalidateFor(triggerTable, DependencyManager.CREATE_TRIGGER, lcc);
/*
** Lets get our trigger id up front, we'll use it when
** we create our spses.
*/
UUID tmpTriggerId = dd.getUUIDFactory().createUUID();
// Reuse pre-assigned SPS ids when present (e.g. on recreate);
// otherwise mint fresh ones. The WHEN id is only needed if there
// is a WHEN clause.
actionSPSId = (actionSPSId == null) ? dd.getUUIDFactory().createUUID() : actionSPSId;
if (whenSPSId == null && whenText != null) {
whenSPSId = dd.getUUIDFactory().createUUID();
}
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
/*
** Create the trigger descriptor first so the trigger action
** compilation can pick up the relevant trigger especially in
** the case of self triggering.
*/
TriggerDescriptor triggerd = ddg.newTriggerDescriptor(triggerSd, tmpTriggerId, triggerName, eventMask, isBefore, isRow, isEnabled, triggerTable, whenSPSId, actionSPSId, makeCreationTimestamp(dd), referencedCols, referencedColsInTriggerAction, originalActionText, referencingOld, referencingNew, oldReferencingName, newReferencingName, originalWhenText);
dd.addDescriptor(triggerd, triggerSd, DataDictionary.SYSTRIGGERS_CATALOG_NUM, false, tc);
/*
** If we have a WHEN action we create it now.
*/
if (whenText != null) {
// The WHEN clause is just a search condition and not a full
// SQL statement. Turn in into a VALUES statement.
String whenValuesStmt = "VALUES " + whenText;
whenspsd = createSPS(lcc, ddg, dd, tc, tmpTriggerId, triggerSd, whenSPSId, spsCompSchemaId, whenValuesStmt, true, triggerTable);
}
/*
** Create the trigger action
*/
actionspsd = createSPS(lcc, ddg, dd, tc, tmpTriggerId, triggerSd, actionSPSId, spsCompSchemaId, actionText, false, triggerTable);
/*
** Make underlying spses dependent on the trigger.
*/
if (whenspsd != null) {
dm.addDependency(triggerd, whenspsd, lcc.getContextManager());
}
dm.addDependency(triggerd, actionspsd, lcc.getContextManager());
dm.addDependency(triggerd, triggerTable, lcc.getContextManager());
// Make the trigger dependent on every provider referenced
// from the triggered statement or the WHEN clause.
for (ProviderInfo info : providerInfo) {
Provider provider = (Provider) info.getDependableFinder().getDependable(dd, info.getObjectId());
dm.addDependency(triggerd, provider, lcc.getContextManager());
}
// store trigger's dependency on various privileges in the dependeny system
storeViewTriggerDependenciesOnPrivileges(activation, triggerd);
}
Use of org.apache.derby.iapi.sql.dictionary.DataDictionary in the Apache Derby project:
the class UpdateNode, method bindStatement.
/**
* Bind this UpdateNode. This means looking up tables and columns and
* getting their types, and figuring out the result types of all
* expressions, as well as doing view resolution, permissions checking,
* etc.
* <p>
* Binding an update will also massage the tree so that
* the ResultSetNode has a set of columns to contain the old row
* value, followed by a set of columns to contain the new row
* value, followed by a column to contain the RowLocation of the
* row to be updated.
*
* @exception StandardException Thrown on error
*/
@Override
public void bindStatement() throws StandardException {
// We just need select privilege on the expressions
getCompilerContext().pushCurrentPrivType(Authorizer.SELECT_PRIV);
FromList fromList = new FromList(getOptimizerFactory().doJoinOrderOptimization(), getContextManager());
TableName cursorTargetTableName = null;
CurrentOfNode currentOfNode = null;
ResultColumnList afterColumns = null;
DataDictionary dataDictionary = getDataDictionary();
// check if targetTable is a synonym
if (targetTableName != null) {
TableName synonymTab = resolveTableToSynonym(this.targetTableName);
if (synonymTab != null) {
// Remember the original name and bind against the resolved table.
this.synonymTableName = targetTableName;
this.targetTableName = synonymTab;
}
}
// NOTE(review): inMatchingClause() presumably means this UPDATE is the
// WHEN MATCHED clause of a MERGE statement — confirm against MatchingClauseNode.
if (inMatchingClause()) {
tagOriginalResultSetColumns();
}
// collect lists of objects which will require privilege checks
ArrayList<String> explicitlySetColumns = getExplicitlySetColumns();
List<CastNode> allCastNodes = collectAllCastNodes();
tagPrivilegedNodes();
// tell the compiler to only add privilege checks for nodes which have been tagged
TagFilter tagFilter = new TagFilter(TagFilter.NEED_PRIVS_FOR_UPDATE_STMT);
getCompilerContext().addPrivilegeFilter(tagFilter);
bindTables(dataDictionary);
// for positioned update, get the cursor's target table.
if (SanityManager.DEBUG) {
SanityManager.ASSERT((resultSet != null && resultSet instanceof SelectNode), "Update must have a select result set");
}
SelectNode sel;
sel = (SelectNode) resultSet;
targetTable = (FromTable) sel.fromList.elementAt(0);
if (targetTable instanceof CurrentOfNode) {
// WHERE CURRENT OF <cursor>: positioned update.
positionedUpdate = true;
currentOfNode = (CurrentOfNode) targetTable;
cursorTargetTableName = currentOfNode.getBaseCursorTargetTableName();
// instead of an assert, we might say the cursor is not updatable.
if (SanityManager.DEBUG) {
SanityManager.ASSERT(cursorTargetTableName != null);
}
}
if (targetTable instanceof FromVTI) {
targetVTI = (FromVTI) targetTable;
targetVTI.setTarget();
} else {
// we get it from the cursor supplying the position.
if (targetTableName == null) {
// verify we have current of
if (SanityManager.DEBUG)
SanityManager.ASSERT(cursorTargetTableName != null);
targetTableName = cursorTargetTableName;
} else // the named table is the same as the cursor's target.
if (cursorTargetTableName != null) {
// be the same as a correlation name in the cursor.
if (!targetTableName.equals(cursorTargetTableName)) {
throw StandardException.newException(SQLState.LANG_CURSOR_UPDATE_MISMATCH, targetTableName, currentOfNode.getCursorName());
}
}
}
// because we verified that the tables match
// and we already bound the cursor or the select,
// the table descriptor should always be found.
verifyTargetTable();
// add UPDATE_PRIV on all columns on the left side of SET operators
addUpdatePriv(explicitlySetColumns);
/* Verify that all underlying ResultSets reclaimed their FromList */
if (SanityManager.DEBUG) {
SanityManager.ASSERT(fromList.size() == 0, "fromList.size() is expected to be 0, not " + fromList.size() + " on return from RS.bindExpressions()");
}
//
// Add generated columns whose generation clauses mention columns
// in the user's original update list.
//
ColumnDescriptorList addedGeneratedColumns = new ColumnDescriptorList();
ColumnDescriptorList affectedGeneratedColumns = new ColumnDescriptorList();
addGeneratedColumns(targetTableDescriptor, resultSet, affectedGeneratedColumns, addedGeneratedColumns);
/*
** The current result column list is the one supplied by the user.
** Mark these columns as "updated", so we can tell later which
** columns are really being updated, and which have been added
** but are not really being updated.
*/
resultSet.getResultColumns().markUpdated();
/* Prepend CurrentRowLocation() to the select's result column list. */
if (SanityManager.DEBUG)
SanityManager.ASSERT((resultSet.getResultColumns() != null), "resultColumns is expected not to be null at bind time");
/* Normalize the SET clause's result column list for synonym */
if (synonymTableName != null)
normalizeSynonymColumns(resultSet.getResultColumns(), targetTable);
/* Bind the original result columns by column name */
normalizeCorrelatedColumns(resultSet.getResultColumns(), targetTable);
resultSet.bindResultColumns(targetTableDescriptor, targetVTI, resultSet.getResultColumns(), this, fromList);
// don't allow overriding of generation clauses
forbidGenerationOverrides(resultSet.getResultColumns(), addedGeneratedColumns);
// On a 10.11+ dictionary use the current DEFAULT/override handling;
// otherwise fall back to the legacy behavior (the old way of
// generating unique ids) guarded by the autoincrement-update flag.
if (dataDictionary.checkVersion(DataDictionary.DD_VERSION_DERBY_10_11, null)) {
// Replace any DEFAULTs with the associated tree for the default if
// allowed, otherwise throw an exception
resultSet.getResultColumns().replaceOrForbidDefaults(targetTableDescriptor, resultSet.getResultColumns(), true);
resultSet.getResultColumns().checkForInvalidDefaults();
resultSet.getResultColumns().forbidOverrides(resultSet.getResultColumns());
} else {
LanguageConnectionContext lcc = getLanguageConnectionContext();
if (lcc.getAutoincrementUpdate() == false)
resultSet.getResultColumns().forbidOverrides(null);
}
/*
** Mark the columns in this UpdateNode's result column list as
** updateable in the ResultColumnList of the table being updated.
** only do this for FromBaseTables - if the result table is a
** CurrentOfNode, it already knows what columns in its cursor
** are updateable.
*/
boolean allColumns = false;
if (targetTable instanceof FromBaseTable) {
((FromBaseTable) targetTable).markUpdated(resultSet.getResultColumns());
} else if ((targetTable instanceof FromVTI) || (targetTable instanceof FromSubquery)) {
resultColumnList = resultSet.getResultColumns();
} else {
/*
** Positioned update: WHERE CURRENT OF
*/
if (SanityManager.DEBUG) {
SanityManager.ASSERT(currentOfNode != null, "currentOfNode is null");
}
ExecPreparedStatement cursorStmt = currentOfNode.getCursorStatement();
/*
** If there is no update column list, we need to build
** out the result column list to have all columns.
*/
if (!cursorStmt.hasUpdateColumns()) {
/*
** Get the resultColumnList representing ALL of the columns in the
** base table. This is the "before" portion of the result row.
*/
getResultColumnList();
/*
** Add the "after" portion of the result row. This is the update
** list augmented to include every column in the target table.
** Those columns that are not being updated are set to themselves.
** The expanded list will be in the order of the columns in the base
** table.
*/
afterColumns = resultSet.getResultColumns().expandToAll(targetTableDescriptor, targetTable.getTableName());
/*
** Need to get all indexes here since we aren't calling
** getReadMap().
*/
getAffectedIndexes(targetTableDescriptor, (ResultColumnList) null, (FormatableBitSet) null);
allColumns = true;
} else {
/* Check the updatability */
resultSet.getResultColumns().checkColumnUpdateability(cursorStmt, currentOfNode.getCursorName());
}
}
changedColumnIds = getChangedColumnIds(resultSet.getResultColumns());
//
// Trigger transition tables are implemented as VTIs. This short-circuits some
// necessary steps if the source table of a MERGE statement is a trigger
// transition table. The following boolean is meant to prevent that short-circuiting.
//
boolean needBaseColumns = (targetVTI == null) || inMatchingClause();
/*
** We need to add in all the columns that are needed
** by the constraints on this table.
*/
if (!allColumns && needBaseColumns) {
// Suppress privilege collection while expanding the column set:
// these extra reads are driven by constraints, not by the user.
getCompilerContext().pushCurrentPrivType(Authorizer.NULL_PRIV);
try {
readColsBitSet = new FormatableBitSet();
FromBaseTable fbt = getResultColumnList(resultSet.getResultColumns());
afterColumns = resultSet.getResultColumns().copyListAndObjects();
readColsBitSet = getReadMap(dataDictionary, targetTableDescriptor, afterColumns, affectedGeneratedColumns);
afterColumns = fbt.addColsToList(afterColumns, readColsBitSet);
resultColumnList = fbt.addColsToList(resultColumnList, readColsBitSet);
/*
** If all bits are set, then behave as if we chose all
** in the first place
*/
int i = 1;
int size = targetTableDescriptor.getMaxColumnID();
for (; i <= size; i++) {
if (!readColsBitSet.get(i)) {
break;
}
}
if (i > size) {
// null bit set == read every column.
readColsBitSet = null;
}
} finally {
getCompilerContext().popCurrentPrivType();
}
}
ValueNode rowLocationNode;
if (needBaseColumns) {
/* Append the list of "after" columns to the list of "before" columns,
 * preserving the afterColumns list. (Necessary for binding
 * check constraints.)
 */
resultColumnList.appendResultColumns(afterColumns, false);
/* Generate the RowLocation column */
rowLocationNode = new CurrentRowLocationNode(getContextManager());
} else {
// VTI target: there is no base-row location; use a dummy constant.
rowLocationNode = new NumericConstantNode(TypeId.getBuiltInTypeId(Types.INTEGER), 0, getContextManager());
}
ResultColumn rowLocationColumn = new ResultColumn(COLUMNNAME, rowLocationNode, getContextManager());
rowLocationColumn.markGenerated();
/* Append to the ResultColumnList */
resultColumnList.addResultColumn(rowLocationColumn);
/*
 * The last thing that we do to the generated RCL is to clear
 * the table name out from each RC. See comment on
 * checkTableNameAndScrubResultColumns().
 */
checkTableNameAndScrubResultColumns(resultColumnList);
/* Set the new result column list in the result set */
resultSet.setResultColumns(resultColumnList);
// For the matching clause of a MERGE statement, re-associate the
// columns that were tagged before binding (see tagOriginalResultSetColumns).
if (inMatchingClause()) {
associateAddedColumns();
}
// SQL 2011, section 6.10, SR 4b.
SelectNode.checkNoWindowFunctions(resultSet, "<update source>");
/* Bind the expressions */
super.bindExpressions();
/* Bind untyped nulls directly under the result columns */
resultSet.getResultColumns().bindUntypedNullsToResultColumns(resultColumnList);
/* Bind the new ResultColumn */
rowLocationColumn.bindResultColumnToExpression();
resultColumnList.checkStorableExpressions();
/* Insert a NormalizeResultSetNode above the source if the source
 * and target column types and lengths do not match.
 */
if (!resultColumnList.columnTypesAndLengthsMatch()) {
resultSet = new NormalizeResultSetNode(resultSet, resultColumnList, null, true, getContextManager());
if (hasCheckConstraints(dataDictionary, targetTableDescriptor) || hasGenerationClauses(targetTableDescriptor)) {
/* Get and bind all check constraints and generated columns on the columns
 * being updated. We want to bind the check constraints and
 * generated columns against
 * the after columns. We need to bind against the portion of the
 * resultColumns in the new NormalizeResultSet that point to
 * afterColumns. Create an RCL composed of just those RCs in
 * order to bind the check constraints.
 */
int afterColumnsSize = afterColumns.size();
afterColumns = new ResultColumnList(getContextManager());
ResultColumnList normalizedRCs = resultSet.getResultColumns();
for (int index = 0; index < afterColumnsSize; index++) {
afterColumns.addElement(normalizedRCs.elementAt(index + afterColumnsSize));
}
}
}
if (null != targetVTI && !inMatchingClause()) {
deferred = VTIDeferModPolicy.deferIt(DeferModification.UPDATE_STATEMENT, targetVTI, resultColumnList.getColumnNames(), sel.getWhereClause());
} else // not VTI
{
/* we always include triggers in core language */
boolean hasTriggers = (getAllRelevantTriggers(dataDictionary, targetTableDescriptor, changedColumnIds, true).size() > 0);
ResultColumnList sourceRCL = hasTriggers ? resultColumnList : afterColumns;
/* bind all generation clauses for generated columns */
parseAndBindGenerationClauses(dataDictionary, targetTableDescriptor, afterColumns, resultColumnList, true, resultSet);
/* Get and bind all constraints on the columns being updated */
checkConstraints = bindConstraints(dataDictionary, getOptimizerFactory(), targetTableDescriptor, null, sourceRCL, changedColumnIds, readColsBitSet, true, /* we always include triggers in core language */
new boolean[1]);
/* If the target table is also a source table, then
 * the update will have to be in deferred mode
 * For updates, this means that the target table appears in a
 * subquery. Also, self referencing foreign keys are
 * deferred. And triggers cause an update to be deferred.
 */
if (resultSet.subqueryReferencesTarget(targetTableDescriptor.getName(), true) || requiresDeferredProcessing()) {
deferred = true;
}
TransactionController tc = getLanguageConnectionContext().getTransactionCompile();
autoincRowLocation = dataDictionary.computeAutoincRowLocations(tc, targetTableDescriptor);
}
identitySequenceUUIDString = getUUIDofSequenceGenerator();
getCompilerContext().popCurrentPrivType();
getCompilerContext().removePrivilegeFilter(tagFilter);
// Require USAGE privilege on any user-defined types referenced by CASTs.
for (CastNode value : allCastNodes) {
addUDTUsagePriv(value);
}
}
Use of org.apache.derby.iapi.sql.dictionary.DataDictionary in the Apache Derby project:
the class GenericAuthorizer, method authorize.
/**
 * Check that the current user holds every privilege required by the
 * statement. The database owner implicitly holds all privileges, so the
 * check is skipped entirely for that user, as it is when no privileges
 * are required at all.
 *
 * @param requiredPermissionsList privileges the statement needs; may be
 *        null or empty when nothing has to be checked
 * @param activation the activation of the statement being authorized
 * @throws StandardException if a required privilege is missing, or on
 *         any failure while reading the permission catalogs
 */
public void authorize(List<StatementPermission> requiredPermissionsList, Activation activation) throws StandardException {
    DataDictionary dd = lcc.getDataDictionary();
    // Nothing to check, or the database owner (who holds every
    // privilege) is running the statement: we are done.
    if (requiredPermissionsList == null || requiredPermissionsList.isEmpty() || lcc.getCurrentUserId(activation).equals(dd.getAuthorizationDatabaseOwner())) {
        return;
    }
    int ddMode = dd.startReading(lcc);
    /*
     * The system may need to read the permission descriptor(s)
     * from the system table(s) if they are not available in the
     * permission cache. So start an internal read-only nested
     * transaction for this.
     *
     * The reason to use a nested transaction here is to not hold
     * locks on system tables on a user transaction. e.g.: when
     * attempting to revoke an user, the statement may time out
     * since the user-to-be-revoked transaction may have acquired
     * shared locks on the permission system tables; hence, this
     * may not be desirable.
     *
     * All locks acquired by StatementPermission object's check()
     * method will be released when the system ends the nested
     * transaction.
     *
     * In Derby, the locks from read nested transactions come from
     * the same space as the parent transaction; hence, they do not
     * conflict with parent locks.
     */
    lcc.beginNestedTransaction(true);
    try {
        try {
            // perform the permission checking
            for (StatementPermission requiredPermission : requiredPermissionsList) {
                requiredPermission.check(lcc, false, activation);
            }
        } finally {
            dd.doneReading(ddMode, lcc);
        }
    } finally {
        // make sure we commit; otherwise, we will end up with
        // mismatch nested level in the language connection context.
        lcc.commitNestedTransaction();
    }
}
Use of org.apache.derby.iapi.sql.dictionary.DataDictionary in the Apache Derby project:
the class RenameConstantAction, method execGutsRenameColumn.
/**
 * Do the necessary work for RENAME COLUMN at execute time: invalidate
 * dependents of the old column (and of any referenced-key constraints
 * on it), then drop the old column descriptor from SYSCOLUMNS and
 * re-add it under the new name.
 *
 * @param td descriptor of the table owning the column being renamed
 * @param activation the activation of the RENAME statement
 * @exception StandardException thrown on failure
 */
private void execGutsRenameColumn(TableDescriptor td, Activation activation) throws StandardException {
ColumnDescriptor columnDescriptor = null;
int columnPosition = 0;
ConstraintDescriptorList constraintDescriptorList;
ConstraintDescriptor constraintDescriptor;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
/* get the column descriptor for column to be renamed and
 * using it's position in the table, set the referenced
 * column map of the table indicating which column is being
 * renamed. Dependency Manager uses this to find out the
 * dependents on the column.
 */
columnDescriptor = td.getColumnDescriptor(oldObjectName);
if (columnDescriptor.isAutoincrement())
columnDescriptor.setAutoinc_create_or_modify_Start_Increment(ColumnDefinitionNode.CREATE_AUTOINCREMENT);
columnPosition = columnDescriptor.getPosition();
// Bit set is sized for 1-based column positions, hence size + 1.
FormatableBitSet toRename = new FormatableBitSet(td.getColumnDescriptorList().size() + 1);
toRename.set(columnPosition);
td.setReferencedColumnMap(toRename);
dm.invalidateFor(td, DependencyManager.RENAME, lcc);
// look for foreign key dependency on the column.
constraintDescriptorList = dd.getConstraintDescriptors(td);
for (int index = 0; index < constraintDescriptorList.size(); index++) {
constraintDescriptor = constraintDescriptorList.elementAt(index);
int[] referencedColumns = constraintDescriptor.getReferencedColumns();
int numRefCols = referencedColumns.length;
for (int j = 0; j < numRefCols; j++) {
// Only referenced-key (primary/unique) constraints need their
// dependents invalidated; other constraint kinds are skipped.
if ((referencedColumns[j] == columnPosition) && (constraintDescriptor instanceof ReferencedKeyConstraintDescriptor))
dm.invalidateFor(constraintDescriptor, DependencyManager.RENAME, lcc);
}
}
// Drop the column
// The rename is implemented as drop + re-add of the same descriptor
// object with its name field updated.
dd.dropColumnDescriptor(td.getUUID(), oldObjectName, tc);
columnDescriptor.setColumnName(newObjectName);
dd.addDescriptor(columnDescriptor, td, DataDictionary.SYSCOLUMNS_CATALOG_NUM, false, tc);
// Need to do following to reload the cache so that table
// descriptor now has new column name
td = dd.getTableDescriptor(td.getObjectID());
}
Aggregations