Use of org.apache.derby.iapi.sql.execute.ExecPreparedStatement in project Derby (Apache).
Class UpdateNode, method bindStatement:
/**
 * Bind this UpdateNode. This means looking up tables and columns and
 * getting their types, and figuring out the result types of all
 * expressions, as well as doing view resolution, permissions checking,
 * etc.
 * <p>
 * Binding an update will also massage the tree so that
 * the ResultSetNode has a set of columns to contain the old row
 * value, followed by a set of columns to contain the new row
 * value, followed by a column to contain the RowLocation of the
 * row to be updated.
 *
 * @exception StandardException Thrown on error
 */
@Override
public void bindStatement() throws StandardException {
    // We just need select privilege on the expressions
    getCompilerContext().pushCurrentPrivType(Authorizer.SELECT_PRIV);
    FromList fromList = new FromList(getOptimizerFactory().doJoinOrderOptimization(), getContextManager());
    TableName cursorTargetTableName = null;
    CurrentOfNode currentOfNode = null;
    // Columns holding the "after" (new) row image; filled in further below.
    ResultColumnList afterColumns = null;
    DataDictionary dataDictionary = getDataDictionary();
    // check if targetTable is a synonym; if so, remember the synonym name
    // and rebind the statement against the underlying table.
    if (targetTableName != null) {
        TableName synonymTab = resolveTableToSynonym(this.targetTableName);
        if (synonymTab != null) {
            this.synonymTableName = targetTableName;
            this.targetTableName = synonymTab;
        }
    }
    // MERGE support: tag the user's original result set columns so they can
    // be re-associated after the tree is massaged (see associateAddedColumns).
    if (inMatchingClause()) {
        tagOriginalResultSetColumns();
    }
    // collect lists of objects which will require privilege checks
    ArrayList<String> explicitlySetColumns = getExplicitlySetColumns();
    List<CastNode> allCastNodes = collectAllCastNodes();
    tagPrivilegedNodes();
    // tell the compiler to only add privilege checks for nodes which have been tagged
    TagFilter tagFilter = new TagFilter(TagFilter.NEED_PRIVS_FOR_UPDATE_STMT);
    getCompilerContext().addPrivilegeFilter(tagFilter);
    bindTables(dataDictionary);
    // for positioned update, get the cursor's target table.
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT((resultSet != null && resultSet instanceof SelectNode), "Update must have a select result set");
    }
    SelectNode sel;
    sel = (SelectNode) resultSet;
    targetTable = (FromTable) sel.fromList.elementAt(0);
    // WHERE CURRENT OF <cursor>: the target table comes from the cursor.
    if (targetTable instanceof CurrentOfNode) {
        positionedUpdate = true;
        currentOfNode = (CurrentOfNode) targetTable;
        cursorTargetTableName = currentOfNode.getBaseCursorTargetTableName();
        // instead of an assert, we might say the cursor is not updatable.
        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(cursorTargetTableName != null);
        }
    }
    if (targetTable instanceof FromVTI) {
        // Updating through a virtual table interface (e.g. a trigger
        // transition table in a MERGE statement).
        targetVTI = (FromVTI) targetTable;
        targetVTI.setTarget();
    } else {
        // we get it from the cursor supplying the position.
        if (targetTableName == null) {
            // verify we have current of
            if (SanityManager.DEBUG)
                SanityManager.ASSERT(cursorTargetTableName != null);
            targetTableName = cursorTargetTableName;
        } else // the named table is the same as the cursor's target.
        if (cursorTargetTableName != null) {
            // The named table must match the cursor's target; note it may
            // be the same as a correlation name in the cursor.
            if (!targetTableName.equals(cursorTargetTableName)) {
                throw StandardException.newException(SQLState.LANG_CURSOR_UPDATE_MISMATCH, targetTableName, currentOfNode.getCursorName());
            }
        }
    }
    // because we verified that the tables match
    // and we already bound the cursor or the select,
    // the table descriptor should always be found.
    verifyTargetTable();
    // add UPDATE_PRIV on all columns on the left side of SET operators
    addUpdatePriv(explicitlySetColumns);
    /* Verify that all underlying ResultSets reclaimed their FromList */
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(fromList.size() == 0, "fromList.size() is expected to be 0, not " + fromList.size() + " on return from RS.bindExpressions()");
    }
    //
    // Add generated columns whose generation clauses mention columns
    // in the user's original update list.
    //
    ColumnDescriptorList addedGeneratedColumns = new ColumnDescriptorList();
    ColumnDescriptorList affectedGeneratedColumns = new ColumnDescriptorList();
    addGeneratedColumns(targetTableDescriptor, resultSet, affectedGeneratedColumns, addedGeneratedColumns);
    /*
    ** The current result column list is the one supplied by the user.
    ** Mark these columns as "updated", so we can tell later which
    ** columns are really being updated, and which have been added
    ** but are not really being updated.
    */
    resultSet.getResultColumns().markUpdated();
    /* Prepend CurrentRowLocation() to the select's result column list. */
    if (SanityManager.DEBUG)
        SanityManager.ASSERT((resultSet.getResultColumns() != null), "resultColumns is expected not to be null at bind time");
    /* Normalize the SET clause's result column list for synonym */
    if (synonymTableName != null)
        normalizeSynonymColumns(resultSet.getResultColumns(), targetTable);
    /* Bind the original result columns by column name */
    normalizeCorrelatedColumns(resultSet.getResultColumns(), targetTable);
    resultSet.bindResultColumns(targetTableDescriptor, targetVTI, resultSet.getResultColumns(), this, fromList);
    // don't allow overriding of generation clauses
    forbidGenerationOverrides(resultSet.getResultColumns(), addedGeneratedColumns);
    // On 10.11+ dictionaries, use the new DEFAULT handling; the else branch
    // is the code for old way of generating unique ids.
    if (dataDictionary.checkVersion(DataDictionary.DD_VERSION_DERBY_10_11, null)) {
        // Replace any DEFAULTs with the associated tree for the default if
        // allowed, otherwise throw an exception
        resultSet.getResultColumns().replaceOrForbidDefaults(targetTableDescriptor, resultSet.getResultColumns(), true);
        resultSet.getResultColumns().checkForInvalidDefaults();
        resultSet.getResultColumns().forbidOverrides(resultSet.getResultColumns());
    } else {
        LanguageConnectionContext lcc = getLanguageConnectionContext();
        if (lcc.getAutoincrementUpdate() == false)
            resultSet.getResultColumns().forbidOverrides(null);
    }
    /*
    ** Mark the columns in this UpdateNode's result column list as
    ** updateable in the ResultColumnList of the table being updated.
    ** only do this for FromBaseTables - if the result table is a
    ** CurrentOfNode, it already knows what columns in its cursor
    ** are updateable.
    */
    boolean allColumns = false;
    if (targetTable instanceof FromBaseTable) {
        ((FromBaseTable) targetTable).markUpdated(resultSet.getResultColumns());
    } else if ((targetTable instanceof FromVTI) || (targetTable instanceof FromSubquery)) {
        resultColumnList = resultSet.getResultColumns();
    } else {
        /*
        ** Positioned update: WHERE CURRENT OF
        */
        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(currentOfNode != null, "currentOfNode is null");
        }
        ExecPreparedStatement cursorStmt = currentOfNode.getCursorStatement();
        /*
        ** If there is no update column list, we need to build
        ** out the result column list to have all columns.
        */
        if (!cursorStmt.hasUpdateColumns()) {
            /*
            ** Get the resultColumnList representing ALL of the columns in the
            ** base table. This is the "before" portion of the result row.
            */
            getResultColumnList();
            /*
            ** Add the "after" portion of the result row. This is the update
            ** list augmented to include every column in the target table.
            ** Those columns that are not being updated are set to themselves.
            ** The expanded list will be in the order of the columns in the base
            ** table.
            */
            afterColumns = resultSet.getResultColumns().expandToAll(targetTableDescriptor, targetTable.getTableName());
            /*
            ** Need to get all indexes here since we aren't calling
            ** getReadMap().
            */
            getAffectedIndexes(targetTableDescriptor, (ResultColumnList) null, (FormatableBitSet) null);
            allColumns = true;
        } else {
            /* Check the updatability */
            resultSet.getResultColumns().checkColumnUpdateability(cursorStmt, currentOfNode.getCursorName());
        }
    }
    changedColumnIds = getChangedColumnIds(resultSet.getResultColumns());
    //
    // Trigger transition tables are implemented as VTIs. This short-circuits some
    // necessary steps if the source table of a MERGE statement is a trigger
    // transition table. The following boolean is meant to prevent that short-circuiting.
    //
    boolean needBaseColumns = (targetVTI == null) || inMatchingClause();
    /*
    ** We need to add in all the columns that are needed
    ** by the constraints on this table.
    */
    if (!allColumns && needBaseColumns) {
        // NULL_PRIV: no extra privilege checks for columns added purely for
        // constraint evaluation.
        getCompilerContext().pushCurrentPrivType(Authorizer.NULL_PRIV);
        try {
            readColsBitSet = new FormatableBitSet();
            FromBaseTable fbt = getResultColumnList(resultSet.getResultColumns());
            afterColumns = resultSet.getResultColumns().copyListAndObjects();
            readColsBitSet = getReadMap(dataDictionary, targetTableDescriptor, afterColumns, affectedGeneratedColumns);
            afterColumns = fbt.addColsToList(afterColumns, readColsBitSet);
            resultColumnList = fbt.addColsToList(resultColumnList, readColsBitSet);
            /*
            ** If all bits are set, then behave as if we chose all
            ** in the first place
            */
            int i = 1;
            int size = targetTableDescriptor.getMaxColumnID();
            for (; i <= size; i++) {
                if (!readColsBitSet.get(i)) {
                    break;
                }
            }
            if (i > size) {
                readColsBitSet = null;
            }
        } finally {
            getCompilerContext().popCurrentPrivType();
        }
    }
    ValueNode rowLocationNode;
    if (needBaseColumns) {
        /* Append the list of "after" columns to the list of "before" columns,
         * preserving the afterColumns list. (Necessary for binding
         * check constraints.)
         */
        resultColumnList.appendResultColumns(afterColumns, false);
        /* Generate the RowLocation column */
        rowLocationNode = new CurrentRowLocationNode(getContextManager());
    } else {
        // VTI target outside a MERGE: no base-table row location exists, so
        // use a dummy integer constant as a placeholder column.
        rowLocationNode = new NumericConstantNode(TypeId.getBuiltInTypeId(Types.INTEGER), 0, getContextManager());
    }
    ResultColumn rowLocationColumn = new ResultColumn(COLUMNNAME, rowLocationNode, getContextManager());
    rowLocationColumn.markGenerated();
    /* Append to the ResultColumnList */
    resultColumnList.addResultColumn(rowLocationColumn);
    /*
     * The last thing that we do to the generated RCL is to clear
     * the table name out from each RC. See comment on
     * checkTableNameAndScrubResultColumns().
     */
    checkTableNameAndScrubResultColumns(resultColumnList);
    /* Set the new result column list in the result set */
    resultSet.setResultColumns(resultColumnList);
    // MERGE support: re-associate the columns tagged at the top of this
    // method with the columns added while massaging the tree.
    if (inMatchingClause()) {
        associateAddedColumns();
    }
    // SQL 2011, section 6.10, SR 4b.
    SelectNode.checkNoWindowFunctions(resultSet, "<update source>");
    /* Bind the expressions */
    super.bindExpressions();
    /* Bind untyped nulls directly under the result columns */
    resultSet.getResultColumns().bindUntypedNullsToResultColumns(resultColumnList);
    /* Bind the new ResultColumn */
    rowLocationColumn.bindResultColumnToExpression();
    resultColumnList.checkStorableExpressions();
    /* Insert a NormalizeResultSetNode above the source if the source
     * and target column types and lengths do not match.
     */
    if (!resultColumnList.columnTypesAndLengthsMatch()) {
        resultSet = new NormalizeResultSetNode(resultSet, resultColumnList, null, true, getContextManager());
        if (hasCheckConstraints(dataDictionary, targetTableDescriptor) || hasGenerationClauses(targetTableDescriptor)) {
            /* Get and bind all check constraints and generated columns on the columns
             * being updated. We want to bind the check constraints and
             * generated columns against
             * the after columns. We need to bind against the portion of the
             * resultColumns in the new NormalizeResultSet that point to
             * afterColumns. Create an RCL composed of just those RCs in
             * order to bind the check constraints.
             */
            int afterColumnsSize = afterColumns.size();
            afterColumns = new ResultColumnList(getContextManager());
            ResultColumnList normalizedRCs = resultSet.getResultColumns();
            for (int index = 0; index < afterColumnsSize; index++) {
                afterColumns.addElement(normalizedRCs.elementAt(index + afterColumnsSize));
            }
        }
    }
    if (null != targetVTI && !inMatchingClause()) {
        deferred = VTIDeferModPolicy.deferIt(DeferModification.UPDATE_STATEMENT, targetVTI, resultColumnList.getColumnNames(), sel.getWhereClause());
    } else // not VTI
    {
        /* we always include triggers in core language */
        boolean hasTriggers = (getAllRelevantTriggers(dataDictionary, targetTableDescriptor, changedColumnIds, true).size() > 0);
        ResultColumnList sourceRCL = hasTriggers ? resultColumnList : afterColumns;
        /* bind all generation clauses for generated columns */
        parseAndBindGenerationClauses(dataDictionary, targetTableDescriptor, afterColumns, resultColumnList, true, resultSet);
        /* Get and bind all constraints on the columns being updated */
        checkConstraints = bindConstraints(dataDictionary, getOptimizerFactory(), targetTableDescriptor, null, sourceRCL, changedColumnIds, readColsBitSet, true, /* we always include triggers in core language */
        new boolean[1]);
        /* If the target table is also a source table, then
         * the update will have to be in deferred mode
         * For updates, this means that the target table appears in a
         * subquery. Also, self referencing foreign keys are
         * deferred. And triggers cause an update to be deferred.
         */
        if (resultSet.subqueryReferencesTarget(targetTableDescriptor.getName(), true) || requiresDeferredProcessing()) {
            deferred = true;
        }
        TransactionController tc = getLanguageConnectionContext().getTransactionCompile();
        autoincRowLocation = dataDictionary.computeAutoincRowLocations(tc, targetTableDescriptor);
    }
    identitySequenceUUIDString = getUUIDofSequenceGenerator();
    // Undo the privilege bookkeeping pushed at the top of this method.
    getCompilerContext().popCurrentPrivType();
    getCompilerContext().removePrivilegeFilter(tagFilter);
    // Add USAGE privilege on any user-defined types mentioned in CASTs.
    for (CastNode value : allCastNodes) {
        addUDTUsagePriv(value);
    }
}
Use of org.apache.derby.iapi.sql.execute.ExecPreparedStatement in project Derby (Apache).
Class BaseActivation, method checkPositionedStatement:
/**
 * Verify that a positioned statement still executes against a cursor
 * belonging to the same PreparedStatement (plan) it was originally
 * compiled against. If the named cursor now comes from a different
 * plan, invalidate this statement's plan so it gets recompiled.
 *
 * Only called from generated code for positioned UPDATE and DELETE
 * statements. See CurrentOfNode.
 *
 * @param cursorName Name of the cursor
 * @param psName Object name of the PreparedStatement.
 * @throws StandardException
 */
protected void checkPositionedStatement(String cursorName, String psName) throws StandardException {
    final ExecPreparedStatement ownStatement = getPreparedStatement();
    if (ownStatement == null) {
        return;
    }
    final CursorActivation cursorActivation = lcc.lookupCursorActivation(cursorName);
    if (cursorActivation == null) {
        return;
    }
    // Check that we are still compiled against the correct cursor. If a
    // different statement now owns this cursor name, our plan is stale.
    final String currentPlanName = cursorActivation.getPreparedStatement().getObjectName();
    if (!psName.equals(currentPlanName)) {
        ownStatement.makeInvalid(DependencyManager.CHANGED_CURSOR, lcc);
    }
}
Use of org.apache.derby.iapi.sql.execute.ExecPreparedStatement in project Derby (Apache).
Class BaseActivation, method shouldWeCheckRowCounts:
/**
 * Decide whether it is time to check the row counts of the tables
 * involved in this query (used for stale-plan detection).
 *
 * The first execution always checks; after that, checks happen once
 * every N executions, where N comes from the property
 * derby.language.stalePlanCheckInterval.
 *
 * @return true if the row counts should be checked, false otherwise
 */
protected boolean shouldWeCheckRowCounts() throws StandardException {
    final ExecPreparedStatement ps = getPreparedStatement();
    // OK to read/update the count without synchronization: its exact value
    // is not critical, it only paces how often we re-check.
    final int executionCount = ps.incrementExecutionCount();
    if (executionCount == 1) {
        // Always check on the very first execution.
        return true;
    }
    if (executionCount < Property.MIN_LANGUAGE_STALE_PLAN_CHECK_INTERVAL) {
        // Too early to bother consulting the property.
        return false;
    }
    int interval = ps.getStalePlanCheckInterval();
    if (interval == 0) {
        // Query the database property only once per plan; the cached value
        // is always at least the minimum, hence never zero after this.
        interval = PropertyUtil.getServiceInt(getTransactionController(), Property.LANGUAGE_STALE_PLAN_CHECK_INTERVAL, Property.MIN_LANGUAGE_STALE_PLAN_CHECK_INTERVAL, Integer.MAX_VALUE, Property.DEFAULT_LANGUAGE_STALE_PLAN_CHECK_INTERVAL);
        ps.setStalePlanCheckInterval(interval);
    }
    return (executionCount % interval) == 1;
}
Use of org.apache.derby.iapi.sql.execute.ExecPreparedStatement in project Derby (Apache).
Class GenericTriggerExecutor, method executeSPS:
/**
 * Execute the given stored prepared statement. We
 * just grab the prepared statement from the spsd,
 * get a new activation holder and let er rip.
 *
 * @param sps the SPS to execute
 * @param isWhen {@code true} if the SPS is for the WHEN clause,
 * {@code false} otherwise
 * @return {@code true} if the SPS is for a WHEN clause and it evaluated
 * to {@code TRUE}, {@code false} otherwise
 * @exception StandardException on error
 */
private boolean executeSPS(SPSDescriptor sps, boolean isWhen) throws StandardException {
    // Set when the execution fails with LANG_STATEMENT_NEEDS_RECOMPILE;
    // forces a fresh clone of the plan on the next loop iteration.
    boolean recompile = false;
    boolean whenClauseWasTrue = false;
    // The prepared statement and the activation may already be available
    // if the trigger has been fired before in the same statement. (Only
    // happens with row triggers that are triggered by a statement that
    // touched multiple rows.) The WHEN clause and the trigger action have
    // their own prepared statement and activation. Fetch the correct set.
    ExecPreparedStatement ps = isWhen ? whenPS : actionPS;
    Activation spsActivation = isWhen ? spsWhenActivation : spsActionActivation;
    // Loop so a "needs recompile" error can be retried after revalidation;
    // all other outcomes exit via return or throw.
    while (true) {
        /*
        ** Only grab the ps the 1st time through. This
        ** way a row trigger doesn't do any unnecessary
        ** setup work.
        */
        if (ps == null || recompile) {
            // The SPS activation will set its parent activation from
            // the statement context. Reset it to the original parent
            // activation first so that it doesn't use the activation of
            // the previously executed SPS as parent. DERBY-6348.
            lcc.getStatementContext().setActivation(activation);
            /*
            ** We need to clone the prepared statement so we don't
            ** wind up marking that ps that is tied to sps as finished
            ** during the course of execution.
            */
            ps = sps.getPreparedStatement();
            ps = ps.getClone();
            // it should be valid since we've just prepared for it
            ps.setValid();
            spsActivation = ps.getActivation(lcc, false);
            /*
            ** Normally, we want getSource() for an sps invocation
            ** to be EXEC STATEMENT xxx, but in this case, since
            ** we are executing the SPS in our own fashion, we want
            ** the text to be the trigger action. So set it accordingly.
            */
            ps.setSource(sps.getText());
            ps.setSPSAction();
            // Cache the plan/activation on the executor so a row
            // trigger that fires multiple times skips this setup.
            if (isWhen) {
                whenPS = ps;
                spsWhenActivation = spsActivation;
            } else {
                actionPS = ps;
                spsActionActivation = spsActivation;
            }
        }
        // save the active statement context for exception handling purpose
        StatementContext active_sc = lcc.getStatementContext();
        /*
        ** Execute the activation. If we have an error, we
        ** are going to go to some extra work to pop off
        ** our statement context. This is because we are
        ** a nested statement (we have 2 activations), but
        ** we aren't a nested connection, so we have to
        ** pop off our statementcontext to get error handling
        ** to work correctly. This is normally a no-no, but
        ** we are an unusual case.
        */
        try {
            // This is a substatement; for now, we do not set any timeout
            // for it. We might change this behaviour later, by linking
            // timeout to its parent statement's timeout settings.
            ResultSet rs = ps.executeSubStatement(activation, spsActivation, false, 0L);
            if (isWhen) {
                // This is a WHEN clause. Expect a single BOOLEAN value
                // to be returned.
                ExecRow row = rs.getNextRow();
                if (SanityManager.DEBUG && row.nColumns() != 1) {
                    SanityManager.THROWASSERT("Expected WHEN clause to have exactly " + "one column, found: " + row.nColumns());
                }
                DataValueDescriptor value = row.getColumn(1);
                if (SanityManager.DEBUG) {
                    SanityManager.ASSERT(value instanceof SQLBoolean);
                }
                // SQL three-valued logic: NULL counts as not-true.
                whenClauseWasTrue = !value.isNull() && value.getBoolean();
                if (SanityManager.DEBUG) {
                    SanityManager.ASSERT(rs.getNextRow() == null, "WHEN clause returned more than one row");
                }
            } else if (rs.returnsRows()) {
                // The result set was opened in ps.execute(). Drain it so
                // the action statement runs to completion.
                while (rs.getNextRow() != null) {
                }
            }
            rs.close();
        } catch (StandardException e) {
            /*
            ** When a trigger SPS action is executed and results in
            ** an exception, the system needs to clean up the active
            ** statement context(SC) and the trigger execution context
            ** (TEC) in language connection context(LCC) properly (e.g.:
            ** "Maximum depth triggers exceeded" exception); otherwise,
            ** this will leave old TECs lingering and may result in
            ** subsequent statements within the same connection to throw
            ** the same exception again prematurely.
            **
            ** A new statement context will be created for the SPS before
            ** it is executed. However, it is possible for some
            ** StandardException to be thrown before a new statement
            ** context is pushed down to the context stack; hence, the
            ** trigger executor needs to ensure that the current active SC
            ** is associated with the SPS, so that it is cleaning up the
            ** right statement context in LCC.
            **
            ** It is also possible that the error has already been handled
            ** on a lower level, especially if the trigger re-enters the
            ** JDBC layer. In that case, the current SC will be null.
            **
            ** When the active SC is cleaned up, the TEC will be removed
            ** from LCC and the SC object will be popped off from the LCC
            ** as part of cleanupOnError logic.
            */
            /* retrieve the current active SC */
            StatementContext sc = lcc.getStatementContext();
            /* make sure that the cleanup is on the new SC */
            if (sc != null && active_sc != sc) {
                sc.cleanupOnError(e);
            }
            /* Handle dynamic recompiles */
            if (e.getMessageId().equals(SQLState.LANG_STATEMENT_NEEDS_RECOMPILE)) {
                recompile = true;
                sps.revalidate(lcc);
                continue;
            }
            spsActivation.close();
            throw e;
        }
        /* Done with execution without any recompiles */
        return whenClauseWasTrue;
    }
}
Use of org.apache.derby.iapi.sql.execute.ExecPreparedStatement in project Derby (Apache).
Class SYSSTATEMENTSRowFactory, method buildDescriptor:
// /////////////////////////////////////////////////////////////////////////
//
// ABSTRACT METHODS TO BE IMPLEMENTED BY CHILDREN OF CatalogRowFactory
//
// /////////////////////////////////////////////////////////////////////////
/**
 * Make an Tuple Descriptor out of a SYSSTATEMENTS row
 *
 * @param row a SYSSTATEMENTS row
 * @param parentTupleDescriptor unused
 * @param dd dataDictionary
 *
 * @return a descriptor equivalent to a SYSSTATEMENTS row
 *
 * @exception StandardException thrown on failure
 */
public TupleDescriptor buildDescriptor(ExecRow row, TupleDescriptor parentTupleDescriptor, DataDictionary dd) throws StandardException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(row.nColumns() == SYSSTATEMENTS_COLUMN_COUNT, "Wrong number of columns for a SYSSTATEMENTS row");
    }
    DataValueDescriptor col;
    // 1st column is STMTID (UUID - char(36))
    col = row.getColumn(1);
    String uuidStr = col.getString();
    UUID uuid = getUUIDFactory().recreateUUID(uuidStr);
    // 2nd column is STMTNAME (varchar(128))
    col = row.getColumn(2);
    String name = col.getString();
    // 3rd column is SCHEMAID (UUID - char(36))
    col = row.getColumn(3);
    String suuidStr = col.getString();
    UUID suuid = getUUIDFactory().recreateUUID(suuidStr);
    // 4th column is TYPE (char(1))
    col = row.getColumn(4);
    char type = col.getString().charAt(0);
    if (SanityManager.DEBUG) {
        if (!SPSDescriptor.validType(type)) {
            SanityManager.THROWASSERT("Bad type value (" + type + ") for statement " + name);
        }
    }
    // A stored plan from a read-only upgrade cannot be trusted,
    // so force a recompile by treating the statement as invalid.
    boolean valid;
    if (dd.isReadOnlyUpgrade()) {
        valid = false;
    } else {
        // 5th column is VALID (boolean)
        col = row.getColumn(5);
        valid = col.getBoolean();
    }
    // 6th column is TEXT (LONG VARCHAR)
    col = row.getColumn(6);
    String text = col.getString();
    /* 7th column is LASTCOMPILED (TIMESTAMP) */
    col = row.getColumn(7);
    Timestamp time = col.getTimestamp(new java.util.GregorianCalendar());
    // 8th column is COMPILATIONSCHEMAID (UUID - char(36)); may be NULL
    col = row.getColumn(8);
    uuidStr = col.getString();
    UUID compUuid = null;
    if (uuidStr != null)
        compUuid = getUUIDFactory().recreateUUID(uuidStr);
    // 9th column is TEXT (LONG VARCHAR)
    col = row.getColumn(9);
    String usingText = col.getString();
    // 10th column is the stored plan (serialized object).
    // Only load the compiled plan if the statement is valid.
    ExecPreparedStatement preparedStatement = null;
    if (valid) {
        col = row.getColumn(10);
        preparedStatement = (ExecPreparedStatement) col.getObject();
    }
    // 11th column is INITIALLY_COMPILABLE (boolean); NULL means true
    col = row.getColumn(11);
    boolean initiallyCompilable;
    if (col.isNull()) {
        initiallyCompilable = true;
    } else {
        initiallyCompilable = col.getBoolean();
    }
    return new SPSDescriptor(dd, name, uuid, suuid, compUuid, type, valid, text, usingText, time, preparedStatement, initiallyCompilable);
}
Aggregations