Use of org.datanucleus.store.rdbms.SQLController in project datanucleus-rdbms by datanucleus.
Class OracleClobRDBMSMapping, method updateClobColumn.
/**
* Convenience method to update the contents of a CLOB column.
* Oracle requires that a CLOB is initialised with EMPTY_CLOB(); the column is then retrieved
* and its CLOB value updated. This method executes the statement
* <pre>
* SELECT {clobColumn} FROM TABLE WHERE ID=? FOR UPDATE
* </pre>
* and then updates the Clob value returned.
* @param op ObjectProvider of the object
* @param table Table storing the CLOB column
* @param mapping Datastore mapping for the CLOB column
* @param value The value to store in the CLOB
* @throws NucleusObjectNotFoundException Thrown if an object is not found
* @throws NucleusDataStoreException Thrown if an error occurs in datastore communication
*/
@SuppressWarnings("deprecation")
public static void updateClobColumn(ObjectProvider op, Table table, DatastoreMapping mapping, String value) {
ExecutionContext ec = op.getExecutionContext();
RDBMSStoreManager storeMgr = table.getStoreManager();
// Don't support join tables yet
DatastoreClass classTable = (DatastoreClass) table;
SQLExpressionFactory exprFactory = storeMgr.getSQLExpressionFactory();
// Generate "SELECT {clobColumn} FROM TABLE WHERE ID=? FOR UPDATE" statement
SelectStatement sqlStmt = new SelectStatement(storeMgr, table, null, null);
sqlStmt.setClassLoaderResolver(ec.getClassLoaderResolver());
sqlStmt.addExtension(SQLStatement.EXTENSION_LOCK_FOR_UPDATE, true);
SQLTable clobSqlTbl = SQLStatementHelper.getSQLTableForMappingOfTable(sqlStmt, sqlStmt.getPrimaryTable(), mapping.getJavaTypeMapping());
sqlStmt.select(clobSqlTbl, mapping.getColumn(), null);
StatementClassMapping mappingDefinition = new StatementClassMapping();
AbstractClassMetaData cmd = op.getClassMetaData();
int inputParamNum = 1;
if (cmd.getIdentityType() == IdentityType.DATASTORE) {
// Datastore identity value for input
JavaTypeMapping datastoreIdMapping = classTable.getSurrogateMapping(SurrogateColumnType.DATASTORE_ID, false);
SQLExpression expr = exprFactory.newExpression(sqlStmt, sqlStmt.getPrimaryTable(), datastoreIdMapping);
SQLExpression val = exprFactory.newLiteralParameter(sqlStmt, datastoreIdMapping, null, "ID");
sqlStmt.whereAnd(expr.eq(val), true);
StatementMappingIndex datastoreIdx = mappingDefinition.getMappingForMemberPosition(SurrogateColumnType.DATASTORE_ID.getFieldNumber());
if (datastoreIdx == null) {
datastoreIdx = new StatementMappingIndex(datastoreIdMapping);
mappingDefinition.addMappingForMember(SurrogateColumnType.DATASTORE_ID.getFieldNumber(), datastoreIdx);
}
datastoreIdx.addParameterOccurrence(new int[] { inputParamNum });
} else if (cmd.getIdentityType() == IdentityType.APPLICATION) {
// Application identity value(s) for input
int[] pkNums = cmd.getPKMemberPositions();
for (int i = 0; i < pkNums.length; i++) {
AbstractMemberMetaData mmd = cmd.getMetaDataForManagedMemberAtAbsolutePosition(pkNums[i]);
JavaTypeMapping pkMapping = classTable.getMemberMapping(mmd);
SQLExpression expr = exprFactory.newExpression(sqlStmt, sqlStmt.getPrimaryTable(), pkMapping);
SQLExpression val = exprFactory.newLiteralParameter(sqlStmt, pkMapping, null, "PK" + i);
sqlStmt.whereAnd(expr.eq(val), true);
StatementMappingIndex pkIdx = mappingDefinition.getMappingForMemberPosition(pkNums[i]);
if (pkIdx == null) {
pkIdx = new StatementMappingIndex(pkMapping);
mappingDefinition.addMappingForMember(pkNums[i], pkIdx);
}
int[] inputParams = new int[pkMapping.getNumberOfDatastoreMappings()];
for (int j = 0; j < pkMapping.getNumberOfDatastoreMappings(); j++) {
inputParams[j] = inputParamNum++;
}
pkIdx.addParameterOccurrence(inputParams);
}
}
String textStmt = sqlStmt.getSQLText().toSQL();
if (op.isEmbedded()) {
// This mapping is embedded, so navigate back to the real owner since that is the "id" in the table
ObjectProvider[] embeddedOwners = ec.getOwnersForEmbeddedObjectProvider(op);
if (embeddedOwners != null) {
// Just use the first owner
// TODO Should check if the owner is stored in this table
op = embeddedOwners[0];
}
}
try {
ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
SQLController sqlControl = storeMgr.getSQLController();
try {
PreparedStatement ps = sqlControl.getStatementForQuery(mconn, textStmt);
try {
// Provide the primary key field(s) to the JDBC statement
if (cmd.getIdentityType() == IdentityType.DATASTORE) {
StatementMappingIndex datastoreIdx = mappingDefinition.getMappingForMemberPosition(SurrogateColumnType.DATASTORE_ID.getFieldNumber());
for (int i = 0; i < datastoreIdx.getNumberOfParameterOccurrences(); i++) {
classTable.getSurrogateMapping(SurrogateColumnType.DATASTORE_ID, false).setObject(ec, ps, datastoreIdx.getParameterPositionsForOccurrence(i), op.getInternalObjectId());
}
} else if (cmd.getIdentityType() == IdentityType.APPLICATION) {
op.provideFields(cmd.getPKMemberPositions(), new ParameterSetter(op, ps, mappingDefinition));
}
ResultSet rs = sqlControl.executeStatementQuery(ec, mconn, textStmt, ps);
try {
if (!rs.next()) {
throw new NucleusObjectNotFoundException("No such database row", op.getInternalObjectId());
}
DatastoreAdapter dba = storeMgr.getDatastoreAdapter();
int jdbcMajorVersion = dba.getDriverMajorVersion();
if (dba.getDatastoreDriverName().equalsIgnoreCase(OracleAdapter.OJDBC_DRIVER_NAME) && jdbcMajorVersion < 10) {
// Oracle JDBC drivers version 9 and below return the Oracle-specific oracle.sql.CLOB type,
// so we have to cast to it
oracle.sql.CLOB clob = (oracle.sql.CLOB) rs.getClob(1);
if (clob != null) {
// putString is deprecated, but it is the only option with these older drivers
clob.putString(1, value);
}
} else {
// Oracle JDBC drivers 10 and above use the standard java.sql.Clob interface
java.sql.Clob clob = rs.getClob(1);
if (clob != null) {
clob.setString(1, value);
}
}
} finally {
rs.close();
}
} finally {
sqlControl.closeStatement(mconn, ps);
}
} finally {
mconn.release();
}
} catch (SQLException e) {
throw new NucleusDataStoreException("Update of CLOB value failed: " + textStmt, e);
}
}
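For orientation, the SELECT ... FOR UPDATE technique that updateClobColumn wraps can be shown in plain JDBC. This is a minimal sketch, not DataNucleus code: the PERSON table, its ID and BIO columns, and the method name are illustrative, and it assumes the row's CLOB was initialised with EMPTY_CLOB() on insert and that auto-commit is off so FOR UPDATE holds the row lock.

import java.sql.Clob;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class OracleClobUpdateSketch {
    // Locks the row, reads its CLOB locator and overwrites the contents
    public static void updateBio(Connection conn, long id, String value) throws SQLException {
        String sql = "SELECT BIO FROM PERSON WHERE ID = ? FOR UPDATE";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setLong(1, id);
            try (ResultSet rs = ps.executeQuery()) {
                if (!rs.next()) {
                    throw new SQLException("No row with ID " + id);
                }
                Clob clob = rs.getClob(1);
                if (clob != null) {
                    // Write the new value starting at position 1 (JDBC-standard java.sql.Clob)
                    clob.setString(1, value);
                }
            }
        }
    }
}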
Use of org.datanucleus.store.rdbms.SQLController in project datanucleus-rdbms by datanucleus.
Class RDBMSQueryUtils, method getPreparedStatementForQuery.
/**
* Method to create a PreparedStatement for use with the query.
* @param conn the Connection
* @param queryStmt The statement text for the query
* @param query The query
* @return the PreparedStatement
* @throws SQLException Thrown if an error occurs creating the statement
*/
public static PreparedStatement getPreparedStatementForQuery(ManagedConnection conn, String queryStmt, Query query) throws SQLException {
// Apply any non-standard result set definition if required (either from the PMF, or via query extensions)
String rsTypeString = RDBMSQueryUtils.getResultSetTypeForQuery(query);
if (rsTypeString != null && (!rsTypeString.equals(QUERY_RESULTSET_TYPE_SCROLL_SENSITIVE) && !rsTypeString.equals(QUERY_RESULTSET_TYPE_FORWARD_ONLY) && !rsTypeString.equals(QUERY_RESULTSET_TYPE_SCROLL_INSENSITIVE))) {
throw new NucleusUserException(Localiser.msg("052510"));
}
if (rsTypeString != null) {
DatastoreAdapter dba = ((RDBMSStoreManager) query.getStoreManager()).getDatastoreAdapter();
// Add checks on what the DatastoreAdapter supports
if (rsTypeString.equals(QUERY_RESULTSET_TYPE_SCROLL_SENSITIVE) && !dba.supportsOption(DatastoreAdapter.RESULTSET_TYPE_SCROLL_SENSITIVE)) {
NucleusLogger.DATASTORE_RETRIEVE.info("Query requested to run with result-set type of " + rsTypeString + " yet not supported by adapter. Using " + QUERY_RESULTSET_TYPE_FORWARD_ONLY);
rsTypeString = QUERY_RESULTSET_TYPE_FORWARD_ONLY;
} else if (rsTypeString.equals(QUERY_RESULTSET_TYPE_SCROLL_INSENSITIVE) && !dba.supportsOption(DatastoreAdapter.RESULTSET_TYPE_SCROLL_INSENSITIVE)) {
NucleusLogger.DATASTORE_RETRIEVE.info("Query requested to run with result-set type of " + rsTypeString + " yet not supported by adapter. Using " + QUERY_RESULTSET_TYPE_FORWARD_ONLY);
rsTypeString = QUERY_RESULTSET_TYPE_FORWARD_ONLY;
} else if (rsTypeString.equals(QUERY_RESULTSET_TYPE_FORWARD_ONLY) && !dba.supportsOption(DatastoreAdapter.RESULTSET_TYPE_FORWARD_ONLY)) {
NucleusLogger.DATASTORE_RETRIEVE.info("Query requested to run with result-set type of " + rsTypeString + " yet not supported by adapter. Using " + QUERY_RESULTSET_TYPE_SCROLL_SENSITIVE);
rsTypeString = QUERY_RESULTSET_TYPE_SCROLL_SENSITIVE;
}
}
String rsConcurrencyString = RDBMSQueryUtils.getResultSetConcurrencyForQuery(query);
if (rsConcurrencyString != null && (!rsConcurrencyString.equals(QUERY_RESULTSET_CONCURRENCY_READONLY) && !rsConcurrencyString.equals(QUERY_RESULTSET_CONCURRENCY_UPDATEABLE))) {
throw new NucleusUserException(Localiser.msg("052511"));
}
SQLController sqlControl = ((RDBMSStoreManager) query.getStoreManager()).getSQLController();
PreparedStatement ps = sqlControl.getStatementForQuery(conn, queryStmt, rsTypeString, rsConcurrencyString);
return ps;
}
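The result-set type and concurrency strings resolved above correspond directly to the standard JDBC options passed to Connection.prepareStatement. A minimal plain-JDBC sketch of that mapping, independent of SQLController (the helper class and method names here are illustrative):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ResultSetOptionsSketch {
    // Chooses a JDBC result-set type/concurrency, defaulting to forward-only and read-only
    public static PreparedStatement prepare(Connection conn, String sql, boolean scrollable, boolean updateable) throws SQLException {
        int rsType = scrollable ? ResultSet.TYPE_SCROLL_INSENSITIVE : ResultSet.TYPE_FORWARD_ONLY;
        int rsConcurrency = updateable ? ResultSet.CONCUR_UPDATABLE : ResultSet.CONCUR_READ_ONLY;
        return conn.prepareStatement(sql, rsType, rsConcurrency);
    }
}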
Use of org.datanucleus.store.rdbms.SQLController in project datanucleus-rdbms by datanucleus.
Class SQLQuery, method performExecute.
/**
* Execute the query and return the result.
* For a SELECT query this will be the QueryResult.
* For an UPDATE/DELETE it will be the row count for the update statement.
* @param parameters the Map containing all of the parameters (positional parameters) (not null)
* @return the result of the query
*/
protected Object performExecute(Map parameters) {
if (parameters.size() != (parameterNames != null ? parameterNames.length : 0)) {
throw new NucleusUserException(Localiser.msg("059019", (parameterNames != null) ? "" + parameterNames.length : "0", "" + parameters.size()));
}
if (type == QueryType.BULK_DELETE || type == QueryType.BULK_UPDATE) {
// Update/Delete statement (INSERT/UPDATE/DELETE/MERGE)
try {
RDBMSStoreManager storeMgr = (RDBMSStoreManager) getStoreManager();
ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
SQLController sqlControl = storeMgr.getSQLController();
try {
PreparedStatement ps = sqlControl.getStatementForUpdate(mconn, compiledSQL, false);
try {
// Set the values of any parameters
for (int i = 0; i < parameters.size(); i++) {
ps.setObject((i + 1), parameters.get(Integer.valueOf(i + 1)));
}
// Execute the update statement
int[] rcs = sqlControl.executeStatementUpdate(ec, mconn, compiledSQL, ps, true);
// Return a single Long with the number of records updated
return Long.valueOf(rcs[0]);
} finally {
sqlControl.closeStatement(mconn, ps);
}
} finally {
mconn.release();
}
} catch (SQLException e) {
throw new NucleusDataStoreException(Localiser.msg("059025", compiledSQL), e);
}
} else if (type == QueryType.SELECT) {
// Query statement (SELECT, stored-procedure)
AbstractRDBMSQueryResult qr = null;
try {
RDBMSStoreManager storeMgr = (RDBMSStoreManager) getStoreManager();
ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
SQLController sqlControl = storeMgr.getSQLController();
try {
PreparedStatement ps = RDBMSQueryUtils.getPreparedStatementForQuery(mconn, compiledSQL, this);
try {
// Set the values of any parameters
for (int i = 0; i < parameters.size(); i++) {
ps.setObject((i + 1), parameters.get(Integer.valueOf(i + 1)));
}
// Apply any user-specified constraints over timeouts and ResultSet
RDBMSQueryUtils.prepareStatementForExecution(ps, this, true);
ResultSet rs = sqlControl.executeStatementQuery(ec, mconn, compiledSQL, ps);
try {
// Generate a ResultObjectFactory
ResultObjectFactory rof = null;
if (resultMetaData != null) {
// Each row of the ResultSet is defined by MetaData
rof = new ResultMetaDataROF(ec, rs, ignoreCache, resultMetaData);
} else if (resultClass != null || candidateClass == null) {
// Each row of the ResultSet is either an instance of resultClass, or Object[]
rof = RDBMSQueryUtils.getResultObjectFactoryForNoCandidateClass(ec, rs, resultClass);
} else {
// Each row of the ResultSet is an instance of the candidate class
rof = getResultObjectFactoryForCandidateClass(rs);
}
// Return the associated type of results depending on whether scrollable or not
qr = RDBMSQueryUtils.getQueryResultForQuery(this, rof, rs, null);
qr.initialise();
final QueryResult qr1 = qr;
final ManagedConnection mconn1 = mconn;
mconn.addListener(new ManagedConnectionResourceListener() {
public void transactionFlushed() {
}
public void transactionPreClose() {
// Disconnect the query from this ManagedConnection (read in unread rows etc)
qr1.disconnect();
}
public void managedConnectionPreClose() {
if (!ec.getTransaction().isActive()) {
// Disconnect the query from this ManagedConnection (read in unread rows etc)
qr1.disconnect();
}
}
public void managedConnectionPostClose() {
}
public void resourcePostClose() {
mconn1.removeListener(this);
}
});
} finally {
if (qr == null) {
rs.close();
}
}
} catch (QueryInterruptedException qie) {
// Execution was cancelled so cancel the PreparedStatement
ps.cancel();
throw qie;
} finally {
if (qr == null) {
sqlControl.closeStatement(mconn, ps);
}
}
} finally {
mconn.release();
}
} catch (SQLException e) {
throw new NucleusDataStoreException(Localiser.msg("059025", compiledSQL), e);
}
return qr;
} else {
// 'Other' statement (manually invoked stored-procedure?, CREATE?, DROP?, or similar)
try {
RDBMSStoreManager storeMgr = (RDBMSStoreManager) getStoreManager();
ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
SQLController sqlControl = storeMgr.getSQLController();
try {
PreparedStatement ps = RDBMSQueryUtils.getPreparedStatementForQuery(mconn, compiledSQL, this);
try {
// Set the values of any parameters
for (int i = 0; i < parameters.size(); i++) {
ps.setObject((i + 1), parameters.get(Integer.valueOf(i + 1)));
}
// Apply any user-specified constraints over timeouts etc
RDBMSQueryUtils.prepareStatementForExecution(ps, this, false);
sqlControl.executeStatement(ec, mconn, compiledSQL, ps);
} catch (QueryInterruptedException qie) {
// Execution was cancelled so cancel the PreparedStatement
ps.cancel();
throw qie;
} finally {
sqlControl.closeStatement(mconn, ps);
}
} finally {
mconn.release();
}
} catch (SQLException e) {
throw new NucleusDataStoreException(Localiser.msg("059025", compiledSQL), e);
}
return true;
}
}
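At the application level this path is reached by creating an SQL query on the PersistenceManager. A minimal JDO usage sketch, assuming a hypothetical PERSON table; with no result class or candidate class set, each row comes back as an Object[], matching the branch above:

import java.util.List;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;

public class SqlQueryUsageSketch {
    // Runs a SELECT SQL query; the positional "?" parameter is supplied via executeWithArray
    public static List runSelect(PersistenceManager pm) {
        Query q = pm.newQuery("javax.jdo.query.SQL", "SELECT ID, NAME FROM PERSON WHERE AGE > ?");
        return (List) q.executeWithArray(Integer.valueOf(18));
    }
}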
Use of org.datanucleus.store.rdbms.SQLController in project datanucleus-rdbms by datanucleus.
Class JDOQLQuery, method performExecute.
protected Object performExecute(Map parameters) {
if (statementReturnsEmpty) {
return Collections.EMPTY_LIST;
}
boolean inMemory = evaluateInMemory();
if (candidateCollection != null) {
// Supplied collection of instances, so evaluate in-memory
if (candidateCollection.isEmpty()) {
return Collections.EMPTY_LIST;
} else if (inMemory) {
return new JDOQLInMemoryEvaluator(this, new ArrayList(candidateCollection), compilation, parameters, clr).execute(true, true, true, true, true);
}
} else if (type == QueryType.SELECT) {
// Query results are cached, so return those
List<Object> cachedResults = getQueryManager().getQueryResult(this, parameters);
if (cachedResults != null) {
return new CandidateIdsQueryResult(this, cachedResults);
}
}
Object results = null;
RDBMSStoreManager storeMgr = (RDBMSStoreManager) getStoreManager();
ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
try {
// Execute the query
long startTime = System.currentTimeMillis();
if (NucleusLogger.QUERY.isDebugEnabled()) {
NucleusLogger.QUERY.debug(Localiser.msg("021046", getLanguage(), getSingleStringQuery(), null));
}
AbstractClassMetaData acmd = ec.getMetaDataManager().getMetaDataForClass(candidateClass, clr);
SQLController sqlControl = storeMgr.getSQLController();
PreparedStatement ps = null;
try {
if (type == QueryType.SELECT) {
// Create PreparedStatement and apply parameters, result settings etc
ps = RDBMSQueryUtils.getPreparedStatementForQuery(mconn, datastoreCompilation.getSQL(), this);
SQLStatementHelper.applyParametersToStatement(ps, ec, datastoreCompilation.getStatementParameters(), datastoreCompilation.getParameterNameByPosition(), parameters);
RDBMSQueryUtils.prepareStatementForExecution(ps, this, true);
registerTask(ps);
ResultSet rs = null;
try {
rs = sqlControl.executeStatementQuery(ec, mconn, toString(), ps);
} finally {
deregisterTask();
}
AbstractRDBMSQueryResult qr = null;
try {
if (inMemory) {
// IN-MEMORY EVALUATION
ResultObjectFactory rof = new PersistentClassROF(ec, rs, ignoreCache, datastoreCompilation.getResultDefinitionForClass(), acmd, candidateClass);
// Just instantiate the candidates for later in-memory processing
// TODO Use a queryResult rather than an ArrayList so we load when required
List candidates = new ArrayList();
while (rs.next()) {
candidates.add(rof.getObject());
}
// Perform in-memory filter/result/order etc
results = new JDOQLInMemoryEvaluator(this, candidates, compilation, parameters, clr).execute(true, true, true, true, true);
} else {
// IN-DATASTORE EVALUATION
ResultObjectFactory rof = null;
if (result != null) {
// Each result row is of a result type
rof = new ResultClassROF(ec, rs, ignoreCache, resultClass, datastoreCompilation.getResultDefinition());
} else if (resultClass != null && resultClass != candidateClass) {
rof = new ResultClassROF(ec, rs, ignoreCache, resultClass, datastoreCompilation.getResultDefinitionForClass());
} else {
// Each result row is a candidate object
rof = new PersistentClassROF(ec, rs, ignoreCache, datastoreCompilation.getResultDefinitionForClass(), acmd, candidateClass);
}
// Create the required type of QueryResult
qr = RDBMSQueryUtils.getQueryResultForQuery(this, rof, rs, getResultDistinct() ? null : candidateCollection);
// Register any bulk loaded member resultSets that need loading
Map<String, IteratorStatement> scoIterStmts = datastoreCompilation.getSCOIteratorStatements();
if (scoIterStmts != null) {
Iterator<Map.Entry<String, IteratorStatement>> scoStmtIter = scoIterStmts.entrySet().iterator();
while (scoStmtIter.hasNext()) {
Map.Entry<String, IteratorStatement> stmtIterEntry = scoStmtIter.next();
IteratorStatement iterStmt = stmtIterEntry.getValue();
String iterStmtSQL = iterStmt.getSelectStatement().getSQLText().toSQL();
NucleusLogger.DATASTORE_RETRIEVE.debug("JDOQL Bulk-Fetch of " + iterStmt.getBackingStore().getOwnerMemberMetaData().getFullFieldName());
try {
PreparedStatement psSco = sqlControl.getStatementForQuery(mconn, iterStmtSQL);
if (datastoreCompilation.getStatementParameters() != null) {
BulkFetchHandler.applyParametersToStatement(ec, psSco, datastoreCompilation, iterStmt.getSelectStatement(), parameters);
}
ResultSet rsSCO = sqlControl.executeStatementQuery(ec, mconn, iterStmtSQL, psSco);
qr.registerMemberBulkResultSet(iterStmt, rsSCO);
} catch (SQLException e) {
throw new NucleusDataStoreException(Localiser.msg("056006", iterStmtSQL), e);
}
}
}
// Initialise the QueryResult for use
qr.initialise();
// Add hooks for closing query resources
final QueryResult qr1 = qr;
final ManagedConnection mconn1 = mconn;
ManagedConnectionResourceListener listener = new ManagedConnectionResourceListener() {
public void transactionFlushed() {
}
public void transactionPreClose() {
// Tx : disconnect query from ManagedConnection (read in unread rows etc)
qr1.disconnect();
}
public void managedConnectionPreClose() {
if (!ec.getTransaction().isActive()) {
// Non-Tx : disconnect query from ManagedConnection (read in unread rows etc)
qr1.disconnect();
}
}
public void managedConnectionPostClose() {
}
public void resourcePostClose() {
mconn1.removeListener(this);
}
};
mconn.addListener(listener);
qr.addConnectionListener(listener);
results = qr;
}
} finally {
if (qr == null) {
rs.close();
}
}
} else if (type == QueryType.BULK_UPDATE || type == QueryType.BULK_DELETE) {
long bulkResult = 0;
List<StatementCompilation> stmtCompilations = datastoreCompilation.getStatementCompilations();
Iterator<StatementCompilation> stmtCompileIter = stmtCompilations.iterator();
while (stmtCompileIter.hasNext()) {
StatementCompilation stmtCompile = stmtCompileIter.next();
ps = sqlControl.getStatementForUpdate(mconn, stmtCompile.getSQL(), false);
SQLStatementHelper.applyParametersToStatement(ps, ec, datastoreCompilation.getStatementParameters(), datastoreCompilation.getParameterNameByPosition(), parameters);
RDBMSQueryUtils.prepareStatementForExecution(ps, this, false);
int[] execResults = sqlControl.executeStatementUpdate(ec, mconn, toString(), ps, true);
if (stmtCompile.useInCount()) {
bulkResult += execResults[0];
}
}
try {
// Evict all objects of this type from the cache
ec.getNucleusContext().getLevel2Cache().evictAll(candidateClass, subclasses);
} catch (UnsupportedOperationException uoe) {
// Do nothing
}
results = bulkResult;
}
} catch (SQLException sqle) {
if (storeMgr.getDatastoreAdapter().isStatementCancel(sqle)) {
throw new QueryInterruptedException("Query has been interrupted", sqle);
} else if (storeMgr.getDatastoreAdapter().isStatementTimeout(sqle)) {
throw new QueryTimeoutException("Query has been timed out", sqle);
}
throw new NucleusException(Localiser.msg("021042", datastoreCompilation.getSQL()), sqle);
}
if (NucleusLogger.QUERY.isDebugEnabled()) {
NucleusLogger.QUERY.debug(Localiser.msg("021074", getLanguage(), "" + (System.currentTimeMillis() - startTime)));
}
return results;
} finally {
mconn.release();
}
}
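For completeness, both the in-datastore and in-memory evaluation branches above are driven by ordinary JDOQL queries. A minimal usage sketch against a hypothetical persistent class (the Person class and its fields are illustrative, not part of datanucleus-rdbms):

import java.util.List;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;
import javax.jdo.annotations.PersistenceCapable;

public class JdoqlUsageSketch {
    @PersistenceCapable
    public static class Person {
        String name;
        int age;
    }

    // Candidate query: each result row is an instance of Person
    public static List<Person> adults(PersistenceManager pm) {
        Query<Person> q = pm.newQuery(Person.class, "age >= :minAge");
        q.setParameters(21);
        return q.executeList();
    }

    // Bulk delete: the return value is the number of affected instances, as computed above
    public static long deleteMinors(PersistenceManager pm) {
        Query<Person> q = pm.newQuery(Person.class, "age < :minAge");
        return q.deletePersistentAll(18);
    }
}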
Use of org.datanucleus.store.rdbms.SQLController in project datanucleus-rdbms by datanucleus.
Class DeleteRequest, method execute.
/**
* Method performing the deletion of the record from the datastore.
* Takes the constructed deletion query and populates with the specific record information.
* @param op The ObjectProvider for the record to be deleted.
*/
public void execute(ObjectProvider op) {
if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
// Debug information about what we are deleting
NucleusLogger.PERSISTENCE.debug(Localiser.msg("052210", op.getObjectAsPrintable(), table));
}
// Process all related fields first
// a). Delete any dependent objects
// b). Null any non-dependent objects with FK at other side
ClassLoaderResolver clr = op.getExecutionContext().getClassLoaderResolver();
Set relatedObjectsToDelete = null;
for (int i = 0; i < callbacks.length; ++i) {
if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
NucleusLogger.PERSISTENCE.debug(Localiser.msg("052212", op.getObjectAsPrintable(), ((JavaTypeMapping) callbacks[i]).getMemberMetaData().getFullFieldName()));
}
callbacks[i].preDelete(op);
// Check for any dependent related 1-1 objects where we hold the FK and where the object hasn't been deleted.
// This can happen if this DeleteRequest was triggered by delete-orphans and so the related object has to be deleted *after* this object.
// It's likely we could do this better by using AttachFieldManager and just marking the "orphan" (i.e this object) as deleted
// (see AttachFieldManager TODO regarding when not copying)
JavaTypeMapping mapping = (JavaTypeMapping) callbacks[i];
AbstractMemberMetaData mmd = mapping.getMemberMetaData();
RelationType relationType = mmd.getRelationType(clr);
if (mmd.isDependent() && (relationType == RelationType.ONE_TO_ONE_UNI || (relationType == RelationType.ONE_TO_ONE_BI && mmd.getMappedBy() == null))) {
try {
op.isLoaded(mmd.getAbsoluteFieldNumber());
Object relatedPc = op.provideField(mmd.getAbsoluteFieldNumber());
boolean relatedObjectDeleted = op.getExecutionContext().getApiAdapter().isDeleted(relatedPc);
if (!relatedObjectDeleted) {
if (relatedObjectsToDelete == null) {
relatedObjectsToDelete = new HashSet();
}
relatedObjectsToDelete.add(relatedPc);
}
} catch (Exception e) {
// Should be XXXObjectNotFoundException but don't want to use the JDO class
}
}
}
// and cater for other cases, in particular persistent interfaces
if (oneToOneNonOwnerFields != null && oneToOneNonOwnerFields.length > 0) {
for (int i = 0; i < oneToOneNonOwnerFields.length; i++) {
AbstractMemberMetaData relatedFmd = oneToOneNonOwnerFields[i];
updateOneToOneBidirectionalOwnerObjectForField(op, relatedFmd);
}
}
// Choose the statement based on whether optimistic or not
String stmt = null;
ExecutionContext ec = op.getExecutionContext();
RDBMSStoreManager storeMgr = table.getStoreManager();
boolean optimisticChecks = false;
if (table.getSurrogateColumn(SurrogateColumnType.SOFTDELETE) != null) {
stmt = softDeleteStmt;
} else {
optimisticChecks = (versionMetaData != null && ec.getTransaction().getOptimistic() && versionChecks);
if (optimisticChecks) {
stmt = deleteStmtOptimistic;
} else {
stmt = deleteStmt;
}
}
// Process the delete of this object
try {
ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
SQLController sqlControl = storeMgr.getSQLController();
try {
// Perform the delete
boolean batch = true;
if (optimisticChecks || !ec.getTransaction().isActive()) {
// Turn OFF batching if doing optimistic checks (since we need the result of the delete)
// or if using nontransactional writes (since we want it sending to the datastore now)
batch = false;
}
PreparedStatement ps = sqlControl.getStatementForUpdate(mconn, stmt, batch);
try {
// provide WHERE clause field(s)
if (cmd.getIdentityType() == IdentityType.DATASTORE) {
StatementMappingIndex mapIdx = mappingStatementIndex.getWhereDatastoreId();
for (int i = 0; i < mapIdx.getNumberOfParameterOccurrences(); i++) {
table.getSurrogateMapping(SurrogateColumnType.DATASTORE_ID, false).setObject(ec, ps, mapIdx.getParameterPositionsForOccurrence(i), op.getInternalObjectId());
}
} else {
StatementClassMapping mappingDefinition = new StatementClassMapping();
StatementMappingIndex[] idxs = mappingStatementIndex.getWhereFields();
for (int i = 0; i < idxs.length; i++) {
if (idxs[i] != null) {
mappingDefinition.addMappingForMember(i, idxs[i]);
}
}
op.provideFields(whereFieldNumbers, new ParameterSetter(op, ps, mappingDefinition));
}
if (multitenancyStatementMapping != null) {
table.getSurrogateMapping(SurrogateColumnType.MULTITENANCY, false).setObject(ec, ps, multitenancyStatementMapping.getParameterPositionsForOccurrence(0), ec.getNucleusContext().getMultiTenancyId(ec, cmd));
}
if (optimisticChecks) {
// WHERE clause - current version discriminator
JavaTypeMapping verMapping = mappingStatementIndex.getWhereVersion().getMapping();
Object currentVersion = op.getTransactionalVersion();
if (currentVersion == null) {
// Somehow the version is not set on this object (not read in ?) so report the bug
String msg = Localiser.msg("052202", op.getInternalObjectId(), table);
NucleusLogger.PERSISTENCE.error(msg);
throw new NucleusException(msg);
}
StatementMappingIndex mapIdx = mappingStatementIndex.getWhereVersion();
for (int i = 0; i < mapIdx.getNumberOfParameterOccurrences(); i++) {
verMapping.setObject(ec, ps, mapIdx.getParameterPositionsForOccurrence(i), currentVersion);
}
}
int[] rcs = sqlControl.executeStatementUpdate(ec, mconn, stmt, ps, !batch);
if (optimisticChecks && rcs[0] == 0) {
// No object deleted so either object disappeared or failed optimistic version checks
throw new NucleusOptimisticException(Localiser.msg("052203", op.getObjectAsPrintable(), op.getInternalObjectId(), "" + op.getTransactionalVersion()), op.getObject());
}
if (relatedObjectsToDelete != null && !relatedObjectsToDelete.isEmpty()) {
// Delete any related objects that need deleting after the delete of this object
Iterator iter = relatedObjectsToDelete.iterator();
while (iter.hasNext()) {
Object relatedObject = iter.next();
ec.deleteObjectInternal(relatedObject);
}
}
} finally {
sqlControl.closeStatement(mconn, ps);
}
} finally {
mconn.release();
}
} catch (SQLException e) {
String msg = Localiser.msg("052211", op.getObjectAsPrintable(), stmt, e.getMessage());
NucleusLogger.DATASTORE_PERSIST.warn(msg);
List exceptions = new ArrayList();
exceptions.add(e);
while ((e = e.getNextException()) != null) {
exceptions.add(e);
}
throw new NucleusDataStoreException(msg, (Throwable[]) exceptions.toArray(new Throwable[exceptions.size()]));
}
}
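From the application side, DeleteRequest runs when a persistent object is deleted in a transaction; with optimistic checking enabled, the version is included in the WHERE clause and a stale version surfaces as an optimistic verification failure at flush or commit. A minimal JDO sketch (the object being deleted is whatever persistent instance the caller holds):

import javax.jdo.PersistenceManager;
import javax.jdo.Transaction;

public class DeleteUsageSketch {
    // Deletes a persistent object inside an optimistic transaction
    public static void delete(PersistenceManager pm, Object persistentObject) {
        Transaction tx = pm.currentTransaction();
        tx.setOptimistic(true);
        tx.begin();
        try {
            pm.deletePersistent(persistentObject);
            // Commit flushes the DELETE; a concurrent change to the row's version
            // is reported as a javax.jdo.JDOOptimisticVerificationException here
            tx.commit();
        } finally {
            if (tx.isActive()) {
                tx.rollback();
            }
        }
    }
}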