Use of org.datanucleus.exceptions.NucleusOptimisticException in project datanucleus-rdbms by datanucleus.
The class DeleteRequest, method execute.
/**
 * Method performing the deletion of the record from the datastore.
 * Takes the constructed deletion query and populates it with the specific record information.
 * @param op The ObjectProvider for the record to be deleted.
 */
public void execute(ObjectProvider op) {
    if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
        // Debug information about what we are deleting
        NucleusLogger.PERSISTENCE.debug(Localiser.msg("052210", op.getObjectAsPrintable(), table));
    }

    // Process all related fields first
    // a). Delete any dependent objects
    // b). Null any non-dependent objects with FK at other side
    ClassLoaderResolver clr = op.getExecutionContext().getClassLoaderResolver();
    Set relatedObjectsToDelete = null;
    for (int i = 0; i < callbacks.length; ++i) {
        if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
            NucleusLogger.PERSISTENCE.debug(Localiser.msg("052212", op.getObjectAsPrintable(), ((JavaTypeMapping) callbacks[i]).getMemberMetaData().getFullFieldName()));
        }
        callbacks[i].preDelete(op);

        // Check for any dependent related 1-1 objects where we hold the FK and where the object hasn't been deleted.
        // This can happen if this DeleteRequest was triggered by delete-orphans and so the related object has to be deleted *after* this object.
        // It's likely we could do this better by using AttachFieldManager and just marking the "orphan" (i.e. this object) as deleted
        // (see AttachFieldManager TODO regarding when not copying)
        JavaTypeMapping mapping = (JavaTypeMapping) callbacks[i];
        AbstractMemberMetaData mmd = mapping.getMemberMetaData();
        RelationType relationType = mmd.getRelationType(clr);
        if (mmd.isDependent() && (relationType == RelationType.ONE_TO_ONE_UNI || (relationType == RelationType.ONE_TO_ONE_BI && mmd.getMappedBy() == null))) {
            try {
                op.isLoaded(mmd.getAbsoluteFieldNumber());
                Object relatedPc = op.provideField(mmd.getAbsoluteFieldNumber());
                boolean relatedObjectDeleted = op.getExecutionContext().getApiAdapter().isDeleted(relatedPc);
                if (!relatedObjectDeleted) {
                    if (relatedObjectsToDelete == null) {
                        relatedObjectsToDelete = new HashSet();
                    }
                    relatedObjectsToDelete.add(relatedPc);
                }
            } catch (Exception e) {
                // Should be XXXObjectNotFoundException but don't want to use JDO class
            }
        }
    }

    // and cater for other cases, in particular persistent interfaces
    if (oneToOneNonOwnerFields != null && oneToOneNonOwnerFields.length > 0) {
        for (int i = 0; i < oneToOneNonOwnerFields.length; i++) {
            AbstractMemberMetaData relatedFmd = oneToOneNonOwnerFields[i];
            updateOneToOneBidirectionalOwnerObjectForField(op, relatedFmd);
        }
    }

    // Choose the statement based on whether optimistic or not
    String stmt = null;
    ExecutionContext ec = op.getExecutionContext();
    RDBMSStoreManager storeMgr = table.getStoreManager();
    boolean optimisticChecks = false;
    if (table.getSurrogateColumn(SurrogateColumnType.SOFTDELETE) != null) {
        stmt = softDeleteStmt;
    } else {
        optimisticChecks = (versionMetaData != null && ec.getTransaction().getOptimistic() && versionChecks);
        if (optimisticChecks) {
            stmt = deleteStmtOptimistic;
        } else {
            stmt = deleteStmt;
        }
    }

    // Process the delete of this object
    try {
        ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
        SQLController sqlControl = storeMgr.getSQLController();
        try {
            // Perform the delete
            boolean batch = true;
            if (optimisticChecks || !ec.getTransaction().isActive()) {
                // Turn OFF batching if doing optimistic checks (since we need the result of the delete)
                // or if using nontransactional writes (since we want it sent to the datastore now)
                batch = false;
            }
            PreparedStatement ps = sqlControl.getStatementForUpdate(mconn, stmt, batch);
            try {
                // Provide WHERE clause field(s)
                if (cmd.getIdentityType() == IdentityType.DATASTORE) {
                    StatementMappingIndex mapIdx = mappingStatementIndex.getWhereDatastoreId();
                    for (int i = 0; i < mapIdx.getNumberOfParameterOccurrences(); i++) {
                        table.getSurrogateMapping(SurrogateColumnType.DATASTORE_ID, false).setObject(ec, ps, mapIdx.getParameterPositionsForOccurrence(i), op.getInternalObjectId());
                    }
                } else {
                    StatementClassMapping mappingDefinition = new StatementClassMapping();
                    StatementMappingIndex[] idxs = mappingStatementIndex.getWhereFields();
                    for (int i = 0; i < idxs.length; i++) {
                        if (idxs[i] != null) {
                            mappingDefinition.addMappingForMember(i, idxs[i]);
                        }
                    }
                    op.provideFields(whereFieldNumbers, new ParameterSetter(op, ps, mappingDefinition));
                }
                if (multitenancyStatementMapping != null) {
                    table.getSurrogateMapping(SurrogateColumnType.MULTITENANCY, false).setObject(ec, ps, multitenancyStatementMapping.getParameterPositionsForOccurrence(0), ec.getNucleusContext().getMultiTenancyId(ec, cmd));
                }
                if (optimisticChecks) {
                    // WHERE clause - current version
                    JavaTypeMapping verMapping = mappingStatementIndex.getWhereVersion().getMapping();
                    Object currentVersion = op.getTransactionalVersion();
                    if (currentVersion == null) {
                        // Somehow the version is not set on this object (not read in?) so report the bug
                        String msg = Localiser.msg("052202", op.getInternalObjectId(), table);
                        NucleusLogger.PERSISTENCE.error(msg);
                        throw new NucleusException(msg);
                    }
                    StatementMappingIndex mapIdx = mappingStatementIndex.getWhereVersion();
                    for (int i = 0; i < mapIdx.getNumberOfParameterOccurrences(); i++) {
                        verMapping.setObject(ec, ps, mapIdx.getParameterPositionsForOccurrence(i), currentVersion);
                    }
                }
                int[] rcs = sqlControl.executeStatementUpdate(ec, mconn, stmt, ps, !batch);
                if (optimisticChecks && rcs[0] == 0) {
                    // No object deleted, so either the object disappeared or it failed the optimistic version check
                    throw new NucleusOptimisticException(Localiser.msg("052203", op.getObjectAsPrintable(), op.getInternalObjectId(), "" + op.getTransactionalVersion()), op.getObject());
                }
                if (relatedObjectsToDelete != null && !relatedObjectsToDelete.isEmpty()) {
                    // Delete any related objects that need deleting after the delete of this object
                    Iterator iter = relatedObjectsToDelete.iterator();
                    while (iter.hasNext()) {
                        Object relatedObject = iter.next();
                        ec.deleteObjectInternal(relatedObject);
                    }
                }
            } finally {
                sqlControl.closeStatement(mconn, ps);
            }
        } finally {
            mconn.release();
        }
    } catch (SQLException e) {
        String msg = Localiser.msg("052211", op.getObjectAsPrintable(), stmt, e.getMessage());
        NucleusLogger.DATASTORE_PERSIST.warn(msg);
        List exceptions = new ArrayList();
        exceptions.add(e);
        while ((e = e.getNextException()) != null) {
            exceptions.add(e);
        }
        throw new NucleusDataStoreException(msg, (Throwable[]) exceptions.toArray(new Throwable[exceptions.size()]));
    }
}
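
In the optimistic branch above, the generated statement is a DELETE whose WHERE clause carries the current version alongside the key, and the returned row count decides between success and NucleusOptimisticException. A minimal standalone JDBC sketch of the same pattern; the table and column names (MYOBJECT, ID, VERSION) are hypothetical and not taken from DataNucleus:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class OptimisticDeleteSketch {
    /**
     * Deletes a row only if its version still matches the version we originally read.
     * Returns normally on success; throws if another transaction changed or removed the row.
     */
    public static void deleteWithVersionCheck(Connection con, long id, long expectedVersion) throws SQLException {
        String sql = "DELETE FROM MYOBJECT WHERE ID = ? AND VERSION = ?";
        try (PreparedStatement ps = con.prepareStatement(sql)) {
            ps.setLong(1, id);
            ps.setLong(2, expectedVersion);
            int deleted = ps.executeUpdate();
            if (deleted == 0) {
                // Mirrors the rcs[0] == 0 check above: the row vanished or the version moved on
                throw new IllegalStateException("Optimistic check failed for id=" + id);
            }
        }
    }
}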
Use of org.datanucleus.exceptions.NucleusOptimisticException in project datanucleus-rdbms by datanucleus.
The class UpdateRequest, method execute.
/**
 * Method performing the update of the record in the datastore.
 * Takes the constructed update query and populates it with the specific record information.
 * @param op The ObjectProvider for the record to be updated
 */
public void execute(ObjectProvider op) {
    // Choose the statement based on whether optimistic or not
    String stmt = null;
    ExecutionContext ec = op.getExecutionContext();
    boolean optimisticChecks = (versionMetaData != null && ec.getTransaction().getOptimistic() && versionChecks);
    stmt = optimisticChecks ? updateStmtOptimistic : updateStmt;
    if (stmt != null) {
        // TODO Support surrogate update user/timestamp
        AbstractMemberMetaData[] mmds = cmd.getManagedMembers();
        for (int i = 0; i < mmds.length; i++) {
            if (mmds[i].isUpdateTimestamp()) { // TODO Make this accessible from cmd
                op.replaceField(mmds[i].getAbsoluteFieldNumber(), new Timestamp(ec.getTransaction().getIsActive() ? ec.getTransaction().getBeginTime() : System.currentTimeMillis()));
            } else if (mmds[i].isUpdateUser()) { // TODO Make this accessible from cmd
                op.replaceField(mmds[i].getAbsoluteFieldNumber(), ec.getNucleusContext().getCurrentUser(ec));
            }
        }
        if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
            // Debug info about fields being updated
            StringBuilder fieldStr = new StringBuilder();
            if (updateFieldNumbers != null) {
                for (int i = 0; i < updateFieldNumbers.length; i++) {
                    if (fieldStr.length() > 0) {
                        fieldStr.append(",");
                    }
                    fieldStr.append(cmd.getMetaDataForManagedMemberAtAbsolutePosition(updateFieldNumbers[i]).getName());
                }
            }
            if (versionMetaData != null && versionMetaData.getFieldName() == null) {
                if (fieldStr.length() > 0) {
                    fieldStr.append(",");
                }
                fieldStr.append("[VERSION]");
            }
            // Debug information about what we are updating
            NucleusLogger.PERSISTENCE.debug(Localiser.msg("052214", op.getObjectAsPrintable(), fieldStr.toString(), table));
        }
        RDBMSStoreManager storeMgr = table.getStoreManager();
        boolean batch = false;
        // TODO Set the batch flag based on whether we have no other SQL being invoked in here, just our UPDATE
        try {
            ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
            SQLController sqlControl = storeMgr.getSQLController();
            try {
                // Perform the update
                PreparedStatement ps = sqlControl.getStatementForUpdate(mconn, stmt, batch);
                try {
                    Object currentVersion = op.getTransactionalVersion();
                    Object nextVersion = null;
                    if (versionMetaData != null) { // TODO What if strategy is NONE?
                        // Set the next version in the object
                        if (versionMetaData.getFieldName() != null) {
                            // Version field
                            AbstractMemberMetaData verfmd = cmd.getMetaDataForMember(table.getVersionMetaData().getFieldName());
                            if (currentVersion instanceof Number) {
                                // Cater for Integer-based versions
                                currentVersion = Long.valueOf(((Number) currentVersion).longValue());
                            }
                            nextVersion = ec.getLockManager().getNextVersion(versionMetaData, currentVersion);
                            if (verfmd.getType() == Integer.class || verfmd.getType() == int.class) {
                                // Cater for Integer-based versions
                                nextVersion = Integer.valueOf(((Number) nextVersion).intValue());
                            }
                            op.replaceField(verfmd.getAbsoluteFieldNumber(), nextVersion);
                        } else {
                            // Surrogate version column
                            nextVersion = ec.getLockManager().getNextVersion(versionMetaData, currentVersion);
                        }
                        op.setTransactionalVersion(nextVersion);
                    }
                    // SET clause - set the required fields to be updated
                    if (updateFieldNumbers != null) {
                        StatementClassMapping mappingDefinition = new StatementClassMapping();
                        StatementMappingIndex[] idxs = stmtMappingDefinition.getUpdateFields();
                        for (int i = 0; i < idxs.length; i++) {
                            if (idxs[i] != null) {
                                mappingDefinition.addMappingForMember(i, idxs[i]);
                            }
                        }
                        op.provideFields(updateFieldNumbers, new ParameterSetter(op, ps, mappingDefinition));
                    }
                    if (versionMetaData != null && versionMetaData.getFieldName() == null) {
                        // SET clause - set the surrogate version column to the new version
                        StatementMappingIndex mapIdx = stmtMappingDefinition.getUpdateVersion();
                        for (int i = 0; i < mapIdx.getNumberOfParameterOccurrences(); i++) {
                            table.getSurrogateMapping(SurrogateColumnType.VERSION, false).setObject(ec, ps, mapIdx.getParameterPositionsForOccurrence(i), nextVersion);
                        }
                    }
                    // WHERE clause - primary key fields
                    if (table.getIdentityType() == IdentityType.DATASTORE) {
                        // a). datastore identity
                        StatementMappingIndex mapIdx = stmtMappingDefinition.getWhereDatastoreId();
                        for (int i = 0; i < mapIdx.getNumberOfParameterOccurrences(); i++) {
                            table.getSurrogateMapping(SurrogateColumnType.DATASTORE_ID, false).setObject(ec, ps, mapIdx.getParameterPositionsForOccurrence(i), op.getInternalObjectId());
                        }
                    } else {
                        // b). application/nondurable identity
                        StatementClassMapping mappingDefinition = new StatementClassMapping();
                        StatementMappingIndex[] idxs = stmtMappingDefinition.getWhereFields();
                        for (int i = 0; i < idxs.length; i++) {
                            if (idxs[i] != null) {
                                mappingDefinition.addMappingForMember(i, idxs[i]);
                            }
                        }
                        FieldManager fm = null;
                        if (cmd.getIdentityType() == IdentityType.NONDURABLE) {
                            fm = new OldValueParameterSetter(op, ps, mappingDefinition);
                        } else {
                            fm = new ParameterSetter(op, ps, mappingDefinition);
                        }
                        op.provideFields(whereFieldNumbers, fm);
                    }
                    if (optimisticChecks) {
                        if (currentVersion == null) {
                            // Somehow the version is not set on this object (not read in?) so report the bug
                            String msg = Localiser.msg("052201", op.getInternalObjectId(), table);
                            NucleusLogger.PERSISTENCE.error(msg);
                            throw new NucleusException(msg);
                        }
                        // WHERE clause - current version
                        StatementMappingIndex mapIdx = stmtMappingDefinition.getWhereVersion();
                        for (int i = 0; i < mapIdx.getNumberOfParameterOccurrences(); i++) {
                            mapIdx.getMapping().setObject(ec, ps, mapIdx.getParameterPositionsForOccurrence(i), currentVersion);
                        }
                    }
                    int[] rcs = sqlControl.executeStatementUpdate(ec, mconn, stmt, ps, !batch);
                    if (rcs[0] == 0 && optimisticChecks) {
                        // TODO Batching : when we use batching here we need to process these somehow
                        throw new NucleusOptimisticException(Localiser.msg("052203", op.getObjectAsPrintable(), op.getInternalObjectId(), "" + currentVersion), op.getObject());
                    }
                } finally {
                    sqlControl.closeStatement(mconn, ps);
                }
            } finally {
                mconn.release();
            }
        } catch (SQLException e) {
            String msg = Localiser.msg("052215", op.getObjectAsPrintable(), stmt, StringUtils.getStringFromStackTrace(e));
            NucleusLogger.DATASTORE_PERSIST.error(msg);
            List exceptions = new ArrayList();
            exceptions.add(e);
            while ((e = e.getNextException()) != null) {
                exceptions.add(e);
            }
            throw new NucleusDataStoreException(msg, (Throwable[]) exceptions.toArray(new Throwable[exceptions.size()]));
        }
    }
    // Execute any mapping actions now that we have done the update
    for (int i = 0; i < callbacks.length; ++i) {
        try {
            if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
                NucleusLogger.PERSISTENCE.debug(Localiser.msg("052216", op.getObjectAsPrintable(), ((JavaTypeMapping) callbacks[i]).getMemberMetaData().getFullFieldName()));
            }
            callbacks[i].postUpdate(op);
        } catch (NotYetFlushedException e) {
            op.updateFieldAfterInsert(e.getPersistable(), ((JavaTypeMapping) callbacks[i]).getMemberMetaData().getAbsoluteFieldNumber());
        }
    }
}
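
The update counterpart pairs a version bump in the SET clause with the old version in the WHERE clause; in DataNucleus the actual next-version computation is delegated to ec.getLockManager().getNextVersion(...). A hedged standalone JDBC sketch of that shape, again with illustrative table and column names (MYOBJECT, NAME, ID, VERSION) and a simple increment strategy standing in for the real one:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class OptimisticUpdateSketch {
    /**
     * Updates a row and bumps its version, guarded by the old version in the WHERE clause.
     * Returns the new version on success; throws if the guarded update matched no row.
     */
    public static long updateWithVersionCheck(Connection con, long id, String newName, long currentVersion) throws SQLException {
        long nextVersion = currentVersion + 1; // simple "version-number" strategy, for illustration
        String sql = "UPDATE MYOBJECT SET NAME = ?, VERSION = ? WHERE ID = ? AND VERSION = ?";
        try (PreparedStatement ps = con.prepareStatement(sql)) {
            ps.setString(1, newName);
            ps.setLong(2, nextVersion);
            ps.setLong(3, id);
            ps.setLong(4, currentVersion);
            if (ps.executeUpdate() == 0) {
                // Same condition as rcs[0] == 0 above: a concurrent update or delete was detected
                throw new IllegalStateException("Optimistic check failed for id=" + id);
            }
        }
        return nextVersion;
    }
}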
Use of org.datanucleus.exceptions.NucleusOptimisticException in project datanucleus-rdbms by datanucleus.
The class FlushReferential, method execute.
/* (non-Javadoc)
* @see org.datanucleus.FlushOrdered#execute(org.datanucleus.ExecutionContext, java.util.List, java.util.List, org.datanucleus.flush.OperationQueue)
*/
@Override
public List<NucleusOptimisticException> execute(ExecutionContext ec, List<ObjectProvider> primaryOPs, List<ObjectProvider> secondaryOPs, OperationQueue opQueue) {
    List<NucleusOptimisticException> flushExcps = null;

    // Phase 1 : Find all objects that have no relations or external FKs and process them first
    Set<ObjectProvider> unrelatedOPs = null;
    if (primaryOPs != null) {
        Iterator<ObjectProvider> opIter = primaryOPs.iterator();
        while (opIter.hasNext()) {
            ObjectProvider op = opIter.next();
            if (!op.isEmbedded() && isClassSuitableForBatching(ec, op.getClassMetaData())) {
                if (unrelatedOPs == null) {
                    unrelatedOPs = new HashSet<>();
                }
                unrelatedOPs.add(op);
                opIter.remove();
            }
        }
    }
    if (secondaryOPs != null) {
        Iterator<ObjectProvider> opIter = secondaryOPs.iterator();
        while (opIter.hasNext()) {
            ObjectProvider op = opIter.next();
            if (!op.isEmbedded() && isClassSuitableForBatching(ec, op.getClassMetaData())) {
                if (unrelatedOPs == null) {
                    unrelatedOPs = new HashSet<>();
                }
                unrelatedOPs.add(op);
                opIter.remove();
            }
        }
    }
    if (unrelatedOPs != null) {
        // Process DELETEs, then INSERTs, then UPDATEs
        FlushNonReferential groupedFlush = new FlushNonReferential();
        flushExcps = groupedFlush.flushDeleteInsertUpdateGrouped(unrelatedOPs, ec);
    }

    // Phase 2 : Fall back to FlushOrdered handling for the remaining objects
    List<NucleusOptimisticException> excps = super.execute(ec, primaryOPs, secondaryOPs, opQueue);

    // Return any exceptions
    if (excps != null) {
        if (flushExcps == null) {
            flushExcps = excps;
        } else {
            flushExcps.addAll(excps);
        }
    }
    return flushExcps;
}
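
Structurally this is a partition-and-delegate pass: drain the ObjectProviders that qualify for the grouped (batched) flush out of the incoming lists, then let super.execute() handle whatever remains in order. A small generic sketch of that draining step, assuming a java.util.function.Predicate stands in for the isClassSuitableForBatching test:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Predicate;

public class PartitionAndDelegate {
    /** Moves every element matching the predicate out of the input list and returns them. */
    static <T> List<T> drainMatching(List<T> input, Predicate<T> batchable) {
        List<T> drained = new ArrayList<>();
        Iterator<T> it = input.iterator();
        while (it.hasNext()) {
            T item = it.next();
            if (batchable.test(item)) {
                drained.add(item);
                it.remove(); // shrink the list that the fallback phase sees, as FlushReferential does
            }
        }
        return drained;
    }
}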
Use of org.datanucleus.exceptions.NucleusOptimisticException in project datanucleus-core by datanucleus.
The class ExecutionContextImpl, method flushInternal.
/**
 * This method flushes all dirty, new, and deleted instances to the datastore.
 * @param flushToDatastore Whether to ensure any changes reach the datastore.
 * Otherwise they will be flushed to the store manager, which is left to
 * decide the opportune moment to actually flush them to the datastore.
 * @throws NucleusOptimisticException when optimistic locking error(s) occur
 */
public void flushInternal(boolean flushToDatastore) {
    if (!flushToDatastore && dirtyOPs.isEmpty() && indirectDirtyOPs.isEmpty()) {
        // Nothing to flush so abort now
        return;
    }
    if (!tx.isActive()) {
        // Non-transactional flush, so store the ids for later
        if (nontxProcessedOPs == null) {
            nontxProcessedOPs = new HashSet<>();
        }
        nontxProcessedOPs.addAll(dirtyOPs);
        nontxProcessedOPs.addAll(indirectDirtyOPs);
    }
    flushing++;
    try {
        if (flushToDatastore) {
            // Make sure the transaction flushes its changes to the datastore
            tx.preFlush();
        }
        // Retrieve the appropriate flush process, and execute it
        FlushProcess flusher = getStoreManager().getFlushProcess();
        List<NucleusOptimisticException> optimisticFailures = flusher.execute(this, dirtyOPs, indirectDirtyOPs, operationQueue);
        if (flushToDatastore) {
            // Make sure the transaction flushes its changes to the datastore
            tx.flush();
        }
        if (optimisticFailures != null) {
            // Throw a single NucleusOptimisticException containing all optimistic failures
            throw new NucleusOptimisticException(Localiser.msg("010031"), optimisticFailures.toArray(new Throwable[optimisticFailures.size()]));
        }
    } finally {
        if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
            NucleusLogger.PERSISTENCE.debug(Localiser.msg("010004"));
        }
        flushing--;
    }
}
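
Since the individual failures end up nested inside one aggregate NucleusOptimisticException, a caller can unpack them per object. A minimal sketch of that unpacking, assuming the getNestedExceptions() and getFailedObject() accessors that NucleusException exposes in datanucleus-core; the logging around them is purely illustrative:

import org.datanucleus.exceptions.NucleusOptimisticException;

public class FlushFailureHandling {
    /** Reports each per-object optimistic failure wrapped inside the aggregate exception. */
    static void reportOptimisticFailures(NucleusOptimisticException aggregate) {
        Throwable[] nested = aggregate.getNestedExceptions();
        if (nested == null) {
            System.err.println("Optimistic failure: " + aggregate.getMessage());
            return;
        }
        for (Throwable t : nested) {
            if (t instanceof NucleusOptimisticException) {
                // getFailedObject() identifies the object whose version check failed
                Object failed = ((NucleusOptimisticException) t).getFailedObject();
                System.err.println("Version check failed for: " + failed);
            }
        }
    }
}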
Use of org.datanucleus.exceptions.NucleusOptimisticException in project datanucleus-core by datanucleus.
The class FlushOrdered, method execute.
/* (non-Javadoc)
* @see org.datanucleus.FlushProcess#execute(org.datanucleus.ExecutionContext, java.util.List, java.util.List, org.datanucleus.flush.OperationQueue)
*/
public List<NucleusOptimisticException> execute(ExecutionContext ec, List<ObjectProvider> primaryOPs, List<ObjectProvider> secondaryOPs, OperationQueue opQueue) {
    // Note that opQueue is not processed directly here, but instead will be processed via callbacks from the persistence of other objects
    // TODO The opQueue needs to be processed from here instead of via the callbacks, see NUCCORE-904
    List<NucleusOptimisticException> optimisticFailures = null;

    // Make a copy of the ObjectProviders so we don't get ConcurrentModification issues
    Object[] toFlushPrimary = null;
    Object[] toFlushSecondary = null;
    try {
        if (ec.getMultithreaded()) { // Why lock here? should be on the overall flush
            ec.getLock().lock();
        }
        if (primaryOPs != null) {
            toFlushPrimary = primaryOPs.toArray();
            primaryOPs.clear();
        }
        if (secondaryOPs != null) {
            toFlushSecondary = secondaryOPs.toArray();
            secondaryOPs.clear();
        }
    } finally {
        if (ec.getMultithreaded()) {
            ec.getLock().unlock();
        }
    }
    if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
        int total = 0;
        if (toFlushPrimary != null) {
            total += toFlushPrimary.length;
        }
        if (toFlushSecondary != null) {
            total += toFlushSecondary.length;
        }
        NucleusLogger.PERSISTENCE.debug(Localiser.msg("010003", total));
    }
    Set<Class> classesToFlush = null;
    if (ec.getNucleusContext().getStoreManager().getQueryManager().getQueryResultsCache() != null) {
        classesToFlush = new HashSet();
    }
    // a). primary dirty objects
    if (toFlushPrimary != null) {
        for (int i = 0; i < toFlushPrimary.length; i++) {
            ObjectProvider op = (ObjectProvider) toFlushPrimary[i];
            try {
                op.flush();
                if (classesToFlush != null && op.getObject() != null) {
                    classesToFlush.add(op.getObject().getClass());
                }
            } catch (NucleusOptimisticException oe) {
                if (optimisticFailures == null) {
                    optimisticFailures = new ArrayList();
                }
                optimisticFailures.add(oe);
            }
        }
    }
    // b). secondary dirty objects
    if (toFlushSecondary != null) {
        for (int i = 0; i < toFlushSecondary.length; i++) {
            ObjectProvider op = (ObjectProvider) toFlushSecondary[i];
            try {
                op.flush();
                if (classesToFlush != null && op.getObject() != null) {
                    classesToFlush.add(op.getObject().getClass());
                }
            } catch (NucleusOptimisticException oe) {
                if (optimisticFailures == null) {
                    optimisticFailures = new ArrayList();
                }
                optimisticFailures.add(oe);
            }
        }
    }
    if (opQueue != null) {
        if (!ec.getStoreManager().usesBackedSCOWrappers()) {
            // This ExecutionContext is not using backing store SCO wrappers, so process SCO Operations for cascade delete etc.
            opQueue.processOperationsForNoBackingStoreSCOs(ec);
        }
        opQueue.clearPersistDeleteUpdateOperations();
    }
    if (classesToFlush != null) {
        // Flush any query results from the cache for these types
        Iterator<Class> queryClsIter = classesToFlush.iterator();
        while (queryClsIter.hasNext()) {
            Class cls = queryClsIter.next();
            ec.getNucleusContext().getStoreManager().getQueryManager().evictQueryResultsForType(cls);
        }
    }
    return optimisticFailures;
}
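
Note the collect-and-continue shape of the two flush loops: an optimistic failure on one object does not abort the flush of the rest; it is recorded and the loop moves on. A minimal generic sketch of that policy, where the Task type is a hypothetical stand-in for ObjectProvider.flush():

import java.util.ArrayList;
import java.util.List;

public class CollectAndContinue {
    interface Task {
        void run() throws Exception;
    }

    /** Runs every task, collecting failures instead of aborting on the first one. */
    static List<Exception> runAll(List<Task> tasks) {
        List<Exception> failures = null;
        for (Task task : tasks) {
            try {
                task.run();
            } catch (Exception e) {
                if (failures == null) {
                    failures = new ArrayList<>();
                }
                failures.add(e);
            }
        }
        return failures; // null means everything flushed cleanly, mirroring FlushOrdered's contract
    }
}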