Use of org.datanucleus.ExecutionContext in project datanucleus-rdbms by datanucleus.
Class BulkFetchExistsHandler, method getStatementToBulkFetchField.
/**
* Convenience method to generate a bulk-fetch statement for the specified multi-valued field of the owning query.
* @param candidateCmd Metadata for the candidate
* @param mmd Metadata for the multi-valued field
* @param query The query
* @param parameters Parameters for the query
* @param datastoreCompilation The datastore compilation of the query
* @param mapperOptions Any options for the query to SQL mapper
* @return The bulk-fetch statement for retrieving this multi-valued field.
*/
public IteratorStatement getStatementToBulkFetchField(AbstractClassMetaData candidateCmd, AbstractMemberMetaData mmd, Query query, Map parameters, RDBMSQueryCompilation datastoreCompilation, Set<String> mapperOptions) {
IteratorStatement iterStmt = null;
ExecutionContext ec = query.getExecutionContext();
ClassLoaderResolver clr = ec.getClassLoaderResolver();
RDBMSStoreManager storeMgr = (RDBMSStoreManager) query.getStoreManager();
Store backingStore = storeMgr.getBackingStoreForField(clr, mmd, null);
if (backingStore instanceof JoinSetStore || backingStore instanceof JoinListStore || backingStore instanceof JoinArrayStore) {
// Set/List/array using join-table : Generate an iterator query of the form
if (backingStore instanceof JoinSetStore) {
iterStmt = ((JoinSetStore) backingStore).getIteratorStatement(ec, ec.getFetchPlan(), false);
} else if (backingStore instanceof JoinListStore) {
iterStmt = ((JoinListStore) backingStore).getIteratorStatement(ec, ec.getFetchPlan(), false, -1, -1);
} else if (backingStore instanceof JoinArrayStore) {
iterStmt = ((JoinArrayStore) backingStore).getIteratorStatement(ec, ec.getFetchPlan(), false);
} else {
throw new NucleusUserException("We do not support BulkFetch using EXISTS for backingStore = " + backingStore);
}
// SELECT ELEM_TBL.COL1, ELEM_TBL.COL2, ... FROM JOIN_TBL INNER_JOIN ELEM_TBL WHERE JOIN_TBL.ELEMENT_ID = ELEM_TBL.ID
// AND EXISTS (SELECT OWNER_TBL.ID FROM OWNER_TBL WHERE (queryWhereClause) AND JOIN_TBL.OWNER_ID = OWNER_TBL.ID)
SelectStatement sqlStmt = iterStmt.getSelectStatement();
JoinTable joinTbl = (JoinTable) sqlStmt.getPrimaryTable().getTable();
JavaTypeMapping joinOwnerMapping = joinTbl.getOwnerMapping();
// Generate the EXISTS subquery (based on the JDOQL/JPQL query)
SelectStatement existsStmt = RDBMSQueryUtils.getStatementForCandidates(storeMgr, sqlStmt, candidateCmd, datastoreCompilation.getResultDefinitionForClass(), ec, query.getCandidateClass(), query.isSubclasses(), query.getResult(), null, null, null);
Set<String> options = new HashSet<>();
if (mapperOptions != null) {
options.addAll(mapperOptions);
}
options.add(QueryToSQLMapper.OPTION_SELECT_CANDIDATE_ID_ONLY);
QueryToSQLMapper sqlMapper = new QueryToSQLMapper(existsStmt, query.getCompilation(), parameters, null, null, candidateCmd, query.isSubclasses(), query.getFetchPlan(), ec, query.getParsedImports(), options, query.getExtensions());
sqlMapper.compile();
// Add EXISTS clause on iterator statement so we can restrict to just the owners in this query
// ORDER BY in EXISTS is forbidden by some RDBMS
existsStmt.setOrdering(null, null);
BooleanExpression existsExpr = new BooleanSubqueryExpression(sqlStmt, "EXISTS", existsStmt);
sqlStmt.whereAnd(existsExpr, true);
// Join to outer statement so we restrict to collection elements for the query candidates
SQLExpression joinTblOwnerExpr = sqlStmt.getRDBMSManager().getSQLExpressionFactory().newExpression(sqlStmt, sqlStmt.getPrimaryTable(), joinOwnerMapping);
SQLExpression existsOwnerExpr = sqlStmt.getRDBMSManager().getSQLExpressionFactory().newExpression(existsStmt, existsStmt.getPrimaryTable(), existsStmt.getPrimaryTable().getTable().getIdMapping());
existsStmt.whereAnd(joinTblOwnerExpr.eq(existsOwnerExpr), true);
// Select the owner candidate so we can separate the collection elements out to their owner
int[] ownerColIndexes = sqlStmt.select(joinTblOwnerExpr, null);
StatementMappingIndex ownerMapIdx = new StatementMappingIndex(existsStmt.getPrimaryTable().getTable().getIdMapping());
ownerMapIdx.setColumnPositions(ownerColIndexes);
iterStmt.setOwnerMapIndex(ownerMapIdx);
} else if (backingStore instanceof FKSetStore || backingStore instanceof FKListStore || backingStore instanceof FKArrayStore) {
if (backingStore instanceof FKSetStore) {
iterStmt = ((FKSetStore) backingStore).getIteratorStatement(ec, ec.getFetchPlan(), false);
} else if (backingStore instanceof FKListStore) {
iterStmt = ((FKListStore) backingStore).getIteratorStatement(ec, ec.getFetchPlan(), false, -1, -1);
} else if (backingStore instanceof FKArrayStore) {
iterStmt = ((FKArrayStore) backingStore).getIteratorStatement(ec, ec.getFetchPlan(), false);
} else {
throw new NucleusUserException("We do not support BulkFetch using EXISTS for backingStore = " + backingStore);
}
// Set/List/array using foreign-key : Generate an iterator query of the form
// SELECT ELEM_TBL.COL1, ELEM_TBL.COL2, ... FROM ELEM_TBL
// WHERE EXISTS (SELECT OWNER_TBL.ID FROM OWNER_TBL WHERE (queryWhereClause) AND ELEM_TBL.OWNER_ID = OWNER_TBL.ID)
SelectStatement sqlStmt = iterStmt.getSelectStatement();
// Generate the EXISTS subquery (based on the JDOQL/JPQL query)
SelectStatement existsStmt = RDBMSQueryUtils.getStatementForCandidates(storeMgr, sqlStmt, candidateCmd, datastoreCompilation.getResultDefinitionForClass(), ec, query.getCandidateClass(), query.isSubclasses(), query.getResult(), null, null, null);
Set<String> options = new HashSet<>();
if (mapperOptions != null) {
options.addAll(mapperOptions);
}
options.add(QueryToSQLMapper.OPTION_SELECT_CANDIDATE_ID_ONLY);
QueryToSQLMapper sqlMapper = new QueryToSQLMapper(existsStmt, query.getCompilation(), parameters, null, null, candidateCmd, query.isSubclasses(), query.getFetchPlan(), ec, query.getParsedImports(), options, query.getExtensions());
sqlMapper.compile();
// Add EXISTS clause on iterator statement so we can restrict to just the owners in this query
// ORDER BY in EXISTS is forbidden by some RDBMS
existsStmt.setOrdering(null, null);
BooleanExpression existsExpr = new BooleanSubqueryExpression(sqlStmt, "EXISTS", existsStmt);
sqlStmt.whereAnd(existsExpr, true);
// Join to outer statement so we restrict to collection elements for the query candidates
SQLExpression elemTblOwnerExpr = sqlStmt.getRDBMSManager().getSQLExpressionFactory().newExpression(sqlStmt, sqlStmt.getPrimaryTable(), ((BaseContainerStore) backingStore).getOwnerMapping());
SQLExpression existsOwnerExpr = sqlStmt.getRDBMSManager().getSQLExpressionFactory().newExpression(existsStmt, existsStmt.getPrimaryTable(), existsStmt.getPrimaryTable().getTable().getIdMapping());
existsStmt.whereAnd(elemTblOwnerExpr.eq(existsOwnerExpr), true);
// Select the owner candidate so we can separate the collection elements out to their owner
int[] ownerColIndexes = sqlStmt.select(elemTblOwnerExpr, null);
StatementMappingIndex ownerMapIdx = new StatementMappingIndex(existsStmt.getPrimaryTable().getTable().getIdMapping());
ownerMapIdx.setColumnPositions(ownerColIndexes);
iterStmt.setOwnerMapIndex(ownerMapIdx);
}
return iterStmt;
}
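This handler is driven when a query's fetch plan includes a multi-valued field and the EXISTS bulk-fetch strategy is selected. As a rough, hedged sketch of how that is enabled from application code: the extension key "datanucleus.rdbms.query.multivaluedFetch" is the documented DataNucleus RDBMS switch, but verify it for the version in use, and the fetch-group name is simply whatever group contains the multi-valued member.
import java.util.List;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;

public class BulkFetchUsageSketch {
    // Run a JDOQL filter query with EXISTS-style bulk fetch of the multi-valued fields in the given fetch group.
    @SuppressWarnings("unchecked")
    public static <T> List<T> queryWithExistsBulkFetch(PersistenceManager pm, Class<T> candidate, String filter, String fetchGroup) {
        Query q = pm.newQuery(candidate, filter);
        q.getFetchPlan().addGroup(fetchGroup); // include the multi-valued field(s) in the fetch plan
        q.addExtension("datanucleus.rdbms.query.multivaluedFetch", "exists"); // assumed key; "none" would disable bulk fetch
        return (List<T>) q.execute();
    }
}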
Use of org.datanucleus.ExecutionContext in project datanucleus-rdbms by datanucleus.
Class BulkFetchJoinHandler, method getStatementToBulkFetchField.
/**
* Convenience method to generate a bulk-fetch statement for the specified multi-valued field of the owning query.
* @param candidateCmd Metadata for the candidate
* @param mmd Metadata for the member we are bulk-fetching the value(s) for
* @param query The query
* @param parameters Parameters for the query
* @param datastoreCompilation The datastore compilation of the query
* @param mapperOptions Any mapper options for query generation
* @return The statement to use for bulk fetching, together with mappings for extracting the results of the elements
*/
public IteratorStatement getStatementToBulkFetchField(AbstractClassMetaData candidateCmd, AbstractMemberMetaData mmd, Query query, Map parameters, RDBMSQueryCompilation datastoreCompilation, Set<String> mapperOptions) {
ExecutionContext ec = query.getExecutionContext();
ClassLoaderResolver clr = ec.getClassLoaderResolver();
RDBMSStoreManager storeMgr = (RDBMSStoreManager) query.getStoreManager();
Store backingStore = storeMgr.getBackingStoreForField(clr, mmd, null);
if (backingStore instanceof JoinSetStore || backingStore instanceof JoinListStore || backingStore instanceof JoinArrayStore) {
// Set/List/array using join-table : Generate an iterator query of the form
// SELECT ELEM_TBL.COL1, ELEM_TBL.COL2, ... FROM CANDIDATE_TBL T1 INNER JOIN JOIN_TBL T2 ON T2.OWNER_ID = T1.ID INNER_JOIN ELEM_TBL T3 ON T3.ID = T2.ELEM_ID
// WHERE (queryWhereClause)
// TODO Start from the original query, and remove any grouping, having, ordering etc, and join to join table + element table.
} else if (backingStore instanceof FKSetStore || backingStore instanceof FKListStore || backingStore instanceof FKArrayStore) {
// Set/List/array using foreign-key : Generate an iterator query of the form
// SELECT ELEM_TBL.COL1, ELEM_TBL.COL2, ... FROM ELEM_TBL
// WHERE EXISTS (SELECT OWNER_TBL.ID FROM OWNER_TBL WHERE (queryWhereClause) AND ELEM_TBL.OWNER_ID = OWNER_TBL.ID)
// TODO Start from the original query, and remove any grouping, having, ordering etc, and join to element table.
}
throw new NucleusException("BulkFetch via JOIN is not yet implemented");
// return iterStmt;
}
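Since this JOIN variant currently throws unconditionally, a hedged sketch of a fallback a caller might use follows; the handler construction and call site are assumptions, and the real strategy selection lives inside the RDBMS query classes.
// Assumed fallback pattern: prefer the JOIN strategy, fall back to the EXISTS handler while JOIN is unimplemented.
static IteratorStatement statementForBulkFetch(AbstractClassMetaData candidateCmd, AbstractMemberMetaData mmd,
        Query query, Map parameters, RDBMSQueryCompilation datastoreCompilation, Set<String> mapperOptions) {
    try {
        return new BulkFetchJoinHandler().getStatementToBulkFetchField(candidateCmd, mmd, query, parameters, datastoreCompilation, mapperOptions);
    } catch (NucleusException ne) {
        // "BulkFetch via JOIN is not yet implemented": use the EXISTS-based strategy instead
        return new BulkFetchExistsHandler().getStatementToBulkFetchField(candidateCmd, mmd, query, parameters, datastoreCompilation, mapperOptions);
    }
}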
Use of org.datanucleus.ExecutionContext in project datanucleus-rdbms by datanucleus.
Class DeleteRequest, method execute.
/**
* Method performing the deletion of the record from the datastore.
* Takes the constructed deletion query and populates with the specific record information.
* @param op The ObjectProvider for the record to be deleted.
*/
public void execute(ObjectProvider op) {
if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
// Debug information about what we are deleting
NucleusLogger.PERSISTENCE.debug(Localiser.msg("052210", op.getObjectAsPrintable(), table));
}
// Process all related fields first
// a). Delete any dependent objects
// b). Null any non-dependent objects with FK at other side
ClassLoaderResolver clr = op.getExecutionContext().getClassLoaderResolver();
Set relatedObjectsToDelete = null;
for (int i = 0; i < callbacks.length; ++i) {
if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
NucleusLogger.PERSISTENCE.debug(Localiser.msg("052212", op.getObjectAsPrintable(), ((JavaTypeMapping) callbacks[i]).getMemberMetaData().getFullFieldName()));
}
callbacks[i].preDelete(op);
// Check for any dependent related 1-1 objects where we hold the FK and where the object hasn't been deleted.
// This can happen if this DeleteRequest was triggered by delete-orphans and so the related object has to be deleted *after* this object.
// It's likely we could do this better by using AttachFieldManager and just marking the "orphan" (i.e this object) as deleted
// (see AttachFieldManager TODO regarding when not copying)
JavaTypeMapping mapping = (JavaTypeMapping) callbacks[i];
AbstractMemberMetaData mmd = mapping.getMemberMetaData();
RelationType relationType = mmd.getRelationType(clr);
if (mmd.isDependent() && (relationType == RelationType.ONE_TO_ONE_UNI || (relationType == RelationType.ONE_TO_ONE_BI && mmd.getMappedBy() == null))) {
try {
op.isLoaded(mmd.getAbsoluteFieldNumber());
Object relatedPc = op.provideField(mmd.getAbsoluteFieldNumber());
boolean relatedObjectDeleted = op.getExecutionContext().getApiAdapter().isDeleted(relatedPc);
if (!relatedObjectDeleted) {
if (relatedObjectsToDelete == null) {
relatedObjectsToDelete = new HashSet();
}
relatedObjectsToDelete.add(relatedPc);
}
} catch (Exception e) {
// Should be XXXObjectNotFoundException but don't want to use the JDO class
}
}
}
// Null any FKs at the other side of 1-1 bidirectional relations where this object is the non-owner, and cater for other cases, in particular persistent interfaces
if (oneToOneNonOwnerFields != null && oneToOneNonOwnerFields.length > 0) {
for (int i = 0; i < oneToOneNonOwnerFields.length; i++) {
AbstractMemberMetaData relatedFmd = oneToOneNonOwnerFields[i];
updateOneToOneBidirectionalOwnerObjectForField(op, relatedFmd);
}
}
// Choose the statement based on whether optimistic or not
String stmt = null;
ExecutionContext ec = op.getExecutionContext();
RDBMSStoreManager storeMgr = table.getStoreManager();
boolean optimisticChecks = false;
if (table.getSurrogateColumn(SurrogateColumnType.SOFTDELETE) != null) {
stmt = softDeleteStmt;
} else {
optimisticChecks = (versionMetaData != null && ec.getTransaction().getOptimistic() && versionChecks);
if (optimisticChecks) {
stmt = deleteStmtOptimistic;
} else {
stmt = deleteStmt;
}
}
// Process the delete of this object
try {
ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
SQLController sqlControl = storeMgr.getSQLController();
try {
// Perform the delete
boolean batch = true;
if (optimisticChecks || !ec.getTransaction().isActive()) {
// Turn OFF batching if doing optimistic checks (since we need the result of the delete)
// or if using nontransactional writes (since we want it sending to the datastore now)
batch = false;
}
PreparedStatement ps = sqlControl.getStatementForUpdate(mconn, stmt, batch);
try {
// provide WHERE clause field(s)
if (cmd.getIdentityType() == IdentityType.DATASTORE) {
StatementMappingIndex mapIdx = mappingStatementIndex.getWhereDatastoreId();
for (int i = 0; i < mapIdx.getNumberOfParameterOccurrences(); i++) {
table.getSurrogateMapping(SurrogateColumnType.DATASTORE_ID, false).setObject(ec, ps, mapIdx.getParameterPositionsForOccurrence(i), op.getInternalObjectId());
}
} else {
StatementClassMapping mappingDefinition = new StatementClassMapping();
StatementMappingIndex[] idxs = mappingStatementIndex.getWhereFields();
for (int i = 0; i < idxs.length; i++) {
if (idxs[i] != null) {
mappingDefinition.addMappingForMember(i, idxs[i]);
}
}
op.provideFields(whereFieldNumbers, new ParameterSetter(op, ps, mappingDefinition));
}
if (multitenancyStatementMapping != null) {
table.getSurrogateMapping(SurrogateColumnType.MULTITENANCY, false).setObject(ec, ps, multitenancyStatementMapping.getParameterPositionsForOccurrence(0), ec.getNucleusContext().getMultiTenancyId(ec, cmd));
}
if (optimisticChecks) {
// WHERE clause - current version discriminator
JavaTypeMapping verMapping = mappingStatementIndex.getWhereVersion().getMapping();
Object currentVersion = op.getTransactionalVersion();
if (currentVersion == null) {
// Somehow the version is not set on this object (not read in ?) so report the bug
String msg = Localiser.msg("052202", op.getInternalObjectId(), table);
NucleusLogger.PERSISTENCE.error(msg);
throw new NucleusException(msg);
}
StatementMappingIndex mapIdx = mappingStatementIndex.getWhereVersion();
for (int i = 0; i < mapIdx.getNumberOfParameterOccurrences(); i++) {
verMapping.setObject(ec, ps, mapIdx.getParameterPositionsForOccurrence(i), currentVersion);
}
}
int[] rcs = sqlControl.executeStatementUpdate(ec, mconn, stmt, ps, !batch);
if (optimisticChecks && rcs[0] == 0) {
// No object deleted so either object disappeared or failed optimistic version checks
throw new NucleusOptimisticException(Localiser.msg("052203", op.getObjectAsPrintable(), op.getInternalObjectId(), "" + op.getTransactionalVersion()), op.getObject());
}
if (relatedObjectsToDelete != null && !relatedObjectsToDelete.isEmpty()) {
// Delete any related objects that need deleting after the delete of this object
Iterator iter = relatedObjectsToDelete.iterator();
while (iter.hasNext()) {
Object relatedObject = iter.next();
ec.deleteObjectInternal(relatedObject);
}
}
} finally {
sqlControl.closeStatement(mconn, ps);
}
} finally {
mconn.release();
}
} catch (SQLException e) {
String msg = Localiser.msg("052211", op.getObjectAsPrintable(), stmt, e.getMessage());
NucleusLogger.DATASTORE_PERSIST.warn(msg);
List exceptions = new ArrayList();
exceptions.add(e);
while ((e = e.getNextException()) != null) {
exceptions.add(e);
}
throw new NucleusDataStoreException(msg, (Throwable[]) exceptions.toArray(new Throwable[exceptions.size()]));
}
}
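When the optimistic statement is used, an update count of zero means the row was changed or removed concurrently, and a NucleusOptimisticException is raised. From the JDO application side this normally surfaces at flush/commit; the hedged sketch below assumes the usual exception mapping of the JDO API layer, and the identity value is hypothetical.
import javax.jdo.JDOOptimisticVerificationException;
import javax.jdo.PersistenceManager;

public class OptimisticDeleteSketch {
    public static void deleteWithVersionCheck(PersistenceManager pm, Object id) {
        try {
            pm.currentTransaction().setOptimistic(true);
            pm.currentTransaction().begin();
            Object entity = pm.getObjectById(id);
            pm.deletePersistent(entity);      // DeleteRequest.execute runs at flush/commit
            pm.currentTransaction().commit(); // versioned DELETE: WHERE <id> ... AND VERSION = ?
        } catch (JDOOptimisticVerificationException e) {
            // Zero rows matched: the row was updated or removed by another transaction
            if (pm.currentTransaction().isActive()) {
                pm.currentTransaction().rollback();
            }
        }
    }
}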
Use of org.datanucleus.ExecutionContext in project datanucleus-rdbms by datanucleus.
Class FetchRequest, method execute.
/* (non-Javadoc)
* @see org.datanucleus.store.rdbms.request.Request#execute(org.datanucleus.state.ObjectProvider)
*/
public void execute(ObjectProvider op) {
if (fieldsToFetch != null && NucleusLogger.PERSISTENCE.isDebugEnabled()) {
// Debug information about what we are retrieving
NucleusLogger.PERSISTENCE.debug(Localiser.msg("052218", op.getObjectAsPrintable(), fieldsToFetch, table));
}
if (((fetchingSurrogateVersion || versionFieldName != null) && numberOfFieldsToFetch == 0) && op.isVersionLoaded()) {
// Fetching only the version and it is already loaded, so do nothing
} else if (statementLocked != null) {
ExecutionContext ec = op.getExecutionContext();
RDBMSStoreManager storeMgr = table.getStoreManager();
boolean locked = ec.getSerializeReadForClass(op.getClassMetaData().getFullClassName());
LockMode lockType = ec.getLockManager().getLockMode(op.getInternalObjectId());
if (lockType != LockMode.LOCK_NONE) {
if (lockType == LockMode.LOCK_PESSIMISTIC_READ || lockType == LockMode.LOCK_PESSIMISTIC_WRITE) {
// Override with pessimistic lock
locked = true;
}
}
String statement = (locked ? statementLocked : statementUnlocked);
StatementClassMapping mappingDef = mappingDefinition;
/*if ((sm.isDeleting() || sm.isDetaching()) && mappingDefinition.hasChildMappingDefinitions())
{
// Don't fetch any children since the object is being deleted
mappingDef = mappingDefinition.cloneStatementMappingWithoutChildren();
}*/
try {
ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
SQLController sqlControl = storeMgr.getSQLController();
try {
PreparedStatement ps = sqlControl.getStatementForQuery(mconn, statement);
AbstractClassMetaData cmd = op.getClassMetaData();
try {
// Provide the primary key field(s) to the JDBC statement
if (cmd.getIdentityType() == IdentityType.DATASTORE) {
StatementMappingIndex datastoreIdx = mappingDef.getMappingForMemberPosition(SurrogateColumnType.DATASTORE_ID.getFieldNumber());
for (int i = 0; i < datastoreIdx.getNumberOfParameterOccurrences(); i++) {
table.getSurrogateMapping(SurrogateColumnType.DATASTORE_ID, false).setObject(ec, ps, datastoreIdx.getParameterPositionsForOccurrence(i), op.getInternalObjectId());
}
} else if (cmd.getIdentityType() == IdentityType.APPLICATION) {
op.provideFields(cmd.getPKMemberPositions(), new ParameterSetter(op, ps, mappingDef));
}
JavaTypeMapping multitenancyMapping = table.getSurrogateMapping(SurrogateColumnType.MULTITENANCY, false);
if (multitenancyMapping != null) {
// Provide the tenant id to the JDBC statement
StatementMappingIndex multitenancyIdx = mappingDef.getMappingForMemberPosition(SurrogateColumnType.MULTITENANCY.getFieldNumber());
String tenantId = ec.getNucleusContext().getMultiTenancyId(ec, cmd);
for (int i = 0; i < multitenancyIdx.getNumberOfParameterOccurrences(); i++) {
multitenancyMapping.setObject(ec, ps, multitenancyIdx.getParameterPositionsForOccurrence(i), tenantId);
}
}
JavaTypeMapping softDeleteMapping = table.getSurrogateMapping(SurrogateColumnType.SOFTDELETE, false);
if (softDeleteMapping != null) {
// Set SoftDelete parameter in statement
StatementMappingIndex softDeleteIdx = mappingDefinition.getMappingForMemberPosition(SurrogateColumnType.SOFTDELETE.getFieldNumber());
for (int i = 0; i < softDeleteIdx.getNumberOfParameterOccurrences(); i++) {
softDeleteMapping.setObject(ec, ps, softDeleteIdx.getParameterPositionsForOccurrence(i), Boolean.FALSE);
}
}
// Execute the statement
ResultSet rs = sqlControl.executeStatementQuery(ec, mconn, statement, ps);
try {
// Check for failure to find the object
if (!rs.next()) {
if (NucleusLogger.DATASTORE_RETRIEVE.isInfoEnabled()) {
NucleusLogger.DATASTORE_RETRIEVE.info(Localiser.msg("050018", op.getInternalObjectId()));
}
throw new NucleusObjectNotFoundException("No such database row", op.getInternalObjectId());
}
// Copy the results into the object
ResultSetGetter rsGetter = new ResultSetGetter(ec, rs, mappingDef, op.getClassMetaData());
rsGetter.setObjectProvider(op);
op.replaceFields(memberNumbersToFetch, rsGetter);
if (op.getTransactionalVersion() == null) {
// Object has no version set so update it from this fetch
Object datastoreVersion = null;
if (fetchingSurrogateVersion) {
// Surrogate version column - get from the result set using the version mapping
StatementMappingIndex verIdx = mappingDef.getMappingForMemberPosition(SurrogateColumnType.VERSION.getFieldNumber());
datastoreVersion = table.getSurrogateMapping(SurrogateColumnType.VERSION, true).getObject(ec, rs, verIdx.getColumnPositions());
} else if (versionFieldName != null) {
// Version field - now populated in the field in the object from the results
datastoreVersion = op.provideField(cmd.getAbsolutePositionOfMember(versionFieldName));
}
op.setVersion(datastoreVersion);
}
} finally {
rs.close();
}
} finally {
sqlControl.closeStatement(mconn, ps);
}
} finally {
mconn.release();
}
} catch (SQLException sqle) {
String msg = Localiser.msg("052219", op.getObjectAsPrintable(), statement, sqle.getMessage());
NucleusLogger.DATASTORE_RETRIEVE.warn(msg);
List exceptions = new ArrayList();
exceptions.add(sqle);
while ((sqle = sqle.getNextException()) != null) {
exceptions.add(sqle);
}
throw new NucleusDataStoreException(msg, (Throwable[]) exceptions.toArray(new Throwable[exceptions.size()]));
}
}
// Execute any mapping actions now that we have fetched the fields
for (int i = 0; i < callbacks.length; ++i) {
callbacks[i].postFetch(op);
}
}
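The locked statement variant is chosen when serialized reads are enabled for the class or a pessimistic lock mode has been registered for the object. A hedged JDO-level sketch of requesting serialized reads (transaction handling is simplified; the identity value is hypothetical):
import javax.jdo.PersistenceManager;

public class LockedFetchSketch {
    public static Object fetchWithLockedRead(PersistenceManager pm, Object id) {
        pm.currentTransaction().setSerializeRead(Boolean.TRUE); // maps onto ec.getSerializeReadForClass(...)
        pm.currentTransaction().begin();
        try {
            // Loading the object's unloaded fields goes through FetchRequest.execute using the locked SQL
            Object obj = pm.getObjectById(id);
            pm.currentTransaction().commit();
            return obj;
        } finally {
            if (pm.currentTransaction().isActive()) {
                pm.currentTransaction().rollback();
            }
        }
    }
}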
Use of org.datanucleus.ExecutionContext in project datanucleus-rdbms by datanucleus.
Class InsertRequest, method execute.
/**
* Method performing the insertion of the record from the datastore.
* Takes the constructed insert query and populates with the specific record information.
* @param op The ObjectProvider for the record to be inserted
*/
public void execute(ObjectProvider op) {
ExecutionContext ec = op.getExecutionContext();
if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
// Debug information about what we are inserting
NucleusLogger.PERSISTENCE.debug(Localiser.msg("052207", op.getObjectAsPrintable(), table));
}
try {
VersionMetaData vermd = table.getVersionMetaData();
RDBMSStoreManager storeMgr = table.getStoreManager();
if (vermd != null && vermd.getFieldName() != null) {
// Version field - Update the version in the object
AbstractMemberMetaData verfmd = ((AbstractClassMetaData) vermd.getParent()).getMetaDataForMember(vermd.getFieldName());
Object currentVersion = op.getVersion();
if (currentVersion instanceof Number) {
// Cater for Integer based versions
currentVersion = Long.valueOf(((Number) currentVersion).longValue());
}
Object nextOptimisticVersion = ec.getLockManager().getNextVersion(vermd, currentVersion);
if (verfmd.getType() == Integer.class || verfmd.getType() == int.class) {
// Cater for Integer based versions
nextOptimisticVersion = Integer.valueOf(((Number) nextOptimisticVersion).intValue());
}
op.replaceField(verfmd.getAbsoluteFieldNumber(), nextOptimisticVersion);
}
// Set the state to "inserting" (may already be at this state if multiple inheritance level INSERT)
op.changeActivityState(ActivityState.INSERTING);
SQLController sqlControl = storeMgr.getSQLController();
ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
try {
PreparedStatement ps = sqlControl.getStatementForUpdate(mconn, insertStmt, batch, hasIdentityColumn && storeMgr.getDatastoreAdapter().supportsOption(DatastoreAdapter.GET_GENERATED_KEYS_STATEMENT));
try {
StatementClassMapping mappingDefinition = new StatementClassMapping();
StatementMappingIndex[] idxs = stmtMappings;
for (int i = 0; i < idxs.length; i++) {
if (idxs[i] != null) {
mappingDefinition.addMappingForMember(i, idxs[i]);
}
}
// Provide the primary key field(s)
if (table.getIdentityType() == IdentityType.DATASTORE) {
if (!table.isObjectIdDatastoreAttributed() || !table.isBaseDatastoreClass()) {
int[] paramNumber = { IDPARAMNUMBER };
table.getSurrogateMapping(SurrogateColumnType.DATASTORE_ID, false).setObject(ec, ps, paramNumber, op.getInternalObjectId());
}
} else if (table.getIdentityType() == IdentityType.APPLICATION) {
op.provideFields(pkFieldNumbers, new ParameterSetter(op, ps, mappingDefinition));
}
// Provide all non-key fields needed for the insert; this provides "persistence-by-reachability" for these fields
if (insertFieldNumbers.length > 0) {
// TODO Support surrogate current-user, create-timestamp
int numberOfFieldsToProvide = 0;
for (int i = 0; i < insertFieldNumbers.length; i++) {
if (insertFieldNumbers[i] < op.getClassMetaData().getMemberCount()) {
AbstractMemberMetaData mmd = op.getClassMetaData().getMetaDataForManagedMemberAtAbsolutePosition(insertFieldNumbers[i]);
if (mmd.isCreateTimestamp()) {
// Set create timestamp to time for the start of this transaction
op.replaceField(insertFieldNumbers[i], new Timestamp(ec.getTransaction().getIsActive() ? ec.getTransaction().getBeginTime() : System.currentTimeMillis()));
} else if (mmd.isCreateUser()) {
// Set create user to current user
op.replaceField(insertFieldNumbers[i], ec.getNucleusContext().getCurrentUser(ec));
}
numberOfFieldsToProvide++;
}
}
int j = 0;
int[] fieldNums = new int[numberOfFieldsToProvide];
for (int i = 0; i < insertFieldNumbers.length; i++) {
if (insertFieldNumbers[i] < op.getClassMetaData().getMemberCount()) {
fieldNums[j++] = insertFieldNumbers[i];
}
}
op.provideFields(fieldNums, new ParameterSetter(op, ps, mappingDefinition));
}
JavaTypeMapping versionMapping = table.getSurrogateMapping(SurrogateColumnType.VERSION, false);
if (versionMapping != null) {
// Surrogate version - set the new version for the object
Object currentVersion = op.getVersion();
Object nextOptimisticVersion = ec.getLockManager().getNextVersion(vermd, currentVersion);
for (int k = 0; k < versionStmtMapping.getNumberOfParameterOccurrences(); k++) {
versionMapping.setObject(ec, ps, versionStmtMapping.getParameterPositionsForOccurrence(k), nextOptimisticVersion);
}
op.setTransactionalVersion(nextOptimisticVersion);
} else if (vermd != null && vermd.getFieldName() != null) {
// Version field - set the new version for the object
Object currentVersion = op.getVersion();
Object nextOptimisticVersion = ec.getLockManager().getNextVersion(vermd, currentVersion);
op.setTransactionalVersion(nextOptimisticVersion);
}
if (multitenancyStmtMapping != null) {
// Multitenancy mapping
table.getSurrogateMapping(SurrogateColumnType.MULTITENANCY, false).setObject(ec, ps, multitenancyStmtMapping.getParameterPositionsForOccurrence(0), ec.getNucleusContext().getMultiTenancyId(ec, op.getClassMetaData()));
}
if (softDeleteStmtMapping != null) {
// Soft-Delete mapping
table.getSurrogateMapping(SurrogateColumnType.SOFTDELETE, false).setObject(ec, ps, softDeleteStmtMapping.getParameterPositionsForOccurrence(0), Boolean.FALSE);
}
JavaTypeMapping discrimMapping = table.getSurrogateMapping(SurrogateColumnType.DISCRIMINATOR, false);
if (discrimMapping != null) {
// Discriminator mapping
Object discVal = op.getClassMetaData().getDiscriminatorValue();
for (int k = 0; k < discriminatorStmtMapping.getNumberOfParameterOccurrences(); k++) {
discrimMapping.setObject(ec, ps, discriminatorStmtMapping.getParameterPositionsForOccurrence(k), discVal);
}
}
// External FK columns (optional)
if (externalFKStmtMappings != null) {
for (int i = 0; i < externalFKStmtMappings.length; i++) {
Object fkValue = op.getAssociatedValue(externalFKStmtMappings[i].getMapping());
if (fkValue != null) {
// Need to provide the owner field number so PCMapping can work out if it is inserted yet
AbstractMemberMetaData ownerFmd = table.getMetaDataForExternalMapping(externalFKStmtMappings[i].getMapping(), MappingType.EXTERNAL_FK);
for (int k = 0; k < externalFKStmtMappings[i].getNumberOfParameterOccurrences(); k++) {
externalFKStmtMappings[i].getMapping().setObject(ec, ps, externalFKStmtMappings[i].getParameterPositionsForOccurrence(k), fkValue, null, ownerFmd.getAbsoluteFieldNumber());
}
} else {
// We're inserting a null so don't need the owner field
for (int k = 0; k < externalFKStmtMappings[i].getNumberOfParameterOccurrences(); k++) {
externalFKStmtMappings[i].getMapping().setObject(ec, ps, externalFKStmtMappings[i].getParameterPositionsForOccurrence(k), null);
}
}
}
}
// External FK discriminator columns (optional)
if (externalFKDiscrimStmtMappings != null) {
for (int i = 0; i < externalFKDiscrimStmtMappings.length; i++) {
Object discrimValue = op.getAssociatedValue(externalFKDiscrimStmtMappings[i].getMapping());
for (int k = 0; k < externalFKDiscrimStmtMappings[i].getNumberOfParameterOccurrences(); k++) {
externalFKDiscrimStmtMappings[i].getMapping().setObject(ec, ps, externalFKDiscrimStmtMappings[i].getParameterPositionsForOccurrence(k), discrimValue);
}
}
}
// External order columns (optional)
if (externalOrderStmtMappings != null) {
for (int i = 0; i < externalOrderStmtMappings.length; i++) {
Object orderValue = op.getAssociatedValue(externalOrderStmtMappings[i].getMapping());
if (orderValue == null) {
// No order value so use -1
orderValue = Integer.valueOf(-1);
}
for (int k = 0; k < externalOrderStmtMappings[i].getNumberOfParameterOccurrences(); k++) {
externalOrderStmtMappings[i].getMapping().setObject(ec, ps, externalOrderStmtMappings[i].getParameterPositionsForOccurrence(k), orderValue);
}
}
}
sqlControl.executeStatementUpdate(ec, mconn, insertStmt, ps, !batch);
if (hasIdentityColumn) {
// Identity was set in the datastore using auto-increment/identity/serial etc
Object newId = getInsertedDatastoreIdentity(ec, sqlControl, op, mconn, ps);
if (NucleusLogger.DATASTORE_PERSIST.isDebugEnabled()) {
NucleusLogger.DATASTORE_PERSIST.debug(Localiser.msg("052206", op.getObjectAsPrintable(), newId));
}
op.setPostStoreNewObjectId(newId);
}
// Execute any mapping actions on the insert of the fields (e.g Oracle CLOBs/BLOBs)
for (int i = 0; i < callbacks.length; ++i) {
if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
NucleusLogger.PERSISTENCE.debug(Localiser.msg("052222", op.getObjectAsPrintable(), ((JavaTypeMapping) callbacks[i]).getMemberMetaData().getFullFieldName()));
}
callbacks[i].insertPostProcessing(op);
}
// Update the insert status for this table via the StoreManager
storeMgr.setObjectIsInsertedToLevel(op, table);
// Make sure any detached values of the relation fields processed in this INSERT are attached now
// (if we did it the other way around we would get a NotYetFlushedException thrown above).
for (int i = 0; i < relationFieldNumbers.length; i++) {
Object value = op.provideField(relationFieldNumbers[i]);
if (value != null && ec.getApiAdapter().isDetached(value)) {
Object valueAttached = ec.persistObjectInternal(value, null, -1, ObjectProvider.PC);
op.replaceField(relationFieldNumbers[i], valueAttached);
}
}
// Perform reachability on all fields that have no datastore column (1-1 bi non-owner, N-1 bi join)
if (reachableFieldNumbers.length > 0) {
int numberOfReachableFields = 0;
for (int i = 0; i < reachableFieldNumbers.length; i++) {
if (reachableFieldNumbers[i] < op.getClassMetaData().getMemberCount()) {
numberOfReachableFields++;
}
}
int[] fieldNums = new int[numberOfReachableFields];
int j = 0;
for (int i = 0; i < reachableFieldNumbers.length; i++) {
if (reachableFieldNumbers[i] < op.getClassMetaData().getMemberCount()) {
fieldNums[j++] = reachableFieldNumbers[i];
}
}
mappingDefinition = new StatementClassMapping();
idxs = retrievedStmtMappings;
for (int i = 0; i < idxs.length; i++) {
if (idxs[i] != null) {
mappingDefinition.addMappingForMember(i, idxs[i]);
}
}
NucleusLogger.PERSISTENCE.debug("Performing reachability on fields " + StringUtils.intArrayToString(fieldNums));
op.provideFields(fieldNums, new ParameterSetter(op, ps, mappingDefinition));
}
} finally {
sqlControl.closeStatement(mconn, ps);
}
} finally {
mconn.release();
}
} catch (SQLException e) {
String msg = Localiser.msg("052208", op.getObjectAsPrintable(), insertStmt, e.getMessage());
NucleusLogger.DATASTORE_PERSIST.warn(msg);
List exceptions = new ArrayList();
exceptions.add(e);
while ((e = e.getNextException()) != null) {
exceptions.add(e);
}
throw new NucleusDataStoreException(msg, (Throwable[]) exceptions.toArray(new Throwable[exceptions.size()]));
}
// Execute any post-insert mapping actions now that the row is inserted (things like inserting any association parent-child).
for (int i = 0; i < callbacks.length; ++i) {
try {
if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
NucleusLogger.PERSISTENCE.debug(Localiser.msg("052209", op.getObjectAsPrintable(), ((JavaTypeMapping) callbacks[i]).getMemberMetaData().getFullFieldName()));
}
callbacks[i].postInsert(op);
} catch (NotYetFlushedException e) {
op.updateFieldAfterInsert(e.getPersistable(), ((JavaTypeMapping) callbacks[i]).getMemberMetaData().getAbsoluteFieldNumber());
}
}
}
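For orientation, here is a hedged sketch of a minimal persistable class that would exercise two of the branches above: the identity-column path (getInsertedDatastoreIdentity reads the generated key back) and the surrogate version path (versionMapping != null). Class, table, and column names are hypothetical.
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
import javax.jdo.annotations.Version;
import javax.jdo.annotations.VersionStrategy;

@PersistenceCapable(table = "CUSTOMER_ORDER")
@Version(strategy = VersionStrategy.VERSION_NUMBER, column = "VERSION") // surrogate version column set during the INSERT
public class CustomerOrder {
    @PrimaryKey
    @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY) // identity column: the generated id is read back after the INSERT
    private long id;

    private String reference;

    public long getId() { return id; }
    public String getReference() { return reference; }
    public void setReference(String reference) { this.reference = reference; }
}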