Use of org.datanucleus.ExecutionContext in the datanucleus-rdbms project.
Class OracleMapMapping, method postInsert:
/**
 * Retrieve the empty BLOB created by the insert statement and write out the
 * current Map field value, serialised, to the Oracle BLOB column.
 * Also ensures that any persistable (embedded) keys/values in the map have ObjectProviders
 * before serialisation takes place.
 * @param ownerOP ObjectProvider of the owner
 */
public void postInsert(ObjectProvider ownerOP) {
    if (containerIsStoredInSingleColumn()) {
        ExecutionContext ec = ownerOP.getExecutionContext();
        java.util.Map value = (java.util.Map) ownerOP.provideField(mmd.getAbsoluteFieldNumber());
        // Do nothing when serialised since we are handled in the main request
        if (value != null) {
            if (mmd.getMap().keyIsPersistent() || mmd.getMap().valueIsPersistent()) {
                // Make sure all persistable keys/values have ObjectProviders
                Set entries = value.entrySet();
                Iterator iter = entries.iterator();
                while (iter.hasNext()) {
                    Map.Entry entry = (Map.Entry) iter.next();
                    if (mmd.getMap().keyIsPersistent() && entry.getKey() != null) {
                        Object key = entry.getKey();
                        if (ec.findObjectProvider(key) == null || ec.getApiAdapter().getExecutionContext(key) == null) {
                            // Embedded key has no ObjectProvider yet, so create one owned by this field
                            ec.getNucleusContext().getObjectProviderFactory().newForEmbedded(ec, key, false, ownerOP, mmd.getAbsoluteFieldNumber());
                        }
                    }
                    if (mmd.getMap().valueIsPersistent() && entry.getValue() != null) {
                        Object val = entry.getValue();
                        if (ec.findObjectProvider(val) == null || ec.getApiAdapter().getExecutionContext(val) == null) {
                            // Embedded value has no ObjectProvider yet, so create one owned by this field
                            ec.getNucleusContext().getObjectProviderFactory().newForEmbedded(ec, val, false, ownerOP, mmd.getAbsoluteFieldNumber());
                        }
                    }
                }
            }
        }
        // Generate the contents for the BLOB by Java-serialising the map
        byte[] bytes = new byte[0];
        if (value != null) {
            try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
                 ObjectOutputStream oos = new ObjectOutputStream(baos)) {
                oos.writeObject(value);
                // Flush explicitly so any bytes buffered inside the ObjectOutputStream
                // are guaranteed to have reached baos before we snapshot it
                oos.flush();
                bytes = baos.toByteArray();
            } catch (IOException e1) {
                // Swallowed deliberately (best-effort): an unserialisable map results in an empty BLOB.
                // NOTE(review): consider surfacing this as an exception since it silently loses data.
            }
        }
        // Update the BLOB column with the serialised bytes
        OracleBlobRDBMSMapping.updateBlobColumn(ownerOP, getTable(), getDatastoreMapping(0), bytes);
    } else {
        super.postInsert(ownerOP);
    }
}
Use of org.datanucleus.ExecutionContext in the datanucleus-rdbms project.
Class MapMapping, method postUpdate:
/**
 * Method to be called after any update of the owner class element.
 * @param ownerOP ObjectProvider of the owner
 */
public void postUpdate(ObjectProvider ownerOP) {
    ExecutionContext ec = ownerOP.getExecutionContext();
    RDBMSStoreManager storeMgr = table.getStoreManager();
    java.util.Map value = (java.util.Map) ownerOP.provideField(getAbsoluteFieldNumber());
    if (containerIsStoredInSingleColumn()) {
        // Serialised into the owner's own column, so the main update request handles the data.
        // We only need to guarantee ObjectProviders exist for persistable keys/values.
        if (value != null && (mmd.getMap().keyIsPersistent() || mmd.getMap().valueIsPersistent())) {
            for (Object o : value.entrySet()) {
                Map.Entry entry = (Map.Entry) o;
                if (mmd.getMap().keyIsPersistent() && entry.getKey() != null) {
                    Object key = entry.getKey();
                    if (ec.findObjectProvider(key) == null || ec.getApiAdapter().getExecutionContext(key) == null) {
                        // Key is not yet managed, so create an embedded ObjectProvider for it
                        ec.getNucleusContext().getObjectProviderFactory().newForEmbedded(ec, key, false, ownerOP, mmd.getAbsoluteFieldNumber());
                    }
                }
                if (mmd.getMap().valueIsPersistent() && entry.getValue() != null) {
                    Object val = entry.getValue();
                    if (ec.findObjectProvider(val) == null || ec.getApiAdapter().getExecutionContext(val) == null) {
                        // Value is not yet managed, so create an embedded ObjectProvider for it
                        ec.getNucleusContext().getObjectProviderFactory().newForEmbedded(ec, val, false, ownerOP, mmd.getAbsoluteFieldNumber());
                    }
                }
            }
        }
        return;
    }
    if (value == null) {
        // Field was nulled: remove all persisted entries and install an empty SCO wrapper
        ((MapStore) storeMgr.getBackingStoreForField(ec.getClassLoaderResolver(), mmd, null)).clear(ownerOP);
        replaceFieldWithWrapper(ownerOP, null);
    } else if (value instanceof BackedSCO) {
        // Already wrapped in a backed SCO, so simply flush its queued operations
        ec.flushOperationsForBackingStore(((BackedSCO) value).getBackingStore(), ownerOP);
    } else if (mmd.isCascadeUpdate()) {
        if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
            NucleusLogger.PERSISTENCE.debug(Localiser.msg("007009", mmd.getFullFieldName()));
        }
        // Push this map's contents to the datastore (clear old entries, then add the new ones).
        // Reached either via a direct field update (UpdateRequest), or when persisting a new
        // object whose map had to wait for element insertion before being flushed.
        MapStore backingStore = (MapStore) storeMgr.getBackingStoreForField(ec.getClassLoaderResolver(), mmd, value.getClass());
        // TODO Consider making this more efficient picking the ones to remove/add
        // e.g use an update() method on the backing store like for CollectionStore
        backingStore.clear(ownerOP);
        backingStore.putAll(ownerOP, value);
        // Swap the plain map for a wrapper containing these entries
        replaceFieldWithWrapper(ownerOP, value);
    } else {
        // User doesn't want updates applied by reachability
        if (NucleusLogger.PERSISTENCE.isDebugEnabled()) {
            NucleusLogger.PERSISTENCE.debug(Localiser.msg("007008", mmd.getFullFieldName()));
        }
    }
}
Use of org.datanucleus.ExecutionContext in the datanucleus-rdbms project.
Class FKArrayStore, method clear:
/**
 * Method to clear the Array.
 * This is called when the container object is being deleted and the elements are to be removed (maybe for dependent field).
 * @param ownerOP The ObjectProvider
 */
public void clear(ObjectProvider ownerOP) {
    // Decide whether the elements themselves are deleted, or merely have their FK nullified
    final boolean deleteElements;
    if (ownerMemberMetaData.getArray().isDependentElement()) {
        // Dependent elements cannot exist on their own, so delete them all
        NucleusLogger.DATASTORE.debug(Localiser.msg("056034"));
        deleteElements = true;
    } else if (ownerMapping.isNullable() && orderMapping.isNullable()) {
        // Not dependent and the FK/order columns are nullable, so just null out the FK
        NucleusLogger.DATASTORE.debug(Localiser.msg("056036"));
        deleteElements = false;
    } else {
        // Not dependent, but the FK cannot be nulled, so the elements must be deleted
        NucleusLogger.DATASTORE.debug(Localiser.msg("056035"));
        deleteElements = true;
    }
    if (deleteElements) {
        // Load the array field, then delete whatever elements it currently holds
        ownerOP.isLoaded(ownerMemberMetaData.getAbsoluteFieldNumber());
        Object[] elements = (Object[]) ownerOP.provideField(ownerMemberMetaData.getAbsoluteFieldNumber());
        if (elements != null && elements.length > 0) {
            ownerOP.getExecutionContext().deleteObjects(elements);
        }
        return;
    }
    if (ownerOP.getClassMetaData().hasExtension(MetaData.EXTENSION_CLASS_SOFTDELETE)) {
        // Owner is soft-deleted, so leave the element FKs untouched
        return;
    }
    // TODO Cater for multiple element roots
    // TODO If the relation is bidirectional we need to clear the owner in the element
    String clearNullifyStmt = getClearNullifyStmt();
    try {
        ExecutionContext ec = ownerOP.getExecutionContext();
        ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
        SQLController sqlControl = storeMgr.getSQLController();
        try {
            PreparedStatement ps = sqlControl.getStatementForUpdate(mconn, clearNullifyStmt, false);
            try {
                // Bind the owner (and optional relation discriminator) then run the nullify UPDATE
                int jdbcPosition = BackingStoreHelper.populateOwnerInStatement(ownerOP, ec, ps, 1, this);
                if (relationDiscriminatorMapping != null) {
                    BackingStoreHelper.populateRelationDiscriminatorInStatement(ec, ps, jdbcPosition, this);
                }
                sqlControl.executeStatementUpdate(ec, mconn, clearNullifyStmt, ps, true);
            } finally {
                sqlControl.closeStatement(mconn, ps);
            }
        } finally {
            mconn.release();
        }
    } catch (SQLException e) {
        throw new NucleusDataStoreException(Localiser.msg("056013", clearNullifyStmt), e);
    }
}
Use of org.datanucleus.ExecutionContext in the datanucleus-rdbms project.
Class FKArrayStore, method iterator:
/**
 * Accessor for an iterator for the set.
 * @param ownerOP ObjectProvider for the set.
 * @return Iterator for the set, or null if no element information is available.
 */
public Iterator<E> iterator(ObjectProvider ownerOP) {
    ExecutionContext ec = ownerOP.getExecutionContext();
    if (elementInfo == null || elementInfo.length == 0) {
        // No element metadata, so no statement can be generated
        return null;
    }
    // Generate the statement, and statement mapping/parameter information
    IteratorStatement iterStmt = getIteratorStatement(ec, ec.getFetchPlan(), true);
    SelectStatement sqlStmt = iterStmt.getSelectStatement();
    StatementClassMapping iteratorMappingDef = iterStmt.getStatementClassMapping();
    // Input parameter(s) - the owner. One parameter occurrence is needed per UNIONed
    // statement; a statement with no unions needs exactly one occurrence, so a single
    // loop over (numberOfUnions + 1) covers both cases.
    int inputParamNum = 1;
    StatementMappingIndex ownerIdx = new StatementMappingIndex(ownerMapping);
    int numStatements = sqlStmt.getNumberOfUnions() + 1;
    for (int j = 0; j < numStatements; j++) {
        int[] paramPositions = new int[ownerMapping.getNumberOfDatastoreMappings()];
        for (int k = 0; k < ownerMapping.getNumberOfDatastoreMappings(); k++) {
            paramPositions[k] = inputParamNum++;
        }
        ownerIdx.addParameterOccurrence(paramPositions);
    }
    StatementParameterMapping iteratorMappingParams = new StatementParameterMapping();
    iteratorMappingParams.addMappingForParameter("owner", ownerIdx);
    if (ec.getTransaction().getSerializeRead() != null && ec.getTransaction().getSerializeRead()) {
        // Transaction demands serialised reads, so lock the selected rows
        sqlStmt.addExtension(SQLStatement.EXTENSION_LOCK_FOR_UPDATE, true);
    }
    String stmt = sqlStmt.getSQLText().toSQL();
    try {
        ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
        SQLController sqlControl = storeMgr.getSQLController();
        try {
            // Create the statement
            PreparedStatement ps = sqlControl.getStatementForQuery(mconn, stmt);
            // Set the owner for every parameter occurrence
            ObjectProvider stmtOwnerOP = BackingStoreHelper.getOwnerObjectProviderForBackingStore(ownerOP);
            int numParams = ownerIdx.getNumberOfParameterOccurrences();
            for (int paramInstance = 0; paramInstance < numParams; paramInstance++) {
                ownerIdx.getMapping().setObject(ec, ps, ownerIdx.getParameterPositionsForOccurrence(paramInstance), stmtOwnerOP.getObject());
            }
            try {
                ResultSet rs = sqlControl.executeStatementQuery(ec, mconn, stmt, ps);
                try {
                    if (elementsAreEmbedded || elementsAreSerialised) {
                        // FK arrays only make sense with persistable (non-embedded, non-serialised) elements
                        throw new NucleusException("Cannot have FK array with non-persistent objects");
                    }
                    ResultObjectFactory rof = new PersistentClassROF(ec, rs, false, iteratorMappingDef, elementCmd, clr.classForName(elementType));
                    return new ArrayStoreIterator(ownerOP, rs, rof, this);
                } finally {
                    rs.close();
                }
            } finally {
                sqlControl.closeStatement(mconn, ps);
            }
        } finally {
            mconn.release();
        }
    } catch (SQLException | MappedDatastoreException e) {
        throw new NucleusDataStoreException(Localiser.msg("056006", stmt), e);
    }
}
Use of org.datanucleus.ExecutionContext in the datanucleus-rdbms project.
Class FKListStore, method internalAdd:
/**
 * Internal method for adding an item to the List.
 * @param ownerOP ObjectProvider for the owner
 * @param startAt The start position
 * @param atEnd Whether to add at the end
 * @param c The Collection of elements to add.
 * @param size Current size of list (if known). -1 if not known
 * @return Whether it was successful
 */
protected boolean internalAdd(ObjectProvider ownerOP, int startAt, boolean atEnd, Collection<E> c, int size) {
    if (c == null || c.isEmpty()) {
        // Nothing to add
        return true;
    }
    // Establish how many elements are already persisted for this list
    int currentListSize = (size < 0) ? size(ownerOP) : size;
    // Appending (either explicitly at the end, or at an index equal to the current size)
    // needs no shifting of existing rows
    boolean shiftingElements = !(atEnd || startAt == currentListSize);
    if (!shiftingElements) {
        startAt = currentListSize;
    }
    boolean elementsNeedPositioning = false;
    int position = startAt;
    for (Object element : c) {
        // Persist any non-persistent element, optionally at its final list position
        // (persistence-by-reachability)
        if (shiftingElements) {
            // Existing rows still need moving, so skip positioning for now
            position = -1;
        }
        boolean inserted = validateElementForWriting(ownerOP, element, position);
        if (!inserted || shiftingElements) {
            // This element's index wasn't written during validation; set it later
            elementsNeedPositioning = true;
        }
        if (!shiftingElements) {
            position++;
        }
    }
    if (shiftingElements) {
        // Move every existing element at or after the insertion point up by the
        // number of new elements being inserted
        try {
            int shift = c.size();
            ExecutionContext ec = ownerOP.getExecutionContext();
            ManagedConnection mconn = storeMgr.getConnectionManager().getConnection(ec);
            try {
                // Walk backwards so shifted rows never collide with unshifted ones
                for (int i = currentListSize - 1; i >= startAt; i--) {
                    internalShift(ownerOP, mconn, true, i, shift, false);
                }
            } finally {
                mconn.release();
            }
        } catch (MappedDatastoreException e) {
            // The shift failed part-way, so abort the whole add
            throw new NucleusDataStoreException(Localiser.msg("056009", e.getMessage()), e.getCause());
        }
    }
    if (shiftingElements || elementsNeedPositioning) {
        // Any shifting is done (or some elements were already persistent), so write
        // the final index/FK of each new element now
        for (Object element : c) {
            updateElementFk(ownerOP, element, ownerOP.getObject(), startAt);
            startAt++;
        }
    }
    return true;
}
Aggregations