Use of org.eclipse.persistence.internal.queries.ContainerPolicy in project eclipselink by eclipse-ee4j.
The class AggregateCollectionMapping, method mergeChangesIntoObject.
/**
* INTERNAL:
* Merge changes from the source to the target object.
* Because this is a collection mapping, values are added to or removed from the
* collection based on the changeset
*/
@Override
public void mergeChangesIntoObject(Object target, ChangeRecord changeRecord, Object source, MergeManager mergeManager, AbstractSession targetSession) {
    if (this.descriptor.getCachePolicy().isProtectedIsolation() && !this.isCacheable && !targetSession.isProtectedSession()) {
        setAttributeValueInObject(target, this.indirectionPolicy.buildIndirectObject(new ValueHolder<>(null)));
        return;
    }
    // Check to see if the target has an instantiated collection.
    if (!isAttributeValueInstantiatedOrChanged(target)) {
        // Then do nothing.
        return;
    }
    ContainerPolicy containerPolicy = getContainerPolicy();
    AbstractSession session = mergeManager.getSession();
    Object valueOfTarget = null;
    // At this point the source's indirection must be instantiated or the changeSet would never have
    // been created.
    Object sourceAggregate = null;
    // When merging into the distributed cache, find the originals for merging, as indirection information may be lost.
    if (mergeManager.shouldMergeChangesIntoDistributedCache()) {
        ClassDescriptor descriptor = getDescriptor();
        AbstractRecord parentRow = descriptor.getObjectBuilder().extractPrimaryKeyRowFromObject(target, session);
        // fix for indirection
        Object result = getIndirectionPolicy().valueFromQuery(getSelectionQuery(), parentRow, session);
        setAttributeValueInObject(target, result);
        return;
    }
    // Iterate over the changes and merge the collections.
    List<ObjectChangeSet> aggregateObjects = ((AggregateCollectionChangeRecord) changeRecord).getChangedValues();
    int size = aggregateObjects.size();
    valueOfTarget = containerPolicy.containerInstance(size);
    // Next iterate over the changes and add them to the container.
    ObjectChangeSet objectChanges = null;
    for (int index = 0; index < size; ++index) {
        objectChanges = aggregateObjects.get(index);
        Class<?> localClassType = objectChanges.getClassType(session);
        sourceAggregate = objectChanges.getUnitOfWorkClone();
        // cr 4155 Load the target from the UnitOfWork. This will be the original
        // aggregate object that has the original indirection in it.
        Object targetAggregate = ((UnitOfWorkImpl) mergeManager.getSession()).getCloneToOriginals().get(sourceAggregate);
        if (targetAggregate == null) {
            targetAggregate = getReferenceDescriptor(localClassType, session).getObjectBuilder().buildNewInstance();
        }
        getReferenceDescriptor(localClassType, session).getObjectBuilder().mergeChangesIntoObject(targetAggregate, objectChanges, sourceAggregate, mergeManager, targetSession);
        containerPolicy.addInto(objectChanges.getNewKey(), targetAggregate, valueOfTarget, session);
    }
    setRealAttributeValueInObject(target, valueOfTarget);
}
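The merge above never mutates the existing target collection in place: it asks the ContainerPolicy for a fresh container sized to the change set, merges each changed aggregate into its original (or a newly built instance), and adds the result back through addInto. Below is a minimal, hypothetical helper (not part of EclipseLink) showing the same allocate-then-addInto pattern, assuming the caller already holds a ContainerPolicy, a session, and the merged elements.

import java.util.List;

import org.eclipse.persistence.internal.queries.ContainerPolicy;
import org.eclipse.persistence.internal.sessions.AbstractSession;

// Hypothetical helper, not part of EclipseLink: rebuilds a collection through a
// ContainerPolicy using the same allocate-then-addInto pattern as the merge above.
public final class ContainerRebuildSketch {

    private ContainerRebuildSketch() {
    }

    public static Object rebuild(ContainerPolicy policy, List<?> mergedElements, AbstractSession session) {
        // Pre-size the new container; the policy decides the concrete type (List, Set, Map, ...).
        Object newContainer = policy.containerInstance(mergedElements.size());
        for (Object element : mergedElements) {
            // addInto lets the policy decide how each element is stored in the container.
            policy.addInto(element, newContainer, session);
        }
        return newContainer;
    }
}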
Use of org.eclipse.persistence.internal.queries.ContainerPolicy in project eclipselink by eclipse-ee4j.
The class AggregateCollectionMapping, method compareObjects.
/**
* INTERNAL:
* Compare the attributes belonging to this mapping for the objects.
*/
@Override
public boolean compareObjects(Object firstObject, Object secondObject, AbstractSession session) {
    Object firstCollection = getRealCollectionAttributeValueFromObject(firstObject, session);
    Object secondCollection = getRealCollectionAttributeValueFromObject(secondObject, session);
    if (this.listOrderField != null) {
        return this.compareLists((List) firstCollection, (List) secondCollection, session);
    }
    ContainerPolicy containerPolicy = getContainerPolicy();
    if (containerPolicy.sizeFor(firstCollection) != containerPolicy.sizeFor(secondCollection)) {
        return false;
    }
    if (containerPolicy.sizeFor(firstCollection) == 0) {
        return true;
    }
    if (isMapKeyMapping()) {
        Object firstIter = containerPolicy.iteratorFor(firstCollection);
        Object secondIter = containerPolicy.iteratorFor(secondCollection);
        Map keyValues = new HashMap();
        while (containerPolicy.hasNext(secondIter)) {
            Map.Entry secondEntry = (Map.Entry) containerPolicy.nextEntry(secondIter, session);
            Object primaryKey = getReferenceDescriptor().getObjectBuilder().extractPrimaryKeyFromObject(secondEntry.getValue(), session);
            Object key = secondEntry.getKey();
            keyValues.put(key, primaryKey);
        }
        while (containerPolicy.hasNext(firstIter)) {
            Map.Entry firstEntry = (Map.Entry) containerPolicy.nextEntry(firstIter, session);
            Object primaryKey = getReferenceDescriptor().getObjectBuilder().extractPrimaryKeyFromObject(firstEntry.getValue(), session);
            Object key = firstEntry.getKey();
            if (!primaryKey.equals(keyValues.get(key))) {
                return false;
            }
        }
    } else {
        // Iterate over the first aggregate collection.
        for (Object iterFirst = containerPolicy.iteratorFor(firstCollection); containerPolicy.hasNext(iterFirst); ) {
            // Fetch the next object from the first iterator.
            Object firstAggregateObject = containerPolicy.next(iterFirst, session);
            // Iterate over the second aggregate collection.
            for (Object iterSecond = containerPolicy.iteratorFor(secondCollection); true; ) {
                // Fetch the next object from the second iterator.
                Object secondAggregateObject = containerPolicy.next(iterSecond, session);
                // Matching object found, break to the outer for loop.
                if (getReferenceDescriptor().getObjectBuilder().compareObjects(firstAggregateObject, secondAggregateObject, session)) {
                    break;
                }
                if (!containerPolicy.hasNext(iterSecond)) {
                    return false;
                }
            }
        }
    }
    return true;
}
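Because the concrete container type is abstracted behind ContainerPolicy, the comparison cannot use List or Map APIs directly; it relies on sizeFor plus the iteratorFor / hasNext / next protocol and falls back to an O(n^2) nested scan when the collection is not keyed. The following hypothetical sketch (not EclipseLink code) shows that order-independent comparison idiom, with the element-equality check passed in as a BiPredicate, whereas the mapping itself delegates to the reference descriptor's ObjectBuilder.

import java.util.function.BiPredicate;

import org.eclipse.persistence.internal.queries.ContainerPolicy;
import org.eclipse.persistence.internal.sessions.AbstractSession;

// Hypothetical helper, not part of EclipseLink: order-independent comparison of two
// containers through their ContainerPolicy, mirroring the nested loops above.
public final class ContainerCompareSketch {

    private ContainerCompareSketch() {
    }

    public static boolean matchesIgnoringOrder(ContainerPolicy policy, Object first, Object second,
                                               AbstractSession session, BiPredicate<Object, Object> sameElement) {
        if (policy.sizeFor(first) != policy.sizeFor(second)) {
            return false;
        }
        // For every element of the first container, scan the second for a match (O(n^2), as above).
        for (Object firstIter = policy.iteratorFor(first); policy.hasNext(firstIter); ) {
            Object candidate = policy.next(firstIter, session);
            boolean found = false;
            for (Object secondIter = policy.iteratorFor(second); policy.hasNext(secondIter); ) {
                if (sameElement.test(candidate, policy.next(secondIter, session))) {
                    found = true;
                    break;
                }
            }
            if (!found) {
                return false;
            }
        }
        return true;
    }
}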
Use of org.eclipse.persistence.internal.queries.ContainerPolicy in project eclipselink by eclipse-ee4j.
The class ReadAllQuery, method execute.
/**
* INTERNAL:
* Execute the query. If there are cached results return those.
* This must override the super to support result caching.
*
* @param session - the session in which the receiver will be executed.
* @return An object or vector, the result of executing the query.
* @exception DatabaseException - an error has occurred on the database
*/
@Override
public Object execute(AbstractSession session, AbstractRecord row) throws DatabaseException {
    if (shouldCacheQueryResults()) {
        if (getContainerPolicy().overridesRead()) {
            throw QueryException.cannotCacheCursorResultsOnQuery(this);
        }
        if (shouldConformResultsInUnitOfWork()) {
            throw QueryException.cannotConformAndCacheQueryResults(this);
        }
        if (isPrepared()) {
            // Only prepared queries can have cached results.
            Object queryResults = getQueryResults(session, row, true);
            if (queryResults != null) {
                if (QueryMonitor.shouldMonitor()) {
                    QueryMonitor.incrementReadAllHits(this);
                }
                session.incrementProfile(SessionProfiler.CacheHits, this);
                // If "no results" was cached (the InvalidObject singleton), ignore the cached
                // results, and return an empty container instance as configured.
                if (queryResults == InvalidObject.instance) {
                    return getContainerPolicy().containerInstance(0);
                }
                Collection results = (Collection) queryResults;
                if (session.isUnitOfWork()) {
                    ContainerPolicy policy = getContainerPolicy();
                    Object resultCollection = policy.containerInstance(results.size());
                    Object iterator = policy.iteratorFor(results);
                    while (policy.hasNext(iterator)) {
                        Object result = ((UnitOfWorkImpl) session).registerExistingObject(policy.next(iterator, session), this.descriptor, null, true);
                        policy.addInto(result, resultCollection, session);
                    }
                    return resultCollection;
                }
                return results;
            }
        }
        session.incrementProfile(SessionProfiler.CacheMisses, this);
    }
    if (QueryMonitor.shouldMonitor()) {
        QueryMonitor.incrementReadAllMisses(this);
    }
    return super.execute(session, row);
}
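The cache-hit path above only runs when the query has been configured to cache its results (shouldCacheQueryResults()); inside a UnitOfWork the cached objects are re-registered through the container policy so callers still receive working clones. The following hedged usage sketch shows one way a JPA query might opt into EclipseLink's query results cache via the QueryHints.QUERY_RESULTS_CACHE hint; the Employee entity name and the open EntityManager are assumptions, and the jakarta.persistence packages assume EclipseLink 3.0 or later.

import java.util.List;

import jakarta.persistence.EntityManager;
import jakarta.persistence.Query;

import org.eclipse.persistence.config.HintValues;
import org.eclipse.persistence.config.QueryHints;

// Hypothetical usage sketch: the Employee entity and the open EntityManager are assumed.
public final class ResultsCacheSketch {

    private ResultsCacheSketch() {
    }

    public static List<?> findAllCached(EntityManager em) {
        Query query = em.createQuery("SELECT e FROM Employee e");
        // Opt this query into EclipseLink's query results cache; on repeated executions
        // the cached collection is returned by the cache-hit branch of ReadAllQuery.execute().
        query.setHint(QueryHints.QUERY_RESULTS_CACHE, HintValues.TRUE);
        return query.getResultList();
    }
}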
Use of org.eclipse.persistence.internal.queries.ContainerPolicy in project eclipselink by eclipse-ee4j.
The class ReadAllQuery, method conformResult.
/**
* INTERNAL:
* Conform the result if specified.
*/
protected Object conformResult(Object result, UnitOfWorkImpl unitOfWork, AbstractRecord arguments, boolean buildDirectlyFromRows) {
    Expression selectionCriteria = getSelectionCriteria();
    if (selectionCriteria != null) {
        ExpressionBuilder builder = getSelectionCriteria().getBuilder();
        builder.setSession(unitOfWork.getRootSession(null));
        builder.setQueryClass(getReferenceClass());
        if (getQueryMechanism().isExpressionQueryMechanism() && selectionCriteria.isLogicalExpression()) {
            // bug #526546
            if (builder.derivedExpressions != null) {
                for (Expression e : builder.derivedExpressions) {
                    if (e.isQueryKeyExpression() && ((QueryKeyExpression) e).shouldQueryToManyRelationship()) {
                        DatabaseMapping mapping = ((QueryKeyExpression) e).getMapping();
                        if (mapping.isOneToManyMapping()) {
                            OneToManyMapping otm = (OneToManyMapping) mapping;
                            Expression join = otm.buildSelectionCriteria();
                            selectionCriteria = selectionCriteria.and(join);
                        }
                    }
                }
            }
        }
    }
    // If the query is redirected then the collection returned might no longer
    // correspond to the original container policy. CR#2342-S.M.
    ContainerPolicy cp;
    if (getRedirector() != null) {
        cp = ContainerPolicy.buildPolicyFor(result.getClass());
    } else {
        cp = getContainerPolicy();
    }
    // This code is now a great deal different... For one, registration is done
    // as part of conforming. Also, this should only be called if one actually
    // is conforming.
    // First scan the UnitOfWork for conforming instances.
    // This will walk through the entire cache of registered objects.
    // Let p be objects from result not in the cache.
    // Let c be objects from cache.
    // Presently p intersect c = empty set, but later p subset c.
    // By checking cache now doesConform will be called p fewer times.
    Map<Object, Object> indexedInterimResult = unitOfWork.scanForConformingInstances(selectionCriteria, getReferenceClass(), arguments, this);
    Cursor cursor = null;
    // In the case of cursors just conform/register the initially read collection.
    if (cp.isCursorPolicy()) {
        cursor = (Cursor) result;
        cp = ContainerPolicy.buildPolicyFor(ClassConstants.Vector_class);
        // In a nested UnitOfWork the session might have been the session of the parent.
        cursor.setSession(unitOfWork);
        result = cursor.getObjectCollection();
        // for later incremental conforming...
        cursor.setInitiallyConformingIndex(indexedInterimResult);
        cursor.setSelectionCriteriaClone(getSelectionCriteria());
        cursor.setTranslationRow(arguments);
    }
    // Now conform the result from the database.
    // Remove any deleted or changed objects that no longer conform.
    // Deletes will only work for simple queries; queries with or's or anyOf's may not return
    // correct results when untriggered indirection is in the model.
    List fromDatabase = null;
    // result is just a vector of rows, not a container of wrapped originals.
    if (buildDirectlyFromRows) {
        List<AbstractRecord> rows = (List<AbstractRecord>) result;
        int size = rows.size();
        fromDatabase = new ArrayList(size);
        for (int index = 0; index < size; index++) {
            AbstractRecord row = rows.get(index);
            // null is placed in the row collection for 1-m joining to filter duplicate rows.
            if (row != null) {
                Object clone = conformIndividualResult(buildObject(row), unitOfWork, arguments, getSelectionCriteria(), indexedInterimResult);
                if (clone != null) {
                    fromDatabase.add(clone);
                }
            }
        }
    } else {
        fromDatabase = new ArrayList(cp.sizeFor(result));
        AbstractSession sessionToUse = unitOfWork.getParent();
        for (Object iter = cp.iteratorFor(result); cp.hasNext(iter); ) {
            Object object = cp.next(iter, sessionToUse);
            Object clone = conformIndividualResult(registerIndividualResult(object, null, unitOfWork, null, null), unitOfWork, arguments, getSelectionCriteria(), indexedInterimResult);
            if (clone != null) {
                fromDatabase.add(clone);
            }
        }
    }
    // Now add the unwrapped conforming instances into an appropriate container.
    // Wrapping is done automatically.
    // Make sure a vector of exactly the right size is returned.
    Object conformedResult = cp.containerInstance(indexedInterimResult.size() + fromDatabase.size());
    for (Iterator<Object> enumtr = indexedInterimResult.values().iterator(); enumtr.hasNext(); ) {
        Object eachClone = enumtr.next();
        cp.addInto(eachClone, conformedResult, unitOfWork);
    }
    int size = fromDatabase.size();
    for (int index = 0; index < size; index++) {
        Object eachClone = fromDatabase.get(index);
        cp.addInto(eachClone, conformedResult, unitOfWork);
    }
    if (cursor != null) {
        cursor.setObjectCollection((List) conformedResult);
        // In a nested UnitOfWork, also record the cursor's objects in the initially
        // conforming index so incremental conforming works against the parent UnitOfWork.
        if (unitOfWork.isNestedUnitOfWork()) {
            for (Object clone : cursor.getObjectCollection()) {
                indexedInterimResult.put(clone, clone);
            }
        }
        return cursor;
    } else {
        return conformedResult;
    }
}
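conformResult() is only invoked for queries that conform results in the UnitOfWork; it combines the registered objects that already conform in memory with the conforming objects read from the database, using whichever ContainerPolicy matches the returned collection. Below is a hypothetical sketch of enabling conforming on a native ReadAllQuery; the entity class, its "salary" attribute, and the active UnitOfWork are assumptions.

import java.util.List;

import org.eclipse.persistence.expressions.ExpressionBuilder;
import org.eclipse.persistence.queries.ReadAllQuery;
import org.eclipse.persistence.sessions.UnitOfWork;

// Hypothetical sketch: the entity class, its "salary" attribute, and the active UnitOfWork are assumed.
public final class ConformingQuerySketch {

    private ConformingQuerySketch() {
    }

    public static List<?> findHighlyPaid(UnitOfWork uow, Class<?> entityClass) {
        ExpressionBuilder builder = new ExpressionBuilder();
        ReadAllQuery query = new ReadAllQuery(entityClass);
        query.setSelectionCriteria(builder.get("salary").greaterThan(100000));
        // Ask EclipseLink to conform the database result against uncommitted changes
        // registered in this UnitOfWork; conformResult() above then combines conforming
        // registered clones with the rows read from the database.
        query.conformResultsInUnitOfWork();
        return (List<?>) uow.executeQuery(query);
    }
}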
Use of org.eclipse.persistence.internal.queries.ContainerPolicy in project eclipselink by eclipse-ee4j.
The class ReadAllQuery, method registerResultInUnitOfWork.
/**
* INTERNAL:
* All objects queried via a UnitOfWork get registered here, if the query
* went to the database.
* <p>
* Involves registering the query result individually and in totality, and
* hence refreshing / conforming is done here.
*
* @param result may be collection (read all) or an object (read one),
* or even a cursor. If in transaction the shared cache will
* be bypassed, meaning the result may not be originals from the parent
* but raw database rows.
* @param unitOfWork the unitOfWork the result is being registered in.
* @param arguments the original arguments/parameters passed to the query
* execution. Used by conforming
* @param buildDirectlyFromRows If in transaction must construct
* a registered result from raw database rows.
*
* @return the final (conformed, refreshed, wrapped) UnitOfWork query result
*/
@Override
public Object registerResultInUnitOfWork(Object result, UnitOfWorkImpl unitOfWork, AbstractRecord arguments, boolean buildDirectlyFromRows) {
    // Replacing results with registered versions in the UnitOfWork is part of
    // conforming and is now done while conforming to maximize performance.
    // PERF: Avoid conforming an empty UnitOfWork.
    if (unitOfWork.hasCloneMapping() && (shouldConformResultsInUnitOfWork() || this.descriptor.shouldAlwaysConformResultsInUnitOfWork())) {
        return conformResult(result, unitOfWork, arguments, buildDirectlyFromRows);
    }
    // Also for cursors the initial connection is automatically registered.
    if (buildDirectlyFromRows) {
        List<AbstractRecord> rows = (List<AbstractRecord>) result;
        ContainerPolicy cp = this.containerPolicy;
        int size = rows.size();
        Object clones = cp.containerInstance(size);
        if (cp.shouldAddAll()) {
            List clonesIn = new ArrayList(size);
            List<AbstractRecord> rowsIn = new ArrayList(size);
            for (int index = 0; index < size; index++) {
                AbstractRecord row = rows.get(index);
                // null is placed in the row collection for 1-m joining to filter duplicate rows.
                if (row != null) {
                    Object clone = buildObject(row);
                    clonesIn.add(clone);
                    rowsIn.add(row);
                }
            }
            cp.addAll(clonesIn, clones, unitOfWork, rowsIn, this, null, true);
        } else {
            boolean quickAdd = (clones instanceof Collection) && !this.descriptor.getObjectBuilder().hasWrapperPolicy();
            if (this.descriptor.getCachePolicy().shouldPrefetchCacheKeys() && shouldMaintainCache() && !shouldRetrieveBypassCache()
                    && ((!(unitOfWork.hasCommitManager() && unitOfWork.getCommitManager().isActive())
                            && !unitOfWork.wasTransactionBegunPrematurely()
                            && !this.descriptor.getCachePolicy().shouldIsolateObjectsInUnitOfWork()
                            && !this.descriptor.getCachePolicy().shouldIsolateProtectedObjectsInUnitOfWork())
                        || (unitOfWork.isClassReadOnly(this.getReferenceClass(), this.getDescriptor())))) {
                Object[] pkList = new Object[size];
                for (int i = 0; i < size; ++i) {
                    pkList[i] = getDescriptor().getObjectBuilder().extractPrimaryKeyFromRow(rows.get(i), session);
                }
                setPrefetchedCacheKeys(unitOfWork.getParentIdentityMapSession(this).getIdentityMapAccessorInstance().getAllCacheKeysFromIdentityMapWithEntityPK(pkList, descriptor));
            }
            for (int index = 0; index < size; index++) {
                AbstractRecord row = rows.get(index);
                // null is placed in the row collection for 1-m joining to filter duplicate rows.
                if (row != null) {
                    Object clone = buildObject(row);
                    if (quickAdd) {
                        ((Collection) clones).add(clone);
                    } else {
                        cp.addInto(clone, clones, unitOfWork, row, this, null, true);
                    }
                }
            }
        }
        return clones;
    }
    ContainerPolicy cp;
    Cursor cursor = null;
    // If the query is redirected then the collection returned might no longer
    // correspond to the original container policy. CR#2342-S.M.
    if (getRedirector() != null) {
        cp = ContainerPolicy.buildPolicyFor(result.getClass());
    } else {
        cp = this.containerPolicy;
    }
    // In the case of cursors just register the initially read collection.
    if (cp.isCursorPolicy()) {
        cursor = (Cursor) result;
        // In a nested UnitOfWork the session might have been the session of the parent.
        cursor.setSession(unitOfWork);
        cp = ContainerPolicy.buildPolicyFor(ClassConstants.Vector_class);
        result = cursor.getObjectCollection();
    }
    Object clones = cp.containerInstance(cp.sizeFor(result));
    AbstractSession sessionToUse = unitOfWork.getParent();
    for (Object iter = cp.iteratorFor(result); cp.hasNext(iter); ) {
        Object object = cp.next(iter, sessionToUse);
        Object clone = registerIndividualResult(object, null, unitOfWork, this.joinedAttributeManager, null);
        cp.addInto(clone, clones, unitOfWork);
    }
    if (cursor != null) {
        cursor.setObjectCollection((Vector) clones);
        return cursor;
    } else {
        return clones;
    }
}
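registerResultInUnitOfWork() is what turns a raw query result (originals or database rows) into registered working clones held by the UnitOfWork, building the returned collection through the query's ContainerPolicy. The following hypothetical end-to-end sketch shows the calling pattern from application code; the Session, the entity class, and the edits performed on the clones are assumptions.

import java.util.List;

import org.eclipse.persistence.queries.ReadAllQuery;
import org.eclipse.persistence.sessions.Session;
import org.eclipse.persistence.sessions.UnitOfWork;

// Hypothetical sketch: the Session, the entity class, and the edits made to the clones are assumed.
public final class RegisteredClonesSketch {

    private RegisteredClonesSketch() {
    }

    public static void readAndEdit(Session session, Class<?> entityClass) {
        UnitOfWork uow = session.acquireUnitOfWork();
        // Executing through the UnitOfWork routes the raw result through
        // registerResultInUnitOfWork(), so every element of the returned
        // collection is a registered working clone that is safe to mutate.
        List<?> clones = (List<?>) uow.executeQuery(new ReadAllQuery(entityClass));
        // ... edit the clones here; changes are computed and written on commit ...
        uow.commit();
    }
}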