Use of org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException in project geode by apache.
The class LocalRegion, method virtualPut.
/**
* Subclasses should reimplement if needed
*/
boolean virtualPut(final EntryEventImpl event, final boolean ifNew, final boolean ifOld, Object expectedOldValue, boolean requireOldValue, final long lastModified, final boolean overwriteDestroyed) throws TimeoutException, CacheWriterException {
if (!MemoryThresholds.isLowMemoryExceptionDisabled()) {
checkIfAboveThreshold(event);
}
Operation originalOp = event.getOperation();
RegionEntry oldEntry;
try {
oldEntry = this.entries.basicPut(event, lastModified, ifNew, ifOld, expectedOldValue, requireOldValue, overwriteDestroyed);
} catch (ConcurrentCacheModificationException ignore) {
// another thread applied a conflicting version change before this one could
if (logger.isDebugEnabled()) {
logger.debug("caught concurrent modification attempt when applying {}", event);
}
notifyBridgeClients(event);
return false;
}
// for EMPTY clients, see if a concurrent map operation had an entry on the server
ServerRegionProxy mySRP = getServerProxy();
if (mySRP != null && this.dataPolicy == DataPolicy.EMPTY) {
if (originalOp == Operation.PUT_IF_ABSENT) {
return !event.hasOldValue();
}
if (originalOp == Operation.REPLACE && !requireOldValue) {
// LocalRegion.serverPut throws an EntryNotFoundException if the operation failed
return true;
}
}
return oldEntry != null;
}
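The catch block above is the heart of virtualPut's concurrency handling: a ConcurrentCacheModificationException from basicPut means another thread's versioned change was applied first, so the method still notifies bridge clients but reports the put as unsuccessful instead of failing. The same optimistic pattern can be shown in a minimal standalone sketch; VersionedMap, ConflictException, and the long version counter below are hypothetical scaffolding, not Geode classes.

import java.util.concurrent.ConcurrentHashMap;

// Hypothetical stand-ins for versioned region entries; not Geode API.
class ConflictException extends RuntimeException {}

class VersionedMap {
    private static final class Versioned {
        final Object value;
        final long version;
        Versioned(Object value, long version) {
            this.value = value;
            this.version = version;
        }
    }

    private final ConcurrentHashMap<Object, Versioned> entries = new ConcurrentHashMap<>();

    // Applies the put only if the incoming version is newer; otherwise signals a
    // conflict, mirroring how basicPut throws ConcurrentCacheModificationException.
    void put(Object key, Object value, long version) {
        entries.compute(key, (k, old) -> {
            if (old != null && old.version >= version) {
                throw new ConflictException();
            }
            return new Versioned(value, version);
        });
    }

    // Mirrors virtualPut: a losing writer is not an error, just an unsuccessful put,
    // and the caller may still need to notify listeners or clients.
    boolean tryPut(Object key, Object value, long version) {
        try {
            put(key, value, version);
            return true;
        } catch (ConflictException ignore) {
            return false;
        }
    }
}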
Use of org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException in project geode by apache.
The class PutAllPRMessage, method doLocalPutAll.
/**
* This method is called both by operateOnPartitionedRegion() when processing a remote msg and by
* sendMsgByBucket() when processing a msg targeted to the local JVM. Note: it is very important
* that this message does NOT cause any deadlocks, as the sender will wait indefinitely for the
* acknowledgment.
*
* @param r the partitioned region
* @param eventSender the endpoint server that received the request from the client
* @param lastModified timestamp of the last modification
* @return true if the operation succeeds; otherwise an exception is thrown
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings("IMSE_DONT_CATCH_IMSE")
public boolean doLocalPutAll(PartitionedRegion r, InternalDistributedMember eventSender, long lastModified) throws EntryExistsException, ForceReattemptException, DataLocationException {
boolean didPut = false;
long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT;
if (r.hasServerProxy()) {
clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout();
if (logger.isDebugEnabled()) {
logger.debug("PutAllPRMessage: doLocalPutAll: clientReadTimeOut is {}", clientReadTimeOut);
}
}
DistributedPutAllOperation dpao = null;
@Released EntryEventImpl baseEvent = null;
BucketRegion bucketRegion = null;
PartitionedRegionDataStore ds = r.getDataStore();
InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId();
try {
if (!notificationOnly) {
// bucketRegion is not null only when !notificationOnly
bucketRegion = ds.getInitializedBucketForId(null, bucketId);
this.versions = new VersionedObjectList(this.putAllPRDataSize, true, bucketRegion.getAttributes().getConcurrencyChecksEnabled());
// create a base event and a DPAO for the PutAllMessage distributed between redundant buckets
baseEvent = EntryEventImpl.create(bucketRegion, Operation.PUTALL_CREATE, null, null, this.callbackArg, true, eventSender, !skipCallbacks, true);
// set baseEventId to the first entry's event id. We need the thread id for DACE
baseEvent.setEventId(putAllPRData[0].getEventID());
if (this.bridgeContext != null) {
baseEvent.setContext(this.bridgeContext);
}
baseEvent.setPossibleDuplicate(this.posDup);
if (logger.isDebugEnabled()) {
logger.debug("PutAllPRMessage.doLocalPutAll: eventSender is {}, baseEvent is {}, msg is {}", eventSender, baseEvent, this);
}
dpao = new DistributedPutAllOperation(baseEvent, putAllPRDataSize, false);
}
// Fix the updateMsg misorder issue
// Lock the keys when doing postPutAll
Object[] keys = new Object[putAllPRDataSize];
for (int i = 0; i < putAllPRDataSize; ++i) {
keys[i] = putAllPRData[i].getKey();
}
if (!notificationOnly) {
try {
if (putAllPRData.length > 0) {
if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
if (logger.isDebugEnabled()) {
logger.debug("attempting to locate version tags for retried event");
}
// try to recover version tags left behind by the previous attempt
for (int i = 0; i < putAllPRDataSize; i++) {
if (putAllPRData[i].versionTag == null) {
putAllPRData[i].versionTag = bucketRegion.findVersionTagForClientBulkOp(putAllPRData[i].getEventID());
if (putAllPRData[i].versionTag != null) {
putAllPRData[i].versionTag.replaceNullIDs(bucketRegion.getVersionMember());
}
}
}
}
EventID eventID = putAllPRData[0].getEventID();
ThreadIdentifier membershipID = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
bucketRegion.recordBulkOpStart(membershipID, eventID);
}
bucketRegion.waitUntilLocked(keys);
boolean lockedForPrimary = false;
final HashMap succeeded = new HashMap();
PutAllPartialResult partialKeys = new PutAllPartialResult(putAllPRDataSize);
Object key = keys[0];
try {
bucketRegion.doLockForPrimary(false);
lockedForPrimary = true;
/*
* The real work to be synchronized; it may take a long time. We don't worry about
* another thread sending a msg that shares a key with this request, because such
* requests are blocked by foundKey
*/
for (int i = 0; i < putAllPRDataSize; i++) {
@Released EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
try {
key = ev.getKey();
ev.setPutAllOperation(dpao);
// make sure a local update inserts a cache de-serializable
ev.makeSerializedNewValue();
// then in basicPutPart3(), the ev is added into dpao
try {
didPut = r.getDataView().putEntryOnRemote(ev, false, false, null, false, lastModified, true);
if (didPut && logger.isDebugEnabled()) {
logger.debug("PutAllPRMessage.doLocalPutAll:putLocally success for {}", ev);
}
} catch (ConcurrentCacheModificationException e) {
didPut = true;
if (logger.isDebugEnabled()) {
logger.debug("PutAllPRMessage.doLocalPutAll:putLocally encountered concurrent cache modification for {}", ev, e);
}
}
putAllPRData[i].setTailKey(ev.getTailKey());
if (!didPut) {
// make sure the region hasn't gone away
r.checkReadiness();
ForceReattemptException fre = new ForceReattemptException("unable to perform put in PutAllPR, but operation should not fail");
fre.setHash(ev.getKey().hashCode());
throw fre;
} else {
succeeded.put(putAllPRData[i].getKey(), putAllPRData[i].getValue());
this.versions.addKeyAndVersion(putAllPRData[i].getKey(), ev.getVersionTag());
}
} finally {
ev.release();
}
}
// for
} catch (IllegalMonitorStateException ignore) {
throw new ForceReattemptException("unable to get lock for primary, retrying... ");
} catch (CacheWriterException cwe) {
// encounter cacheWriter exception
partialKeys.saveFailedKey(key, cwe);
} finally {
try {
// Only PutAllPRMessage knows if the thread id is fake. Event has no idea.
// So we have to manually set useFakeEventId for this DPAO
dpao.setUseFakeEventId(true);
r.checkReadiness();
bucketRegion.getDataView().postPutAll(dpao, this.versions, bucketRegion);
} finally {
if (lockedForPrimary) {
bucketRegion.doUnlockForPrimary();
}
}
}
if (partialKeys.hasFailure()) {
partialKeys.addKeysAndVersions(this.versions);
if (logger.isDebugEnabled()) {
logger.debug("PutAllPRMessage: partial keys applied, map to bucket {}'s keys: {}. Applied {}", bucketId, Arrays.toString(keys), succeeded);
}
throw new PutAllPartialResultException(partialKeys);
}
} catch (RegionDestroyedException e) {
ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
} finally {
bucketRegion.removeAndNotifyKeys(keys);
}
} else {
for (int i = 0; i < putAllPRDataSize; i++) {
EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
try {
ev.setOriginRemote(true);
if (this.callbackArg != null) {
ev.setCallbackArgument(this.callbackArg);
}
r.invokePutCallbacks(ev.getOperation().isCreate() ? EnumListenerEvent.AFTER_CREATE : EnumListenerEvent.AFTER_UPDATE, ev, r.isInitialized(), true);
} finally {
ev.release();
}
}
}
} finally {
if (baseEvent != null)
baseEvent.release();
if (dpao != null)
dpao.freeOffHeapResources();
}
return true;
}
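Two details of doLocalPutAll are worth isolating. For a possible-duplicate event with concurrency checks enabled, it first recovers version tags left behind by the previous attempt; then, inside the loop, a ConcurrentCacheModificationException for an individual element is treated as a completed put (didPut = true), so a retried bulk operation does not fail merely because the first attempt's change already landed. Below is a compressed sketch of that per-element loop under those assumptions; Store and putOnce are hypothetical stand-ins for the data view's putEntryOnRemote, not Geode API.

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical sketch of the per-element bulk-put loop; names are illustrative only.
class BulkPutSketch {
    static class ConcurrentModification extends RuntimeException {}

    interface Store {
        void putOnce(Object key, Object value) throws ConcurrentModification;
    }

    // Returns the keys that were applied, counting entries already applied
    // by a prior attempt as successes.
    static Map<Object, Object> applyAll(Store store, Map<Object, Object> batch) {
        Map<Object, Object> succeeded = new LinkedHashMap<>();
        for (Map.Entry<Object, Object> e : batch.entrySet()) {
            boolean didPut;
            try {
                store.putOnce(e.getKey(), e.getValue());
                didPut = true;
            } catch (ConcurrentModification ignore) {
                // a retry raced with the original attempt: the value is already
                // present with a newer-or-equal version, so count it as done
                didPut = true;
            }
            if (didPut) {
                succeeded.put(e.getKey(), e.getValue());
            }
        }
        return succeeded;
    }
}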
Use of org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException in project geode by apache.
The class RemoveAllPRMessage, method doLocalRemoveAll.
/**
* This method is called both by operateOnPartitionedRegion() when processing a remote msg and by
* sendMsgByBucket() when processing a msg targeted to the local JVM. Note: it is very important
* that this message does NOT cause any deadlocks, as the sender will wait indefinitely for the
* acknowledgment.
*
* @param r the partitioned region
* @param eventSender the endpoint server that received the request from the client
* @param cacheWrite if true, invoke the cacheWriter before destroying
* @return true if the operation succeeds; otherwise an exception is thrown
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IMSE_DONT_CATCH_IMSE")
public boolean doLocalRemoveAll(PartitionedRegion r, InternalDistributedMember eventSender, boolean cacheWrite) throws EntryExistsException, ForceReattemptException, DataLocationException {
boolean didRemove = false;
long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT;
if (r.hasServerProxy()) {
clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout();
if (logger.isDebugEnabled()) {
logger.debug("RemoveAllPRMessage: doLocalRemoveAll: clientReadTimeOut is {}", clientReadTimeOut);
}
}
DistributedRemoveAllOperation op = null;
@Released EntryEventImpl baseEvent = null;
BucketRegion bucketRegion = null;
PartitionedRegionDataStore ds = r.getDataStore();
InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId();
try {
if (!notificationOnly) {
// bucketRegion is not null only when !notificationOnly
bucketRegion = ds.getInitializedBucketForId(null, bucketId);
this.versions = new VersionedObjectList(this.removeAllPRDataSize, true, bucketRegion.getAttributes().getConcurrencyChecksEnabled());
// create a base event and an op for the RemoveAllMessage distributed between redundant buckets
baseEvent = EntryEventImpl.create(bucketRegion, Operation.REMOVEALL_DESTROY, null, null, this.callbackArg, true, eventSender, !skipCallbacks, true);
// set baseEventId to the first entry's event id. We need the thread id for DACE
baseEvent.setEventId(removeAllPRData[0].getEventID());
if (this.bridgeContext != null) {
baseEvent.setContext(this.bridgeContext);
}
baseEvent.setPossibleDuplicate(this.posDup);
if (logger.isDebugEnabled()) {
logger.debug("RemoveAllPRMessage.doLocalRemoveAll: eventSender is {}, baseEvent is {}, msg is {}", eventSender, baseEvent, this);
}
op = new DistributedRemoveAllOperation(baseEvent, removeAllPRDataSize, false);
}
// Fix the updateMsg misorder issue
// Lock the keys when doing postRemoveAll
Object[] keys = new Object[removeAllPRDataSize];
for (int i = 0; i < removeAllPRDataSize; ++i) {
keys[i] = removeAllPRData[i].getKey();
}
if (!notificationOnly) {
try {
if (removeAllPRData.length > 0) {
if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
if (logger.isDebugEnabled()) {
logger.debug("attempting to locate version tags for retried event");
}
// try to recover version tags left behind by the previous attempt
for (int i = 0; i < removeAllPRDataSize; i++) {
if (removeAllPRData[i].versionTag == null) {
removeAllPRData[i].versionTag = bucketRegion.findVersionTagForClientBulkOp(removeAllPRData[i].getEventID());
if (removeAllPRData[i].versionTag != null) {
removeAllPRData[i].versionTag.replaceNullIDs(bucketRegion.getVersionMember());
}
}
}
}
EventID eventID = removeAllPRData[0].getEventID();
ThreadIdentifier membershipID = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
bucketRegion.recordBulkOpStart(membershipID, eventID);
}
bucketRegion.waitUntilLocked(keys);
boolean lockedForPrimary = false;
final ArrayList<Object> succeeded = new ArrayList<Object>();
PutAllPartialResult partialKeys = new PutAllPartialResult(removeAllPRDataSize);
Object key = keys[0];
try {
bucketRegion.doLockForPrimary(false);
lockedForPrimary = true;
/*
* The real work to be synchronized; it may take a long time. We don't worry about
* another thread sending a msg that shares a key with this request, because such
* requests are blocked by foundKey
*/
for (int i = 0; i < removeAllPRDataSize; i++) {
@Released EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, removeAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
try {
key = ev.getKey();
ev.setRemoveAllOperation(op);
// then in basicPutPart3(), the ev is added into op
try {
r.getDataView().destroyOnRemote(ev, cacheWrite, null);
didRemove = true;
if (logger.isDebugEnabled()) {
logger.debug("RemoveAllPRMessage.doLocalRemoveAll:removeLocally success for " + ev);
}
} catch (EntryNotFoundException ignore) {
didRemove = true;
if (ev.getVersionTag() == null) {
if (logger.isDebugEnabled()) {
logger.debug("doLocalRemoveAll:RemoveAll encoutered EntryNotFoundException: event={}", ev);
}
}
} catch (ConcurrentCacheModificationException e) {
didRemove = true;
if (logger.isDebugEnabled()) {
logger.debug("RemoveAllPRMessage.doLocalRemoveAll:removeLocally encountered concurrent cache modification for " + ev);
}
}
removeAllPRData[i].setTailKey(ev.getTailKey());
if (!didRemove) {
// make sure the region hasn't gone away
r.checkReadiness();
ForceReattemptException fre = new ForceReattemptException("unable to perform remove in RemoveAllPR, but operation should not fail");
fre.setHash(ev.getKey().hashCode());
throw fre;
} else {
succeeded.add(removeAllPRData[i].getKey());
this.versions.addKeyAndVersion(removeAllPRData[i].getKey(), ev.getVersionTag());
}
} finally {
ev.release();
}
}
// for
} catch (IllegalMonitorStateException ignore) {
throw new ForceReattemptException("unable to get lock for primary, retrying... ");
} catch (CacheWriterException cwe) {
// encounter cacheWriter exception
partialKeys.saveFailedKey(key, cwe);
} finally {
try {
// Only RemoveAllPRMessage knows if the thread id is fake. Event has no idea.
// So we have to manually set useFakeEventId for this op
op.setUseFakeEventId(true);
r.checkReadiness();
bucketRegion.getDataView().postRemoveAll(op, this.versions, bucketRegion);
} finally {
if (lockedForPrimary) {
bucketRegion.doUnlockForPrimary();
}
}
}
if (partialKeys.hasFailure()) {
partialKeys.addKeysAndVersions(this.versions);
if (logger.isDebugEnabled()) {
logger.debug("RemoveAllPRMessage: partial keys applied, map to bucket {}'s keys:{}. Applied {}", bucketId, Arrays.toString(keys), succeeded);
}
throw new PutAllPartialResultException(partialKeys);
}
} catch (RegionDestroyedException e) {
ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
} finally {
bucketRegion.removeAndNotifyKeys(keys);
}
} else {
for (int i = 0; i < removeAllPRDataSize; i++) {
EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, removeAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
try {
ev.setOriginRemote(true);
if (this.callbackArg != null) {
ev.setCallbackArgument(this.callbackArg);
}
r.invokeDestroyCallbacks(EnumListenerEvent.AFTER_DESTROY, ev, r.isInitialized(), true);
} finally {
ev.release();
}
}
}
} finally {
if (baseEvent != null)
baseEvent.release();
if (op != null)
op.freeOffHeapResources();
}
return true;
}
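Both bulk methods also share a strict lock discipline: take the per-key locks (waitUntilLocked), then the primary lock (doLockForPrimary), and release in reverse order through nested finally blocks, so that postPutAll/postRemoveAll always runs and no lock can leak even when an element throws. A stripped-down sketch of that shape follows; lockKeys, lockPrimary, and their counterparts are hypothetical stand-ins for the BucketRegion calls.

// Hypothetical sketch of the nested-finally lock discipline used by
// doLocalPutAll and doLocalRemoveAll; the lock methods are stand-ins.
class BulkLockDiscipline {
    void runBulkOp(Runnable perEntryWork, Runnable postBulkOp) {
        lockKeys();                      // waitUntilLocked(keys)
        try {
            boolean lockedForPrimary = false;
            try {
                lockPrimary();           // doLockForPrimary(false)
                lockedForPrimary = true;
                perEntryWork.run();      // the per-element put/remove loop
            } finally {
                try {
                    postBulkOp.run();    // postPutAll/postRemoveAll always runs
                } finally {
                    if (lockedForPrimary) {
                        unlockPrimary(); // doUnlockForPrimary()
                    }
                }
            }
        } finally {
            unlockKeys();                // removeAndNotifyKeys(keys)
        }
    }

    void lockKeys() {}
    void unlockKeys() {}
    void lockPrimary() {}
    void unlockPrimary() {}
}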
Use of org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException in project geode by apache.
The class AbstractRegionMap, method initialImagePut.
public boolean initialImagePut(final Object key, final long lastModified, Object newValue, final boolean wasRecovered, boolean deferLRUCallback, VersionTag entryVersion, InternalDistributedMember sender, boolean isSynchronizing) {
boolean result = false;
boolean done = false;
boolean cleared = false;
final LocalRegion owner = _getOwner();
if (newValue == Token.TOMBSTONE && !owner.getConcurrencyChecksEnabled()) {
return false;
}
if (owner instanceof HARegion && newValue instanceof CachedDeserializable) {
Object actualVal = ((CachedDeserializable) newValue).getDeserializedValue(null, null);
if (actualVal instanceof HAEventWrapper) {
HAEventWrapper haEventWrapper = (HAEventWrapper) actualVal;
// Key was removed at sender side so not putting it into the HARegion
if (haEventWrapper.getClientUpdateMessage() == null) {
return false;
}
// Getting the instance from the singleton CCN. This assumes only one bridge
// server in the VM
HAContainerWrapper haContainer = (HAContainerWrapper) CacheClientNotifier.getInstance().getHaContainer();
if (haContainer == null) {
return false;
}
HAEventWrapper original = null;
// synchronized (haContainer) {
do {
ClientUpdateMessageImpl oldMsg = (ClientUpdateMessageImpl) haContainer.putIfAbsent(haEventWrapper, haEventWrapper.getClientUpdateMessage());
if (oldMsg != null) {
original = (HAEventWrapper) haContainer.getKey(haEventWrapper);
if (original == null) {
continue;
}
synchronized (original) {
if ((HAEventWrapper) haContainer.getKey(original) != null) {
original.incAndGetReferenceCount();
HARegionQueue.addClientCQsAndInterestList(oldMsg, haEventWrapper, haContainer, owner.getName());
haEventWrapper.setClientUpdateMessage(null);
newValue = CachedDeserializableFactory.create(original, ((CachedDeserializable) newValue).getSizeInBytes());
} else {
original = null;
}
}
} else {
// putIfAbsent successful
synchronized (haEventWrapper) {
haEventWrapper.incAndGetReferenceCount();
haEventWrapper.setHAContainer(haContainer);
haEventWrapper.setClientUpdateMessage(null);
haEventWrapper.setIsRefFromHAContainer(true);
}
break;
}
// try until we either get a reference to HAEventWrapper from
// HAContainer or successfully put one into it.
} while (original == null);
/*
* entry = (Map.Entry)haContainer.getEntry(haEventWrapper); if (entry != null) { original =
* (HAEventWrapper)entry.getKey(); original.incAndGetReferenceCount(); } else {
* haEventWrapper.incAndGetReferenceCount(); haEventWrapper.setHAContainer(haContainer);
* haContainer.put(haEventWrapper, haEventWrapper .getClientUpdateMessage());
* haEventWrapper.setClientUpdateMessage(null);
* haEventWrapper.setIsRefFromHAContainer(true); } } if (entry != null) {
* HARegionQueue.addClientCQsAndInterestList(entry, haEventWrapper, haContainer,
* owner.getName()); haEventWrapper.setClientUpdateMessage(null); newValue =
* CachedDeserializableFactory.create(original,
* ((CachedDeserializable)newValue).getSizeInBytes()); }
*/
}
}
try {
RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.REMOVED_PHASE1);
EntryEventImpl event = null;
@Retained @Released Object oldValue = null;
try {
RegionEntry oldRe = null;
synchronized (newRe) {
try {
oldRe = putEntryIfAbsent(key, newRe);
while (!done && oldRe != null) {
synchronized (oldRe) {
if (oldRe.isRemovedPhase2()) {
owner.getCachePerfStats().incRetries();
_getMap().remove(key, oldRe);
oldRe = putEntryIfAbsent(key, newRe);
} else {
boolean acceptedVersionTag = false;
if (entryVersion != null && owner.concurrencyChecksEnabled) {
Assert.assertTrue(entryVersion.getMemberID() != null, "GII entry versions must have identifiers");
try {
boolean isTombstone = (newValue == Token.TOMBSTONE);
// don't reschedule the tombstone if it hasn't changed
boolean isSameTombstone = oldRe.isTombstone() && isTombstone && oldRe.getVersionStamp().asVersionTag().equals(entryVersion);
if (isSameTombstone) {
return true;
}
processVersionTagForGII(oldRe, owner, entryVersion, isTombstone, sender, !wasRecovered || isSynchronizing);
acceptedVersionTag = true;
} catch (ConcurrentCacheModificationException e) {
return false;
}
}
final boolean oldIsTombstone = oldRe.isTombstone();
final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
try {
result = oldRe.initialImagePut(owner, lastModified, newValue, wasRecovered, acceptedVersionTag);
if (result) {
if (oldIsTombstone) {
owner.unscheduleTombstone(oldRe);
if (newValue != Token.TOMBSTONE) {
lruEntryCreate(oldRe);
} else {
lruEntryUpdate(oldRe);
}
}
if (newValue == Token.TOMBSTONE) {
owner.updateSizeOnRemove(key, oldSize);
if (owner.getServerProxy() == null && owner.getVersionVector().isTombstoneTooOld(entryVersion.getMemberID(), entryVersion.getRegionVersion())) {
// the received tombstone has already been reaped, so don't retain it
removeTombstone(oldRe, entryVersion, false, false);
return false;
} else {
owner.scheduleTombstone(oldRe, entryVersion);
lruEntryDestroy(oldRe);
}
} else {
int newSize = owner.calculateRegionEntryValueSize(oldRe);
if (!oldIsTombstone) {
owner.updateSizeOnPut(key, oldSize, newSize);
} else {
owner.updateSizeOnCreate(key, newSize);
}
EntryLogger.logInitialImagePut(_getOwnerObject(), key, newValue);
}
}
if (owner.getIndexManager() != null) {
// remove the old index mapping first, since the update could not locate the old key
if (!oldRe.isRemoved()) {
owner.getIndexManager().updateIndexes(oldRe, IndexManager.REMOVE_ENTRY, IndexProtocol.BEFORE_UPDATE_OP);
}
owner.getIndexManager().updateIndexes(oldRe, oldRe.isRemoved() ? IndexManager.ADD_ENTRY : IndexManager.UPDATE_ENTRY, oldRe.isRemoved() ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
}
done = true;
} finally {
if (event != null) {
event.release();
event = null;
}
}
}
}
}
if (!done) {
boolean versionTagAccepted = false;
if (entryVersion != null && owner.concurrencyChecksEnabled) {
Assert.assertTrue(entryVersion.getMemberID() != null, "GII entry versions must have identifiers");
try {
boolean isTombstone = (newValue == Token.TOMBSTONE);
processVersionTagForGII(newRe, owner, entryVersion, isTombstone, sender, !wasRecovered || isSynchronizing);
versionTagAccepted = true;
} catch (ConcurrentCacheModificationException e) {
return false;
}
}
result = newRe.initialImageInit(owner, lastModified, newValue, true, wasRecovered, versionTagAccepted);
try {
if (result) {
if (newValue == Token.TOMBSTONE) {
owner.scheduleTombstone(newRe, entryVersion);
} else {
owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(newRe));
EntryLogger.logInitialImagePut(_getOwnerObject(), key, newValue);
lruEntryCreate(newRe);
}
incEntryCount(1);
}
// Update local indexes
if (owner.getIndexManager() != null) {
// the update could not locate the old key
if (oldRe != null && !oldRe.isRemoved()) {
owner.getIndexManager().updateIndexes(oldRe, IndexManager.REMOVE_ENTRY, IndexProtocol.BEFORE_UPDATE_OP);
}
owner.getIndexManager().updateIndexes(newRe, newRe.isRemoved() ? IndexManager.REMOVE_ENTRY : IndexManager.UPDATE_ENTRY, newRe.isRemoved() ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
}
done = true;
} finally {
if (event != null) {
event.release();
event = null;
}
}
}
} finally {
if (done && result) {
initialImagePutEntry(newRe);
}
if (!done) {
removeEntry(key, newRe, false);
if (owner.getIndexManager() != null) {
owner.getIndexManager().updateIndexes(newRe, IndexManager.REMOVE_ENTRY, IndexProtocol.OTHER_OP);
}
}
}
}
// synchronized
} finally {
if (event != null)
event.release();
OffHeapHelper.release(oldValue);
}
} catch (RegionClearedException rce) {
// Asif: do not issue any sort of callbacks
done = false;
cleared = true;
} catch (QueryException qe) {
done = false;
cleared = true;
} finally {
if (done && !deferLRUCallback) {
lruUpdateCallback();
} else if (!cleared) {
resetThreadLocals();
}
}
return result;
}
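In initialImagePut, a ConcurrentCacheModificationException from processVersionTagForGII signals that the local entry already carries a version at least as new as the incoming image value, so the method returns false and keeps the local state. A minimal sketch of that accept-or-reject decision, using hypothetical types rather than Geode's RegionEntry and VersionTag:

// Hypothetical sketch of the GII accept/reject check; not Geode API.
class GiiVersionCheck {
    static class ConcurrentModification extends RuntimeException {}

    static final class Entry {
        long version;
        Object value;
    }

    // Throws when the incoming version is not newer, mirroring processVersionTagForGII.
    static void checkVersion(Entry local, long incomingVersion) {
        if (incomingVersion <= local.version) {
            throw new ConcurrentModification();
        }
    }

    // Mirrors initialImagePut: reject (return false) when the local entry is newer.
    static boolean imagePut(Entry local, Object newValue, long incomingVersion) {
        synchronized (local) {
            try {
                checkVersion(local, incomingVersion);
            } catch (ConcurrentModification ignore) {
                return false; // a local modification wins; drop the image value
            }
            local.value = newValue;
            local.version = incomingVersion;
            return true;
        }
    }
}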
Use of org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException in project geode by apache.
The class AbstractRegionMap, method invalidate.
public boolean invalidate(EntryEventImpl event, boolean invokeCallbacks, boolean forceNewEntry, boolean forceCallbacks) throws EntryNotFoundException {
final boolean isDebugEnabled = logger.isDebugEnabled();
final LocalRegion owner = _getOwner();
if (owner == null) {
// "fix" for bug 32440
Assert.assertTrue(false, "The owner for RegionMap " + this + " is null for event " + event);
}
boolean didInvalidate = false;
RegionEntry invalidatedRe = null;
boolean clearOccured = false;
DiskRegion dr = owner.getDiskRegion();
boolean ownerIsInitialized = owner.isInitialized();
try {
// Fix for Bug #44431. We do NOT want to update the region and wait
// later for index INIT as region.clear() can cause inconsistency if
// happened in parallel as it also does index INIT.
IndexManager oqlIndexManager = owner.getIndexManager();
if (oqlIndexManager != null) {
oqlIndexManager.waitForIndexInit();
}
lockForCacheModification(owner, event);
try {
try {
if (forceNewEntry || forceCallbacks) {
boolean opCompleted = false;
RegionEntry newRe = getEntryFactory().createEntry(owner, event.getKey(), Token.REMOVED_PHASE1);
synchronized (newRe) {
try {
RegionEntry oldRe = putEntryIfAbsent(event.getKey(), newRe);
while (!opCompleted && oldRe != null) {
synchronized (oldRe) {
// proceed to phase 2 of removal.
if (oldRe.isRemovedPhase2()) {
owner.getCachePerfStats().incRetries();
_getMap().remove(event.getKey(), oldRe);
oldRe = putEntryIfAbsent(event.getKey(), newRe);
} else {
opCompleted = true;
event.setRegionEntry(oldRe);
if (oldRe.isDestroyed()) {
if (isDebugEnabled) {
logger.debug("mapInvalidate: Found DESTROYED token, not invalidated; key={}", event.getKey());
}
} else if (oldRe.isInvalid()) {
// was already invalid, do not invoke listeners or increment stat
if (isDebugEnabled) {
logger.debug("mapInvalidate: Entry already invalid: '{}'", event.getKey());
}
processVersionTag(oldRe, event);
try {
// OFFHEAP noop: setting an already-invalid entry to invalid.
// No need to call prepareValueForCache since it is an invalid token.
oldRe.setValue(owner, oldRe.getValueInVM(owner));
} catch (RegionClearedException e) {
// that's okay - when writing an invalid into a disk, the
// region has been cleared (including this token)
}
} else {
owner.serverInvalidate(event);
if (owner.concurrencyChecksEnabled && event.noVersionReceivedFromServer()) {
// server did not generate a version tag, so don't leave an invalid entry here
return false;
}
final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
// added for cq which needs old value. rdubey
FilterProfile fp = owner.getFilterProfile();
if (!oldRe.isRemoved() && (fp != null && fp.getCqCount() > 0)) {
// OFFHEAP EntryEventImpl
Object oldValue = oldRe.getValueInVM(owner);
// this will not fault in the value.
if (oldValue == Token.NOT_AVAILABLE) {
event.setOldValue(oldRe.getValueOnDiskOrBuffer(owner));
} else {
event.setOldValue(oldValue);
}
}
boolean isCreate = false;
try {
if (oldRe.isRemoved()) {
processVersionTag(oldRe, event);
event.putNewEntry(owner, oldRe);
EntryLogger.logInvalidate(event);
owner.recordEvent(event);
if (!oldRe.isTombstone()) {
owner.updateSizeOnPut(event.getKey(), oldSize, event.getNewValueBucketSize());
} else {
owner.updateSizeOnCreate(event.getKey(), event.getNewValueBucketSize());
isCreate = true;
}
} else {
processVersionTag(oldRe, event);
event.putExistingEntry(owner, oldRe);
EntryLogger.logInvalidate(event);
owner.recordEvent(event);
owner.updateSizeOnPut(event.getKey(), oldSize, event.getNewValueBucketSize());
}
} catch (RegionClearedException e) {
// generate versionTag for the event
EntryLogger.logInvalidate(event);
owner.recordEvent(event);
clearOccured = true;
}
owner.basicInvalidatePart2(oldRe, event, clearOccured, /* conflict with clear */
invokeCallbacks);
if (!clearOccured) {
if (isCreate) {
lruEntryCreate(oldRe);
} else {
lruEntryUpdate(oldRe);
}
}
didInvalidate = true;
invalidatedRe = oldRe;
}
}
}
// synchronized oldRe
}
if (!opCompleted) {
if (forceNewEntry && event.isFromServer()) {
// don't invoke listeners - we didn't force new entries for CCU invalidations before 7.0, and listeners don't care
if (!FORCE_INVALIDATE_EVENT) {
event.inhibitCacheListenerNotification(true);
}
}
event.setRegionEntry(newRe);
owner.serverInvalidate(event);
if (!forceNewEntry && event.noVersionReceivedFromServer()) {
// server did not generate a version tag, so don't leave an invalid entry here
return false;
}
try {
ownerIsInitialized = owner.isInitialized();
if (!ownerIsInitialized && owner.getDataPolicy().withReplication()) {
final int oldSize = owner.calculateRegionEntryValueSize(newRe);
invalidateEntry(event, newRe, oldSize);
} else {
invalidateNewEntry(event, owner, newRe);
}
} catch (RegionClearedException e) {
// TODO: deltaGII: do we even need RegionClearedException?
// generate versionTag for the event
owner.recordEvent(event);
clearOccured = true;
}
owner.basicInvalidatePart2(newRe, event, clearOccured, /* conflict with clear */
invokeCallbacks);
if (!clearOccured) {
lruEntryCreate(newRe);
incEntryCount(1);
}
opCompleted = true;
didInvalidate = true;
invalidatedRe = newRe;
// don't leave an entry in the cache if we just wanted to force distribution and events for this invalidate
if (!forceNewEntry) {
removeEntry(event.getKey(), newRe, false);
}
}
// !opCompleted
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
} finally {
if (!opCompleted) {
removeEntry(event.getKey(), newRe, false);
}
}
}
// synchronized newRe
} else // forceNewEntry
{
// !forceNewEntry
boolean retry = true;
while (retry) {
retry = false;
boolean entryExisted = false;
RegionEntry re = getEntry(event.getKey());
RegionEntry tombstone = null;
boolean haveTombstone = false;
if (re != null && re.isTombstone()) {
tombstone = re;
haveTombstone = true;
re = null;
}
if (re == null) {
ownerIsInitialized = owner.isInitialized();
if (!ownerIsInitialized) {
// when GII message arrived or processed later than invalidate
// message, the entry should be created as placeholder
RegionEntry newRe = haveTombstone ? tombstone : getEntryFactory().createEntry(owner, event.getKey(), Token.INVALID);
synchronized (newRe) {
if (haveTombstone && !tombstone.isTombstone()) {
// state of the tombstone has changed so we need to retry
retry = true;
// retryEntry = tombstone; // leave this in place for debugging
continue;
}
re = putEntryIfAbsent(event.getKey(), newRe);
if (re == tombstone) {
// pretend we don't have an entry
re = null;
}
}
} else if (owner.getServerProxy() != null) {
Object sync = haveTombstone ? tombstone : new Object();
synchronized (sync) {
if (haveTombstone && !tombstone.isTombstone()) {
// bug 45295: state of the tombstone has changed so we need to retry
retry = true;
// retryEntry = tombstone; // leave this in place for debugging
continue;
}
// bug #43287 - send event to server even if it's not in the client (LRU may
// have evicted it)
owner.serverInvalidate(event);
if (owner.concurrencyChecksEnabled) {
if (event.getVersionTag() == null) {
// server did not perform the invalidation, so don't leave an invalid entry here
return false;
} else if (tombstone != null) {
processVersionTag(tombstone, event);
try {
if (!tombstone.isTombstone()) {
if (isDebugEnabled) {
logger.debug("tombstone is no longer a tombstone. {}:event={}", tombstone, event);
}
}
tombstone.setValue(owner, Token.TOMBSTONE);
} catch (RegionClearedException e) {
// that's okay - when writing a tombstone into a disk, the
// region has been cleared (including this tombstone)
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
// update the tombstone's version to prevent an older CCU/putAll from
// overwriting it
owner.rescheduleTombstone(tombstone, event.getVersionTag());
}
}
}
entryExisted = true;
}
}
if (re != null) {
// normal invalidate operation
synchronized (re) {
if (!event.isOriginRemote() && event.getOperation().isExpiration()) {
// a locally-started expiration must not invalidate an entry that is in use by a tx
if (re.isInUseByTransaction()) {
return false;
}
}
if (re.isTombstone() || (!re.isRemoved() && !re.isDestroyed())) {
entryExisted = true;
if (re.isInvalid()) {
// already invalid: do not invoke listeners or increment the stat
if (isDebugEnabled) {
logger.debug("Invalidate: Entry already invalid: '{}'", event.getKey());
}
if (event.getVersionTag() != null && owner.getVersionVector() != null) {
owner.getVersionVector().recordVersion((InternalDistributedMember) event.getDistributedMember(), event.getVersionTag());
}
} else {
// previous value not invalid
event.setRegionEntry(re);
owner.serverInvalidate(event);
if (owner.concurrencyChecksEnabled && event.noVersionReceivedFromServer()) {
// server did not generate a version tag, so don't leave an invalid entry here
if (isDebugEnabled) {
logger.debug("returning early because server did not generate a version stamp for this event:{}", event);
}
return false;
}
// in case of overflow to disk we need the old value for cqs.
if (owner.getFilterProfile().getCqCount() > 0) {
// used to be getValue(), which could cause a deadlock. rdubey
if (re.isValueNull()) {
event.setOldValue(re.getValueOnDiskOrBuffer(owner));
} else {
Object v = re.getValueInVM(owner);
// OFFHEAP escapes to EntryEventImpl oldValue
event.setOldValue(v);
}
}
final boolean oldWasTombstone = re.isTombstone();
final int oldSize = _getOwner().calculateRegionEntryValueSize(re);
try {
invalidateEntry(event, re, oldSize);
} catch (RegionClearedException rce) {
// generate versionTag for the event
EntryLogger.logInvalidate(event);
_getOwner().recordEvent(event);
clearOccured = true;
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
owner.basicInvalidatePart2(re, event, clearOccured, /* conflict with clear */
invokeCallbacks);
if (!clearOccured) {
if (oldWasTombstone) {
lruEntryCreate(re);
} else {
lruEntryUpdate(re);
}
}
didInvalidate = true;
invalidatedRe = re;
}
// previous value not invalid
}
}
// synchronized re
} else // re != null
{
// At this point, either it's not in GII mode, or the placeholder
// is in region, do nothing
}
if (!entryExisted) {
owner.checkEntryNotFound(event.getKey());
}
}
// while(retry)
}
// !forceNewEntry
} catch (DiskAccessException dae) {
invalidatedRe = null;
didInvalidate = false;
this._getOwner().handleDiskAccessException(dae);
throw dae;
} finally {
if (oqlIndexManager != null) {
oqlIndexManager.countDownIndexUpdaters();
}
if (invalidatedRe != null) {
owner.basicInvalidatePart3(invalidatedRe, event, invokeCallbacks);
}
if (didInvalidate && !clearOccured) {
try {
lruUpdateCallback();
} catch (DiskAccessException dae) {
this._getOwner().handleDiskAccessException(dae);
throw dae;
}
} else if (!didInvalidate) {
resetThreadLocals();
}
}
return didInvalidate;
} finally {
if (ownerIsInitialized) {
forceInvalidateEvent(event, owner);
}
}
} finally {
releaseCacheModificationLock(owner, event);
}
}
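invalidate handles the exception differently from the bulk operations: it rethrows the ConcurrentCacheModificationException to its caller, but first forwards the event to gateways whenever the conflict check merely updated the event's time-stamp, so WAN sites still learn the new timestamp. The shape of that handler, sketched with hypothetical callbacks:

// Hypothetical sketch of the notify-then-rethrow pattern in invalidate; not Geode API.
class InvalidateConflictHandling {
    static class ConcurrentModification extends RuntimeException {}

    interface Event {
        boolean timestampWasUpdated();
    }

    static void invalidate(Runnable applyInvalidate, Event event, Runnable notifyGateways) {
        try {
            applyInvalidate.run();
        } catch (ConcurrentModification ccme) {
            if (event.timestampWasUpdated()) {
                // the conflict resolved to a time-stamp bump; gateways must
                // still learn the new time-stamp before we give up
                notifyGateways.run();
            }
            throw ccme; // the caller decides how to surface the conflict
        }
    }
}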