Use of org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException in the Apache Geode project.
From the class CacheClientUpdater, method handleDestroy.
/**
* locally destroy an entry
*
* @param clientMessage message describing the entry
*/
private void handleDestroy(Message clientMessage) {
String regionName = null;
Object key = null;
final boolean isDebugEnabled = logger.isDebugEnabled();
try {
this.isOpCompleted = false;
// Retrieve the data from the local-destroy message parts
if (isDebugEnabled) {
logger.debug("Received destroy message of length ({} bytes)", clientMessage.getPayloadLength());
}
int partCnt = 0;
Part regionNamePart = clientMessage.getPart(partCnt++);
Part keyPart = clientMessage.getPart(partCnt++);
Part callbackArgumentPart = clientMessage.getPart(partCnt++);
VersionTag versionTag = (VersionTag) clientMessage.getPart(partCnt++).getObject();
if (versionTag != null) {
versionTag.replaceNullIDs((InternalDistributedMember) this.endpoint.getMemberId());
}
regionName = regionNamePart.getString();
key = keyPart.getStringOrObject();
Part isInterestListPassedPart = clientMessage.getPart(partCnt++);
Part hasCqsPart = clientMessage.getPart(partCnt++);
boolean withInterest = ((Boolean) isInterestListPassedPart.getObject()).booleanValue();
boolean withCQs = ((Boolean) hasCqsPart.getObject()).booleanValue();
Object callbackArgument = callbackArgumentPart.getObject();
if (isDebugEnabled) {
logger.debug("Destroying entry for region: {} key: {} callbackArgument: {} withInterest={} withCQs={} version={}", regionName, key, callbackArgument, withInterest, withCQs, versionTag);
}
LocalRegion region = (LocalRegion) this.cacheHelper.getRegion(regionName);
if (region == null) {
if (isDebugEnabled && !quitting()) {
logger.debug("Region named {} does not exist", regionName);
}
} else if (region.hasServerProxy() && (withInterest || !withCQs)) {
EventID eventId = null;
try {
Part eid = clientMessage.getPart(clientMessage.getNumberOfParts() - 1);
eventId = (EventID) eid.getObject();
try {
region.basicBridgeClientDestroy(eventId.getDistributedMember(), key, callbackArgument, this.qManager.getState().getProcessedMarker() || !this.isDurableClient, eventId, versionTag);
} catch (ConcurrentCacheModificationException ignore) {
// allow CQs to be processed
}
this.isOpCompleted = true;
if (isDebugEnabled) {
logger.debug("Destroyed entry for region: {} key: {} callbackArgument: {}", regionName, key, callbackArgument);
}
} catch (EntryNotFoundException ignore) {
if (isDebugEnabled && !quitting()) {
logger.debug("Already destroyed entry for region: {} key: {} callbackArgument: {} eventId={}", regionName, key, callbackArgument, eventId.expensiveToString());
}
this.isOpCompleted = true;
}
}
if (withCQs) {
Part numCqsPart = clientMessage.getPart(partCnt++);
if (isDebugEnabled) {
logger.debug("Received message has CQ Event. Number of cqs interested in the event : {}", numCqsPart.getInt() / 2);
}
partCnt = processCqs(clientMessage, partCnt, numCqsPart.getInt(), clientMessage.getMessageType(), key, null);
this.isOpCompleted = true;
}
} catch (Exception e) {
String message = LocalizedStrings.CacheClientUpdater_THE_FOLLOWING_EXCEPTION_OCCURRED_WHILE_ATTEMPTING_TO_DESTROY_ENTRY_REGION_0_KEY_1.toLocalizedString(regionName, key);
handleException(message, e);
}
}
Use of org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException in the Apache Geode project.
From the class DistributedRegionSearchLoadJUnitTest, method testClientEventIsUpdatedWithCurrentEntryVersionTagAfterSearchConcurrencyException.
@Test
public void testClientEventIsUpdatedWithCurrentEntryVersionTagAfterSearchConcurrencyException() {
  // Arrange: a region with a dummy client event that has been invalidated.
  DistributedRegion spiedRegion = prepare(true);
  EntryEventImpl clientEvent = createDummyEvent(spiedRegion);
  spiedRegion.basicInvalidate(clientEvent);

  // Stub a region entry whose version stamp reports a known current tag.
  VersionTag currentTag = createVersionTag(true);
  RegionEntry mockedEntry = mock(RegionEntry.class);
  VersionStamp mockedStamp = mock(VersionStamp.class);
  doReturn(mockedEntry).when(spiedRegion).getRegionEntry(any());
  when(mockedEntry.getVersionStamp()).thenReturn(mockedStamp);
  when(mockedStamp.asVersionTag()).thenReturn(currentTag);

  createSearchLoad();

  // Force the post-search local put to hit a concurrent-modification conflict.
  doThrow(new ConcurrentCacheModificationException()).when(spiedRegion).basicPutEntry(any(EntryEventImpl.class), anyLong());

  // Act: run the search/load path with the conflicting put.
  KeyInfo keyInfo = new KeyInfo(clientEvent.getKey(), null, null);
  spiedRegion.findObjectInSystem(keyInfo, false, null, false, null, false, false, null, clientEvent, false);

  // Assert: despite the conflict, the client event carries the entry's current version tag.
  assertNotNull("ClientEvent version tag is not set with region version tag.", clientEvent.getVersionTag());
}
Use of org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException in the Apache Geode project.
From the class AbstractRegionMap, method initialImagePut.
/**
 * Applies one key/value received during a getInitialImage (GII) transfer to this region map.
 *
 * <p>Handles three cases: retrying past entries in removed-phase-2, updating an existing
 * entry (including tombstone rescheduling and size accounting), and creating a brand-new
 * entry. Version tags from the image provider are validated via
 * {@code processVersionTagForGII}; a {@link ConcurrentCacheModificationException} there
 * means a newer local change already exists, so the image value is rejected (returns false).
 *
 * @param key the entry key
 * @param lastModified last-modified time supplied by the image provider
 * @param newValue the received value; may be {@code Token.TOMBSTONE} for a destroyed entry
 * @param wasRecovered whether the local entry was recovered from disk
 * @param deferLRUCallback when true, the caller will run the LRU update callback later
 * @param entryVersion version tag from the image provider; may be null
 * @param sender the member that sent the image entry
 * @param isSynchronizing whether this put is part of region synchronization
 * @return true if the value was applied to the map
 */
public boolean initialImagePut(final Object key, final long lastModified, Object newValue, final boolean wasRecovered, boolean deferLRUCallback, VersionTag entryVersion, InternalDistributedMember sender, boolean isSynchronizing) {
  boolean result = false;
  boolean done = false;
  boolean cleared = false;
  final LocalRegion owner = _getOwner();
  // Without concurrency checks there is no tombstone bookkeeping, so a tombstone
  // image entry is simply dropped.
  if (newValue == Token.TOMBSTONE && !owner.getConcurrencyChecksEnabled()) {
    return false;
  }
  // Special handling for HA regions: values wrapping HA events must be deduplicated
  // against the HA container so reference counts stay correct.
  if (owner instanceof HARegion && newValue instanceof CachedDeserializable) {
    Object actualVal = ((CachedDeserializable) newValue).getDeserializedValue(null, null);
    if (actualVal instanceof HAEventWrapper) {
      HAEventWrapper haEventWrapper = (HAEventWrapper) actualVal;
      // Key was removed at sender side so not putting it into the HARegion
      if (haEventWrapper.getClientUpdateMessage() == null) {
        return false;
      }
      // Getting the instance from singleton CCN..This assumes only one bridge
      // server in the VM
      HAContainerWrapper haContainer = (HAContainerWrapper) CacheClientNotifier.getInstance().getHaContainer();
      if (haContainer == null) {
        return false;
      }
      HAEventWrapper original = null;
      // synchronized (haContainer) {
      do {
        ClientUpdateMessageImpl oldMsg = (ClientUpdateMessageImpl) haContainer.putIfAbsent(haEventWrapper, haEventWrapper.getClientUpdateMessage());
        if (oldMsg != null) {
          // Another wrapper already in the container; try to pin it. It can vanish
          // concurrently, hence the retry loop.
          original = (HAEventWrapper) haContainer.getKey(haEventWrapper);
          if (original == null) {
            continue;
          }
          synchronized (original) {
            // Re-check under the lock that the original is still in the container.
            if ((HAEventWrapper) haContainer.getKey(original) != null) {
              original.incAndGetReferenceCount();
              HARegionQueue.addClientCQsAndInterestList(oldMsg, haEventWrapper, haContainer, owner.getName());
              haEventWrapper.setClientUpdateMessage(null);
              newValue = CachedDeserializableFactory.create(original, ((CachedDeserializable) newValue).getSizeInBytes());
            } else {
              original = null;
            }
          }
        } else {
          // putIfAbsent successful
          synchronized (haEventWrapper) {
            haEventWrapper.incAndGetReferenceCount();
            haEventWrapper.setHAContainer(haContainer);
            haEventWrapper.setClientUpdateMessage(null);
            haEventWrapper.setIsRefFromHAContainer(true);
          }
          break;
        }
        // try until we either get a reference to HAEventWrapper from
        // HAContainer or successfully put one into it.
      } while (original == null);
      /*
       * entry = (Map.Entry)haContainer.getEntry(haEventWrapper); if (entry != null) { original =
       * (HAEventWrapper)entry.getKey(); original.incAndGetReferenceCount(); } else {
       * haEventWrapper.incAndGetReferenceCount(); haEventWrapper.setHAContainer(haContainer);
       * haContainer.put(haEventWrapper, haEventWrapper .getClientUpdateMessage());
       * haEventWrapper.setClientUpdateMessage(null);
       * haEventWrapper.setIsRefFromHAContainer(true); } } if (entry != null) {
       * HARegionQueue.addClientCQsAndInterestList(entry, haEventWrapper, haContainer,
       * owner.getName()); haEventWrapper.setClientUpdateMessage(null); newValue =
       * CachedDeserializableFactory.create(original,
       * ((CachedDeserializable)newValue).getSizeInBytes()); }
       */
    }
  }
  try {
    // A placeholder entry used if no entry exists yet for this key.
    RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.REMOVED_PHASE1);
    EntryEventImpl event = null;
    @Retained @Released Object oldValue = null;
    try {
      RegionEntry oldRe = null;
      synchronized (newRe) {
        try {
          oldRe = putEntryIfAbsent(key, newRe);
          // Loop while an existing entry is found; a removed-phase-2 entry forces a retry.
          while (!done && oldRe != null) {
            synchronized (oldRe) {
              if (oldRe.isRemovedPhase2()) {
                owner.getCachePerfStats().incRetries();
                _getMap().remove(key, oldRe);
                oldRe = putEntryIfAbsent(key, newRe);
              } else {
                boolean acceptedVersionTag = false;
                if (entryVersion != null && owner.concurrencyChecksEnabled) {
                  Assert.assertTrue(entryVersion.getMemberID() != null, "GII entry versions must have identifiers");
                  try {
                    boolean isTombstone = (newValue == Token.TOMBSTONE);
                    // don't reschedule the tombstone if it hasn't changed
                    boolean isSameTombstone = oldRe.isTombstone() && isTombstone && oldRe.getVersionStamp().asVersionTag().equals(entryVersion);
                    if (isSameTombstone) {
                      return true;
                    }
                    processVersionTagForGII(oldRe, owner, entryVersion, isTombstone, sender, !wasRecovered || isSynchronizing);
                    acceptedVersionTag = true;
                  } catch (ConcurrentCacheModificationException e) {
                    // a concurrent local modification won; reject the image value
                    return false;
                  }
                }
                final boolean oldIsTombstone = oldRe.isTombstone();
                final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
                try {
                  result = oldRe.initialImagePut(owner, lastModified, newValue, wasRecovered, acceptedVersionTag);
                  if (result) {
                    if (oldIsTombstone) {
                      owner.unscheduleTombstone(oldRe);
                      if (newValue != Token.TOMBSTONE) {
                        lruEntryCreate(oldRe);
                      } else {
                        lruEntryUpdate(oldRe);
                      }
                    }
                    if (newValue == Token.TOMBSTONE) {
                      owner.updateSizeOnRemove(key, oldSize);
                      if (owner.getServerProxy() == null && owner.getVersionVector().isTombstoneTooOld(entryVersion.getMemberID(), entryVersion.getRegionVersion())) {
                        // the received tombstone has already been reaped, so don't retain it
                        removeTombstone(oldRe, entryVersion, false, false);
                        return false;
                      } else {
                        owner.scheduleTombstone(oldRe, entryVersion);
                        lruEntryDestroy(oldRe);
                      }
                    } else {
                      int newSize = owner.calculateRegionEntryValueSize(oldRe);
                      if (!oldIsTombstone) {
                        owner.updateSizeOnPut(key, oldSize, newSize);
                      } else {
                        owner.updateSizeOnCreate(key, newSize);
                      }
                      EntryLogger.logInitialImagePut(_getOwnerObject(), key, newValue);
                    }
                  }
                  if (owner.getIndexManager() != null) {
                    // as the update could not locate the old key
                    if (!oldRe.isRemoved()) {
                      owner.getIndexManager().updateIndexes(oldRe, IndexManager.REMOVE_ENTRY, IndexProtocol.BEFORE_UPDATE_OP);
                    }
                    owner.getIndexManager().updateIndexes(oldRe, oldRe.isRemoved() ? IndexManager.ADD_ENTRY : IndexManager.UPDATE_ENTRY, oldRe.isRemoved() ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
                  }
                  done = true;
                } finally {
                  if (event != null) {
                    event.release();
                    event = null;
                  }
                }
              }
            }
          }
          // No existing entry was usable: initialize the freshly created placeholder.
          if (!done) {
            boolean versionTagAccepted = false;
            if (entryVersion != null && owner.concurrencyChecksEnabled) {
              Assert.assertTrue(entryVersion.getMemberID() != null, "GII entry versions must have identifiers");
              try {
                boolean isTombstone = (newValue == Token.TOMBSTONE);
                processVersionTagForGII(newRe, owner, entryVersion, isTombstone, sender, !wasRecovered || isSynchronizing);
                versionTagAccepted = true;
              } catch (ConcurrentCacheModificationException e) {
                // a concurrent local modification won; reject the image value
                return false;
              }
            }
            result = newRe.initialImageInit(owner, lastModified, newValue, true, wasRecovered, versionTagAccepted);
            try {
              if (result) {
                if (newValue == Token.TOMBSTONE) {
                  owner.scheduleTombstone(newRe, entryVersion);
                } else {
                  owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(newRe));
                  EntryLogger.logInitialImagePut(_getOwnerObject(), key, newValue);
                  lruEntryCreate(newRe);
                }
                incEntryCount(1);
              }
              // Update local indexes
              if (owner.getIndexManager() != null) {
                // the update could not locate the old key
                if (oldRe != null && !oldRe.isRemoved()) {
                  owner.getIndexManager().updateIndexes(oldRe, IndexManager.REMOVE_ENTRY, IndexProtocol.BEFORE_UPDATE_OP);
                }
                owner.getIndexManager().updateIndexes(newRe, newRe.isRemoved() ? IndexManager.REMOVE_ENTRY : IndexManager.UPDATE_ENTRY, newRe.isRemoved() ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
              }
              done = true;
            } finally {
              if (event != null) {
                event.release();
                event = null;
              }
            }
          }
        } finally {
          if (done && result) {
            initialImagePutEntry(newRe);
          }
          if (!done) {
            // Back out the placeholder so no half-initialized entry remains visible.
            removeEntry(key, newRe, false);
            if (owner.getIndexManager() != null) {
              owner.getIndexManager().updateIndexes(newRe, IndexManager.REMOVE_ENTRY, IndexProtocol.OTHER_OP);
            }
          }
        }
      }
      // synchronized
    } finally {
      if (event != null)
        event.release();
      OffHeapHelper.release(oldValue);
    }
  } catch (RegionClearedException rce) {
    // Asif: do not issue any sort of callbacks
    done = false;
    cleared = true;
  } catch (QueryException qe) {
    done = false;
    cleared = true;
  } finally {
    if (done && !deferLRUCallback) {
      lruUpdateCallback();
    } else if (!cleared) {
      resetThreadLocals();
    }
  }
  return result;
}
Use of org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException in the Apache Geode project.
From the class AbstractRegionMap, method invalidate.
/**
 * Invalidates the entry for {@code event}'s key in this region map.
 *
 * <p>Two main paths: when {@code forceNewEntry || forceCallbacks} a placeholder entry is
 * created (and later removed if unused) so callbacks fire even for absent keys; otherwise
 * the existing entry (or tombstone) is invalidated in place, retrying when a tombstone's
 * state changes underneath us. On a {@link ConcurrentCacheModificationException}, gateways
 * are notified if the conflicting tag carried a newer timestamp, then the exception is
 * rethrown to the caller.
 *
 * @param event the invalidate event; its version tag and region entry are updated here
 * @param invokeCallbacks whether listener callbacks should be invoked
 * @param forceNewEntry create an entry if none exists
 * @param forceCallbacks fire callbacks even when no entry exists
 * @return true if an entry was actually invalidated
 * @throws EntryNotFoundException if no entry existed and one was not forced
 */
public boolean invalidate(EntryEventImpl event, boolean invokeCallbacks, boolean forceNewEntry, boolean forceCallbacks) throws EntryNotFoundException {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  final LocalRegion owner = _getOwner();
  if (owner == null) {
    // "fix" for bug 32440
    Assert.assertTrue(false, "The owner for RegionMap " + this + " is null for event " + event);
  }
  boolean didInvalidate = false;
  RegionEntry invalidatedRe = null;
  boolean clearOccured = false;
  DiskRegion dr = owner.getDiskRegion();
  boolean ownerIsInitialized = owner.isInitialized();
  try {
    // Fix for Bug #44431. We do NOT want to update the region and wait
    // later for index INIT as region.clear() can cause inconsistency if
    // happened in parallel as it also does index INIT.
    IndexManager oqlIndexManager = owner.getIndexManager();
    if (oqlIndexManager != null) {
      oqlIndexManager.waitForIndexInit();
    }
    lockForCacheModification(owner, event);
    try {
      try {
        if (forceNewEntry || forceCallbacks) {
          boolean opCompleted = false;
          // Placeholder entry so callbacks can fire even if the key is absent.
          RegionEntry newRe = getEntryFactory().createEntry(owner, event.getKey(), Token.REMOVED_PHASE1);
          synchronized (newRe) {
            try {
              RegionEntry oldRe = putEntryIfAbsent(event.getKey(), newRe);
              while (!opCompleted && oldRe != null) {
                synchronized (oldRe) {
                  // proceed to phase 2 of removal.
                  if (oldRe.isRemovedPhase2()) {
                    owner.getCachePerfStats().incRetries();
                    _getMap().remove(event.getKey(), oldRe);
                    oldRe = putEntryIfAbsent(event.getKey(), newRe);
                  } else {
                    opCompleted = true;
                    event.setRegionEntry(oldRe);
                    if (oldRe.isDestroyed()) {
                      if (isDebugEnabled) {
                        logger.debug("mapInvalidate: Found DESTROYED token, not invalidated; key={}", event.getKey());
                      }
                    } else if (oldRe.isInvalid()) {
                      // was already invalid, do not invoke listeners or increment stat
                      if (isDebugEnabled) {
                        logger.debug("mapInvalidate: Entry already invalid: '{}'", event.getKey());
                      }
                      processVersionTag(oldRe, event);
                      try {
                        // OFFHEAP noop setting
                        oldRe.setValue(owner, oldRe.getValueInVM(owner));
                        // an already invalid to
                        // invalid; No need to
                        // call
                        // prepareValueForCache
                        // since it is an
                        // invalid token.
                      } catch (RegionClearedException e) {
                        // that's okay - when writing an invalid into a disk, the
                        // region has been cleared (including this token)
                      }
                    } else {
                      owner.serverInvalidate(event);
                      if (owner.concurrencyChecksEnabled && event.noVersionReceivedFromServer()) {
                        // the server did not generate a version tag, so do not modify the
                        // entry here
                        return false;
                      }
                      final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
                      // added for cq which needs old value. rdubey
                      FilterProfile fp = owner.getFilterProfile();
                      if (!oldRe.isRemoved() && (fp != null && fp.getCqCount() > 0)) {
                        // OFFHEAP EntryEventImpl
                        Object oldValue = oldRe.getValueInVM(owner);
                        // this will not fault in the value.
                        if (oldValue == Token.NOT_AVAILABLE) {
                          event.setOldValue(oldRe.getValueOnDiskOrBuffer(owner));
                        } else {
                          event.setOldValue(oldValue);
                        }
                      }
                      boolean isCreate = false;
                      try {
                        if (oldRe.isRemoved()) {
                          processVersionTag(oldRe, event);
                          event.putNewEntry(owner, oldRe);
                          EntryLogger.logInvalidate(event);
                          owner.recordEvent(event);
                          if (!oldRe.isTombstone()) {
                            owner.updateSizeOnPut(event.getKey(), oldSize, event.getNewValueBucketSize());
                          } else {
                            owner.updateSizeOnCreate(event.getKey(), event.getNewValueBucketSize());
                            isCreate = true;
                          }
                        } else {
                          processVersionTag(oldRe, event);
                          event.putExistingEntry(owner, oldRe);
                          EntryLogger.logInvalidate(event);
                          owner.recordEvent(event);
                          owner.updateSizeOnPut(event.getKey(), oldSize, event.getNewValueBucketSize());
                        }
                      } catch (RegionClearedException e) {
                        // generate versionTag for the event
                        EntryLogger.logInvalidate(event);
                        owner.recordEvent(event);
                        clearOccured = true;
                      }
                      owner.basicInvalidatePart2(oldRe, event, clearOccured, /* conflict with clear */
                      invokeCallbacks);
                      if (!clearOccured) {
                        if (isCreate) {
                          lruEntryCreate(oldRe);
                        } else {
                          lruEntryUpdate(oldRe);
                        }
                      }
                      didInvalidate = true;
                      invalidatedRe = oldRe;
                    }
                  }
                }
                // synchronized oldRe
              }
              if (!opCompleted) {
                if (forceNewEntry && event.isFromServer()) {
                  // CCU invalidations before 7.0, and listeners don't care
                  if (!FORCE_INVALIDATE_EVENT) {
                    event.inhibitCacheListenerNotification(true);
                  }
                }
                event.setRegionEntry(newRe);
                owner.serverInvalidate(event);
                if (!forceNewEntry && event.noVersionReceivedFromServer()) {
                  // the server did not generate a version tag, so do not create an
                  // entry here
                  return false;
                }
                try {
                  ownerIsInitialized = owner.isInitialized();
                  if (!ownerIsInitialized && owner.getDataPolicy().withReplication()) {
                    final int oldSize = owner.calculateRegionEntryValueSize(newRe);
                    invalidateEntry(event, newRe, oldSize);
                  } else {
                    invalidateNewEntry(event, owner, newRe);
                  }
                } catch (RegionClearedException e) {
                  // TODO: deltaGII: do we even need RegionClearedException?
                  // generate versionTag for the event
                  owner.recordEvent(event);
                  clearOccured = true;
                }
                owner.basicInvalidatePart2(newRe, event, clearOccured, /* conflict with clear */
                invokeCallbacks);
                if (!clearOccured) {
                  lruEntryCreate(newRe);
                  incEntryCount(1);
                }
                opCompleted = true;
                didInvalidate = true;
                invalidatedRe = newRe;
                // for this invalidate
                if (!forceNewEntry) {
                  removeEntry(event.getKey(), newRe, false);
                }
              }
              // !opCompleted
            } catch (ConcurrentCacheModificationException ccme) {
              VersionTag tag = event.getVersionTag();
              if (tag != null && tag.isTimeStampUpdated()) {
                // Notify gateways of new time-stamp.
                owner.notifyTimestampsToGateways(event);
              }
              throw ccme;
            } finally {
              if (!opCompleted) {
                // back out the unused placeholder entry
                removeEntry(event.getKey(), newRe, false);
              }
            }
          }
          // synchronized newRe
        } else // forceNewEntry
        {
          // !forceNewEntry
          boolean retry = true;
          while (retry) {
            retry = false;
            boolean entryExisted = false;
            RegionEntry re = getEntry(event.getKey());
            RegionEntry tombstone = null;
            boolean haveTombstone = false;
            if (re != null && re.isTombstone()) {
              tombstone = re;
              haveTombstone = true;
              re = null;
            }
            if (re == null) {
              ownerIsInitialized = owner.isInitialized();
              if (!ownerIsInitialized) {
                // when GII message arrived or processed later than invalidate
                // message, the entry should be created as placeholder
                RegionEntry newRe = haveTombstone ? tombstone : getEntryFactory().createEntry(owner, event.getKey(), Token.INVALID);
                synchronized (newRe) {
                  if (haveTombstone && !tombstone.isTombstone()) {
                    // state of the tombstone has changed so we need to retry
                    retry = true;
                    // retryEntry = tombstone; // leave this in place for debugging
                    continue;
                  }
                  re = putEntryIfAbsent(event.getKey(), newRe);
                  if (re == tombstone) {
                    // pretend we don't have an entry
                    re = null;
                  }
                }
              } else if (owner.getServerProxy() != null) {
                Object sync = haveTombstone ? tombstone : new Object();
                synchronized (sync) {
                  if (haveTombstone && !tombstone.isTombstone()) {
                    // bug 45295: state of the tombstone has changed so we need to retry
                    retry = true;
                    // retryEntry = tombstone; // leave this in place for debugging
                    continue;
                  }
                  // bug #43287 - send event to server even if it's not in the client (LRU may
                  // have evicted it)
                  owner.serverInvalidate(event);
                  if (owner.concurrencyChecksEnabled) {
                    if (event.getVersionTag() == null) {
                      // the server did not generate a version tag, so do not create an
                      // entry here
                      return false;
                    } else if (tombstone != null) {
                      processVersionTag(tombstone, event);
                      try {
                        if (!tombstone.isTombstone()) {
                          if (isDebugEnabled) {
                            logger.debug("tombstone is no longer a tombstone. {}:event={}", tombstone, event);
                          }
                        }
                        tombstone.setValue(owner, Token.TOMBSTONE);
                      } catch (RegionClearedException e) {
                        // that's okay - when writing a tombstone into a disk, the
                        // region has been cleared (including this tombstone)
                      } catch (ConcurrentCacheModificationException ccme) {
                        VersionTag tag = event.getVersionTag();
                        if (tag != null && tag.isTimeStampUpdated()) {
                          // Notify gateways of new time-stamp.
                          owner.notifyTimestampsToGateways(event);
                        }
                        throw ccme;
                      }
                      // update the tombstone's version to prevent an older CCU/putAll from
                      // overwriting it
                      owner.rescheduleTombstone(tombstone, event.getVersionTag());
                    }
                  }
                }
                entryExisted = true;
              }
            }
            if (re != null) {
              // normal invalidate operation
              synchronized (re) {
                if (!event.isOriginRemote() && event.getOperation().isExpiration()) {
                  // used by a tx.
                  if (re.isInUseByTransaction()) {
                    return false;
                  }
                }
                if (re.isTombstone() || (!re.isRemoved() && !re.isDestroyed())) {
                  entryExisted = true;
                  if (re.isInvalid()) {
                    // already invalid: skip listeners, but still record the version
                    // stat
                    if (isDebugEnabled) {
                      logger.debug("Invalidate: Entry already invalid: '{}'", event.getKey());
                    }
                    if (event.getVersionTag() != null && owner.getVersionVector() != null) {
                      owner.getVersionVector().recordVersion((InternalDistributedMember) event.getDistributedMember(), event.getVersionTag());
                    }
                  } else {
                    // previous value not invalid
                    event.setRegionEntry(re);
                    owner.serverInvalidate(event);
                    if (owner.concurrencyChecksEnabled && event.noVersionReceivedFromServer()) {
                      // the server did not generate a version tag, so do not modify the
                      // entry here
                      if (isDebugEnabled) {
                        logger.debug("returning early because server did not generate a version stamp for this event:{}", event);
                      }
                      return false;
                    }
                    // in case of overflow to disk we need the old value for cqs.
                    if (owner.getFilterProfile().getCqCount() > 0) {
                      // use to be getValue and can cause dead lock rdubey.
                      if (re.isValueNull()) {
                        event.setOldValue(re.getValueOnDiskOrBuffer(owner));
                      } else {
                        Object v = re.getValueInVM(owner);
                        // OFFHEAP escapes to EntryEventImpl oldValue
                        event.setOldValue(v);
                      }
                    }
                    final boolean oldWasTombstone = re.isTombstone();
                    final int oldSize = _getOwner().calculateRegionEntryValueSize(re);
                    try {
                      invalidateEntry(event, re, oldSize);
                    } catch (RegionClearedException rce) {
                      // generate versionTag for the event
                      EntryLogger.logInvalidate(event);
                      _getOwner().recordEvent(event);
                      clearOccured = true;
                    } catch (ConcurrentCacheModificationException ccme) {
                      VersionTag tag = event.getVersionTag();
                      if (tag != null && tag.isTimeStampUpdated()) {
                        // Notify gateways of new time-stamp.
                        owner.notifyTimestampsToGateways(event);
                      }
                      throw ccme;
                    }
                    owner.basicInvalidatePart2(re, event, clearOccured, /* conflict with clear */
                    invokeCallbacks);
                    if (!clearOccured) {
                      if (oldWasTombstone) {
                        lruEntryCreate(re);
                      } else {
                        lruEntryUpdate(re);
                      }
                    }
                    didInvalidate = true;
                    invalidatedRe = re;
                  }
                  // previous value not invalid
                }
              }
              // synchronized re
            } else // re != null
            {
              // At this point, either it's not in GII mode, or the placeholder
              // is in region, do nothing
            }
            if (!entryExisted) {
              owner.checkEntryNotFound(event.getKey());
            }
          }
          // while(retry)
        }
        // !forceNewEntry
      } catch (DiskAccessException dae) {
        invalidatedRe = null;
        didInvalidate = false;
        this._getOwner().handleDiskAccessException(dae);
        throw dae;
      } finally {
        if (oqlIndexManager != null) {
          oqlIndexManager.countDownIndexUpdaters();
        }
        if (invalidatedRe != null) {
          owner.basicInvalidatePart3(invalidatedRe, event, invokeCallbacks);
        }
        if (didInvalidate && !clearOccured) {
          try {
            lruUpdateCallback();
          } catch (DiskAccessException dae) {
            this._getOwner().handleDiskAccessException(dae);
            throw dae;
          }
        } else if (!didInvalidate) {
          resetThreadLocals();
        }
      }
      return didInvalidate;
    } finally {
      if (ownerIsInitialized) {
        forceInvalidateEvent(event, owner);
      }
    }
  } finally {
    releaseCacheModificationLock(owner, event);
  }
}
Use of org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException in the Apache Geode project.
From the class AbstractRegionEntry, method basicProcessVersionTag.
/**
 * Validates an incoming version tag against this entry's version stamp and, when the tag
 * wins (or conflict checking is disabled), applies it to the stamp.
 *
 * <p>On a {@link ConcurrentCacheModificationException} from the conflict check, a newer
 * timestamp from the same distributed system (non-gateway tags only) is still recorded on
 * the stamp before the exception is rethrown, so last-write-wins timestamps stay current.
 *
 * @param region the owning region
 * @param tag the incoming version tag; a null tag is a no-op
 * @param isTombstoneFromGII whether the tag accompanies a GII tombstone
 * @param deltaCheck whether delta-related conflict checking applies
 * @param dmId this member's version source id
 * @param sender the member that sent the operation
 * @param checkForConflict whether to run the conflict check before applying
 * @throws IllegalStateException if a tag arrives but this region has no version storage
 * @throws ConcurrentCacheModificationException if the conflict check rejects the tag
 */
protected void basicProcessVersionTag(LocalRegion region, VersionTag tag, boolean isTombstoneFromGII, boolean deltaCheck, VersionSource dmId, InternalDistributedMember sender, boolean checkForConflict) {
  if (tag != null) {
    VersionStamp stamp = getVersionStamp();
    // BUG FIX: check for a missing stamp BEFORE dereferencing it. The previous code
    // called stamp.asVersionTag() inside the trace block first, which threw an NPE
    // (when TOMBSTONE tracing was enabled) instead of this intended error.
    if (stamp == null) {
      throw new IllegalStateException("message contained a version tag but this region has no version storage");
    }
    StringBuilder verbose = null;
    if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
      VersionTag stampTag = stamp.asVersionTag();
      if (stampTag.hasValidVersion() && checkForConflict) {
        // only be verbose here if there's a possibility we might reject the operation
        verbose = new StringBuilder();
        verbose.append("processing tag for key ").append(getKey()).append(", stamp=").append(stamp.asVersionTag()).append(", tag=").append(tag).append(", checkForConflict=").append(checkForConflict);
      }
    }
    boolean apply = true;
    try {
      if (checkForConflict) {
        apply = checkForConflict(region, stamp, tag, isTombstoneFromGII, deltaCheck, dmId, sender, verbose);
      }
    } catch (ConcurrentCacheModificationException e) {
      // The operation lost the conflict check, but a newer timestamp from the same
      // distributed system should still be recorded so it can be applied there
      // (gateway tags are excluded from this timestamp carry-over).
      if (!tag.isGatewayTag() && stamp.getDistributedSystemId() == tag.getDistributedSystemId() && tag.getVersionTimeStamp() > stamp.getVersionTimeStamp()) {
        stamp.setVersionTimeStamp(tag.getVersionTimeStamp());
        tag.setTimeStampApplied(true);
        if (verbose != null) {
          verbose.append("\nThough in conflict the tag timestamp was more recent and was recorded.");
        }
      }
      throw e;
    } finally {
      if (verbose != null) {
        logger.trace(LogMarker.TOMBSTONE, verbose);
      }
    }
    if (apply) {
      applyVersionTag(region, stamp, tag, sender);
    }
  }
}
Aggregations