Use of org.apache.geode.internal.cache.versions.VersionTag in project geode by apache.
The class AbstractRegionMap, method basicPut.
/*
* returns null if the operation fails
*/
public RegionEntry basicPut(EntryEventImpl event, final long lastModified, final boolean ifNew,
final boolean ifOld, Object expectedOldValue, // only non-null if ifOld
boolean requireOldValue, final boolean overwriteDestroyed) throws CacheWriterException, TimeoutException {
final LocalRegion owner = _getOwner();
boolean clearOccured = false;
if (owner == null) {
// "fix" for bug 32440
Assert.assertTrue(false, "The owner for RegionMap " + this + " is null for event " + event);
}
if (logger.isTraceEnabled(LogMarker.LRU_TOMBSTONE_COUNT) && !(owner instanceof HARegion)) {
logger.trace(LogMarker.LRU_TOMBSTONE_COUNT, "ARM.basicPut called for {} expectedOldValue={} requireOldValue={} ifNew={} ifOld={} initialized={} overwriteDestroyed={}", event, expectedOldValue, requireOldValue, ifNew, ifOld, owner.isInitialized(), overwriteDestroyed);
}
RegionEntry result = null;
long lastModifiedTime = 0;
// copy into local var to prevent race condition with setter
final CacheWriter cacheWriter = owner.basicGetWriter();
final boolean cacheWrite = !event.isOriginRemote() && !event.isNetSearch() && event.isGenerateCallbacks() && (cacheWriter != null || owner.hasServerProxy() || owner.scope.isDistributed());
/*
* For performance reason, we try to minimize object creation and do as much work as we can
* outside of synchronization, especially getting distribution advice.
*/
final Set netWriteRecipients;
if (cacheWrite) {
if (cacheWriter == null && owner.scope.isDistributed()) {
netWriteRecipients = ((DistributedRegion) owner).getCacheDistributionAdvisor().adviseNetWrite();
} else {
netWriteRecipients = null;
}
} else {
netWriteRecipients = null;
}
// mbid: this has been added to maintain consistency between the disk region
// and the region map after clear() has been called. This will set the
// reference of the diskSegmentRegion as a ThreadLocal so that if the diskRegionSegment
// is later changed by another thread, we can do the necessary.
boolean uninitialized = !owner.isInitialized();
boolean retrieveOldValueForDelta = event.getDeltaBytes() != null && event.getRawNewValue() == null;
IndexManager oqlIndexManager = null;
lockForCacheModification(owner, event);
try {
try {
// Fix for Bug #44431. We do NOT want to update the region and wait
// later for index INIT as region.clear() can cause inconsistency if
// happened in parallel as it also does index INIT.
oqlIndexManager = owner.getIndexManager();
if (oqlIndexManager != null) {
oqlIndexManager.waitForIndexInit();
}
// fix for bug #42169, replace must go to server if entry not on client
boolean replaceOnClient = event.getOperation() == Operation.REPLACE && owner.getServerProxy() != null;
// Rather than having two different blocks for synchronizing oldRe
// and newRe, have only one block and synchronize re
RegionEntry re = null;
boolean eventRecorded = false;
boolean onlyExisting = ifOld && !replaceOnClient;
re = getOrCreateRegionEntry(owner, event, Token.REMOVED_PHASE1, null, onlyExisting, false);
if (re == null) {
return null;
}
while (true) {
synchronized (re) {
// if the re goes into removed phase 2 state, it will be removed from the map;
// otherwise we can append an event to it and change its state
if (re.isRemovedPhase2()) {
_getOwner().getCachePerfStats().incRetries();
_getMap().remove(event.getKey(), re);
re = getOrCreateRegionEntry(owner, event, Token.REMOVED_PHASE1, null, onlyExisting, false);
if (re == null) {
// this will happen when onlyExisting is true
return null;
}
continue;
} else {
@Released Object oldValueForDelta = null;
if (retrieveOldValueForDelta) {
// defer the lruUpdateCallback to prevent a deadlock (see bug 51121).
final boolean disabled = disableLruUpdateCallback();
try {
// Old value is faulted in from disk if not found in memory.
// OFFHEAP: if we are synced on oldRe there is no issue since we can use ARE's ref.
oldValueForDelta = re.getValue(owner);
} finally {
if (disabled) {
enableLruUpdateCallback();
}
}
}
try {
event.setRegionEntry(re);
// set old value in event
setOldValueInEvent(event, re, cacheWrite, requireOldValue);
if (!continueUpdate(re, event, ifOld, replaceOnClient)) {
return null;
}
// overwrite destroyed?
if (!continueOverwriteDestroyed(re, event, overwriteDestroyed, ifNew)) {
return null;
}
// check expectedOldValue
if (!satisfiesExpectedOldValue(event, re, expectedOldValue, replaceOnClient)) {
return null;
}
// invoke cacheWriter
invokeCacheWriter(re, event, cacheWrite, cacheWriter, netWriteRecipients, requireOldValue, expectedOldValue, replaceOnClient);
// notify index of an update
notifyIndex(re, true);
try {
try {
// if there is a cacheWriter, the type of event has already been set
if ((cacheWrite && event.getOperation().isUpdate()) || !re.isRemoved() || replaceOnClient) {
// update
updateEntry(event, requireOldValue, oldValueForDelta, re);
} else {
// create
createEntry(event, owner, re);
}
owner.recordEvent(event);
eventRecorded = true;
} catch (RegionClearedException rce) {
clearOccured = true;
owner.recordEvent(event);
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
if (uninitialized) {
event.inhibitCacheListenerNotification(true);
}
updateLru(clearOccured, re, event);
lastModifiedTime = owner.basicPutPart2(event, re, !uninitialized, lastModifiedTime, clearOccured);
} finally {
notifyIndex(re, false);
}
result = re;
break;
} finally {
OffHeapHelper.release(oldValueForDelta);
if (re != null && !onlyExisting && !isOpComplete(re, event)) {
owner.cleanUpOnIncompleteOp(event, re);
} else if (re != null && owner.isUsedForPartitionedRegionBucket()) {
BucketRegion br = (BucketRegion) owner;
CachePerfStats stats = br.getPartitionedRegion().getCachePerfStats();
}
} // try
}
} // sync re
} // end while
} catch (DiskAccessException dae) {
// Asif:Feel that it is safe to destroy the region here as there appears
// to be no chance of deadlock during region destruction
result = null;
this._getOwner().handleDiskAccessException(dae);
throw dae;
} finally {
if (oqlIndexManager != null) {
oqlIndexManager.countDownIndexUpdaters();
}
if (result != null) {
try {
// Note we do distribution after releasing all sync to avoid deadlock
final boolean invokeListeners = event.basicGetNewValue() != Token.TOMBSTONE;
owner.basicPutPart3(event, result, !uninitialized, lastModifiedTime, invokeListeners, ifNew, ifOld, expectedOldValue, requireOldValue);
} finally {
// for any recipients
if (!clearOccured) {
try {
lruUpdateCallback();
} catch (DiskAccessException dae) {
// Asif:Feel that it is safe to destroy the region here as there appears
// to be no chance of deadlock during region destruction
result = null;
this._getOwner().handleDiskAccessException(dae);
throw dae;
}
}
}
// finally
} else {
resetThreadLocals();
}
}
} finally {
releaseCacheModificationLock(owner, event);
}
return result;
}
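A recurring pattern in basicPut (and in the destroy paths below) is the handling of ConcurrentCacheModificationException: if the conflicting write still advanced the entry's timestamp, the gateways are notified of the new time-stamp before the exception is rethrown. The helper below is a minimal sketch of that pattern, not Geode code; the method name and placement are hypothetical, and it relies only on the internal classes and calls already visible above (getVersionTag, isTimeStampUpdated, notifyTimestampsToGateways).

// Hypothetical helper restating the conflict-handling pattern above.
private void handleConcurrentModification(LocalRegion owner, EntryEventImpl event,
    ConcurrentCacheModificationException ccme) {
  VersionTag tag = event.getVersionTag();
  if (tag != null && tag.isTimeStampUpdated()) {
    // the conflicting write only advanced the timestamp; gateways still need to hear about it
    owner.notifyTimestampsToGateways(event);
  }
  throw ccme;
}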
Use of org.apache.geode.internal.cache.versions.VersionTag in project geode by apache.
The class AbstractRegionMap, method destroy.
public boolean destroy(EntryEventImpl event, boolean inTokenMode, boolean duringRI, boolean cacheWrite, boolean isEviction, Object expectedOldValue, boolean removeRecoveredEntry) throws CacheWriterException, EntryNotFoundException, TimeoutException {
final LocalRegion owner = _getOwner();
if (owner == null) {
Assert.assertTrue(false, // "fix" for bug 32440
"The owner for RegionMap " + this + " is null for event " + event);
}
boolean retry = true;
lockForCacheModification(owner, event);
try {
while (retry) {
retry = false;
boolean opCompleted = false;
boolean doPart3 = false;
// We need to acquire the region entry while holding the lock to avoid #45620.
// The outer try/finally ensures that the lock will be released without fail.
// I'm avoiding indenting just to preserve the ability
// to track diffs since the code is fairly complex.
RegionEntry re = getOrCreateRegionEntry(owner, event, Token.REMOVED_PHASE1, null, true, true);
RegionEntry tombstone = null;
boolean haveTombstone = false;
/*
* Execute the test hook runnable inline (not threaded) if it is not null.
*/
if (null != testHookRunnableFor48182) {
testHookRunnableFor48182.run();
}
try {
if (logger.isTraceEnabled(LogMarker.LRU_TOMBSTONE_COUNT) && !(owner instanceof HARegion)) {
logger.trace(LogMarker.LRU_TOMBSTONE_COUNT, "ARM.destroy() inTokenMode={}; duringRI={}; riLocalDestroy={}; withRepl={}; fromServer={}; concurrencyEnabled={}; isOriginRemote={}; isEviction={}; operation={}; re={}", inTokenMode, duringRI, event.isFromRILocalDestroy(), owner.dataPolicy.withReplication(), event.isFromServer(), owner.concurrencyChecksEnabled, event.isOriginRemote(), isEviction, event.getOperation(), re);
}
if (event.isFromRILocalDestroy()) {
// for RI local-destroy we don't want to keep tombstones.
// In order to simplify things we just set this recovery
// flag to true to force the entry to be removed
removeRecoveredEntry = true;
}
// the logic in this method is already very involved, and adding tombstone permutations
// to (re == null) would greatly complicate it, so we check
// for a tombstone here and, if found, pretend for a bit that the entry is null
if (re != null && re.isTombstone() && !removeRecoveredEntry) {
tombstone = re;
haveTombstone = true;
re = null;
}
IndexManager oqlIndexManager = owner.getIndexManager();
if (re == null) {
// we need to create an entry if in token mode or if we've received
// a destroy from a peer or WAN gateway and we need to retain version
// information for concurrency checks
boolean retainForConcurrency = (!haveTombstone
&& (owner.dataPolicy.withReplication() || event.isFromServer())
&& owner.concurrencyChecksEnabled
&& (event.isOriginRemote() /* destroy received from another member must create a tombstone */
|| event.isFromWANAndVersioned() /* wan event must create a tombstone */
|| event.isBridgeEvent())); /* event from client must create a tombstone so the client has a version # */
if (inTokenMode || retainForConcurrency) {
// removeRecoveredEntry should be false in this case
RegionEntry newRe = getEntryFactory().createEntry(owner, event.getKey(), Token.REMOVED_PHASE1);
// Fix for Bug #44431. We do NOT want to update the region and wait
// later for index INIT as region.clear() can cause inconsistency if
// happened in parallel as it also does index INIT.
if (oqlIndexManager != null) {
oqlIndexManager.waitForIndexInit();
}
try {
synchronized (newRe) {
RegionEntry oldRe = putEntryIfAbsent(event.getKey(), newRe);
while (!opCompleted && oldRe != null) {
synchronized (oldRe) {
if (oldRe.isRemovedPhase2()) {
owner.getCachePerfStats().incRetries();
_getMap().remove(event.getKey(), oldRe);
oldRe = putEntryIfAbsent(event.getKey(), newRe);
} else {
event.setRegionEntry(oldRe);
// is being added to transaction state.
if (isEviction) {
if (!confirmEvictionDestroy(oldRe)) {
opCompleted = false;
return opCompleted;
}
}
try {
// if concurrency checks are enabled, destroy will
// set the version tag
boolean destroyed = destroyEntry(oldRe, event, inTokenMode, cacheWrite, expectedOldValue, false, removeRecoveredEntry);
if (destroyed) {
if (retainForConcurrency) {
owner.basicDestroyBeforeRemoval(oldRe, event);
}
owner.basicDestroyPart2(oldRe, event, inTokenMode, false, /* conflict with clear */
duringRI, true);
lruEntryDestroy(oldRe);
doPart3 = true;
}
} catch (RegionClearedException rce) {
// Ignore. The exception will ensure that we do not update
// the LRU List
owner.basicDestroyPart2(oldRe, event, inTokenMode, true, /* conflict with clear */
duringRI, true);
doPart3 = true;
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
re = oldRe;
opCompleted = true;
}
} // synchronized oldRe
} // while
if (!opCompleted) {
// happens if we didn't get completed with oldRe in the above while loop.
try {
re = newRe;
event.setRegionEntry(newRe);
try {
// if concurrency checks are enabled, destroy will set the version tag
if (isEviction) {
opCompleted = false;
return opCompleted;
}
opCompleted = destroyEntry(newRe, event, inTokenMode, cacheWrite, expectedOldValue, true, removeRecoveredEntry);
if (opCompleted) {
// This is a new entry that was created because we are in
// token mode or are accepting a destroy operation by adding
// a tombstone. There is no oldValue, so we don't need to
// call updateSizeOnRemove
// owner.recordEvent(event);
// native clients need to know if the entry didn't exist
event.setIsRedestroyedEntry(true);
if (retainForConcurrency) {
owner.basicDestroyBeforeRemoval(oldRe, event);
}
owner.basicDestroyPart2(newRe, event, inTokenMode, false, /* conflict with clear */
duringRI, true);
doPart3 = true;
}
} catch (RegionClearedException rce) {
// Ignore. The exception will ensure that we do not update
// the LRU List
opCompleted = true;
EntryLogger.logDestroy(event);
owner.basicDestroyPart2(newRe, event, inTokenMode, true, /* conflict with clear */
duringRI, true);
doPart3 = true;
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
// Note no need for LRU work since the entry is destroyed
// and will be removed when gii completes
} finally {
if (!opCompleted && !haveTombstone) /* to fix bug 51583 do this for all operations */
{
removeEntry(event.getKey(), newRe, false);
}
if (!opCompleted && isEviction) {
removeEntry(event.getKey(), newRe, false);
}
}
} // !opCompleted
} // synchronized newRe
} finally {
if (oqlIndexManager != null) {
oqlIndexManager.countDownIndexUpdaters();
}
}
} // inTokenMode or tombstone creation
else {
if (!isEviction || owner.concurrencyChecksEnabled) {
// The following ensures that there is not a concurrent operation
// on the entry and leaves behind a tombstone if concurrencyChecksEnabled.
// It fixes bug #32467 by propagating the destroy to the server even though
// the entry isn't in the client
RegionEntry newRe = haveTombstone ? tombstone : getEntryFactory().createEntry(owner, event.getKey(), Token.REMOVED_PHASE1);
synchronized (newRe) {
if (haveTombstone && !tombstone.isTombstone()) {
// we have to check this again under synchronization since it may have changed
retry = true;
// retryEntry = tombstone; // leave this in place for debugging
continue;
}
re = (RegionEntry) _getMap().putIfAbsent(event.getKey(), newRe);
if (re != null && re != tombstone) {
// concurrent change - try again
retry = true;
// retryEntry = tombstone; // leave this in place for debugging
continue;
} else if (!isEviction) {
boolean throwex = false;
EntryNotFoundException ex = null;
try {
if (!cacheWrite) {
throwex = true;
} else {
try {
if (!removeRecoveredEntry) {
throwex = !owner.bridgeWriteBeforeDestroy(event, expectedOldValue);
}
} catch (EntryNotFoundException e) {
throwex = true;
ex = e;
}
}
if (throwex) {
// if this is a replayed client event that already has a version, or a WAN event that has
// already been applied in another system, allow the destroy to continue so the version
// information gets updated in peers
if (!event.isOriginRemote() && !event.getOperation().isLocal()
&& (event.isFromBridgeAndVersioned() || event.isFromWANAndVersioned())) {
if (logger.isDebugEnabled()) {
logger.debug("ARM.destroy is allowing wan/client destroy of {} to continue", event.getKey());
}
throwex = false;
event.setIsRedestroyedEntry(true);
// distributing this destroy op.
if (re == null) {
re = newRe;
}
doPart3 = true;
}
}
if (throwex) {
if (ex == null) {
// Fix for 48182, check cache state and/or region state before sending
// entry not found.
// this is from the server and any exceptions will propogate to the client
owner.checkEntryNotFound(event.getKey());
} else {
throw ex;
}
}
} finally {
// either remove the entry or leave a tombstone
try {
if (!event.isOriginRemote() && event.getVersionTag() != null && owner.concurrencyChecksEnabled) {
// this shouldn't fail since we just created the entry.
// it will either generate a tag or apply a server's version tag
processVersionTag(newRe, event);
if (doPart3) {
owner.generateAndSetVersionTag(event, newRe);
}
try {
owner.recordEvent(event);
newRe.makeTombstone(owner, event.getVersionTag());
} catch (RegionClearedException e) {
// that's okay - when writing a tombstone into a disk, the
// region has been cleared (including this tombstone)
}
opCompleted = true;
// lruEntryCreate(newRe);
} else if (!haveTombstone) {
try {
assert newRe != tombstone;
newRe.setValue(owner, Token.REMOVED_PHASE2);
removeEntry(event.getKey(), newRe, false);
} catch (RegionClearedException e) {
// that's okay - we just need to remove the new entry
}
} else if (event.getVersionTag() != null) {
// haveTombstone - update the tombstone version info
processVersionTag(tombstone, event);
if (doPart3) {
owner.generateAndSetVersionTag(event, newRe);
}
// this is not a conflict; we need to persist the tombstone again with the new version tag
try {
tombstone.setValue(owner, Token.TOMBSTONE);
} catch (RegionClearedException e) {
// that's okay - when writing a tombstone into a disk, the
// region has been cleared (including this tombstone)
}
owner.recordEvent(event);
owner.rescheduleTombstone(tombstone, event.getVersionTag());
owner.basicDestroyPart2(tombstone, event, inTokenMode, true, /* conflict with clear */
duringRI, true);
opCompleted = true;
}
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
}
}
}
// synchronized(newRe)
}
}
} // no current entry
else {
// current entry exists
if (oqlIndexManager != null) {
oqlIndexManager.waitForIndexInit();
}
try {
synchronized (re) {
// if the entry is a tombstone and the event is from a peer or a client
// then we allow the operation to be performed so that we can update the
// version stamp. Otherwise we would retain an old version stamp and may allow
// an operation that is older than the destroy() to be applied to the cache
// Bug 45170: If removeRecoveredEntry, we treat tombstone as regular entry to be
// deleted
boolean createTombstoneForConflictChecks = (owner.concurrencyChecksEnabled && (event.isOriginRemote() || event.getContext() != null || removeRecoveredEntry));
if (!re.isRemoved() || createTombstoneForConflictChecks) {
if (re.isRemovedPhase2()) {
_getMap().remove(event.getKey(), re);
owner.getCachePerfStats().incRetries();
retry = true;
continue;
}
if (!event.isOriginRemote() && event.getOperation().isExpiration()) {
// If this expiration started locally then only do it if the RE is not being used by a tx.
if (re.isInUseByTransaction()) {
opCompleted = false;
return opCompleted;
}
}
event.setRegionEntry(re);
// See comment above about eviction checks
if (isEviction) {
assert expectedOldValue == null;
if (!confirmEvictionDestroy(re)) {
opCompleted = false;
return opCompleted;
}
}
boolean removed = false;
try {
opCompleted = destroyEntry(re, event, inTokenMode, cacheWrite, expectedOldValue, false, removeRecoveredEntry);
if (opCompleted) {
// It is very, very important for Partitioned Regions to keep
// the entry in the map until after distribution occurs so that other
// threads performing a create on this entry wait until the destroy
// distribution is finished.
// keeping backup copies consistent. Fix for bug 35906.
// -- mthomas 07/02/2007 <-- how about that date, kinda cool eh?
owner.basicDestroyBeforeRemoval(re, event);
// do this before basicDestroyPart2 to fix bug 31786
if (!inTokenMode) {
if (re.getVersionStamp() == null) {
re.removePhase2();
removeEntry(event.getKey(), re, true, event, owner);
removed = true;
}
}
if (inTokenMode && !duringRI) {
event.inhibitCacheListenerNotification(true);
}
doPart3 = true;
owner.basicDestroyPart2(re, event, inTokenMode, false, /* conflict with clear */
duringRI, true);
// if (!re.isTombstone() || isEviction) {
lruEntryDestroy(re);
// } else {
// lruEntryUpdate(re);
// lruUpdateCallback = true;
// }
} else {
if (!inTokenMode) {
EntryLogger.logDestroy(event);
owner.recordEvent(event);
if (re.getVersionStamp() == null) {
re.removePhase2();
removeEntry(event.getKey(), re, true, event, owner);
lruEntryDestroy(re);
} else {
if (re.isTombstone()) {
// the entry is already a tombstone, but we're destroying it again,
// so we need to reschedule the tombstone's expiration
if (event.isOriginRemote()) {
owner.rescheduleTombstone(re, re.getVersionStamp().asVersionTag());
}
}
}
lruEntryDestroy(re);
opCompleted = true;
}
}
} catch (RegionClearedException rce) {
// Ignore. The exception will ensure that we do not update
// the LRU List
opCompleted = true;
owner.recordEvent(event);
if (inTokenMode && !duringRI) {
event.inhibitCacheListenerNotification(true);
}
owner.basicDestroyPart2(re, event, inTokenMode, true, /* conflict with clear */
duringRI, true);
doPart3 = true;
} finally {
if (re.isRemoved() && !re.isTombstone()) {
if (!removed) {
removeEntry(event.getKey(), re, true, event, owner);
}
}
}
} // !isRemoved
else {
// already removed
if (re.isTombstone() && event.getVersionTag() != null) {
// if we're dealing with a tombstone and this is a remote event
// (e.g., from cache client update thread) we need to update
// the tombstone's version information
// TODO use destroyEntry() here
processVersionTag(re, event);
try {
re.makeTombstone(owner, event.getVersionTag());
} catch (RegionClearedException e) {
// that's okay - when writing a tombstone into a disk, the
// region has been cleared (including this tombstone)
}
}
if (expectedOldValue != null) {
// if re is removed then there is no old value, so return false
return false;
}
if (!inTokenMode && !isEviction) {
owner.checkEntryNotFound(event.getKey());
}
}
}
// synchronized re
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
} finally {
if (oqlIndexManager != null) {
oqlIndexManager.countDownIndexUpdaters();
}
}
// No need to call lruUpdateCallback since the only lru action
// we may have taken was lruEntryDestroy. This fixes bug 31759.
}
// current entry exists
if (opCompleted) {
EntryLogger.logDestroy(event);
}
return opCompleted;
} finally {
try {
// If there is a concurrency conflict and the event contains a gateway version tag, do NOT distribute.
if (event.isConcurrencyConflict() && (event.getVersionTag() != null && event.getVersionTag().isGatewayTag())) {
doPart3 = false;
}
// distribution and listener notification
if (doPart3) {
owner.basicDestroyPart3(re, event, inTokenMode, duringRI, true, expectedOldValue);
}
} finally {
if (opCompleted) {
if (re != null) {
// we only want to cancel if concurrency-check is not enabled;
// re (RegionEntry) will be null when concurrency-check is enabled, and the
// removeTombstone method will call cancelExpiryTask on the region entry
owner.cancelExpiryTask(re);
}
}
}
}
}
// retry loop
} finally {
// failsafe on the read lock...see comment above
releaseCacheModificationLock(owner, event);
}
return false;
}
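The destroy path above materializes an entry (ultimately a tombstone) even when nothing exists locally, whenever version information must be retained for concurrency checks. The method below restates the retainForConcurrency predicate as a standalone sketch for readability; the boolean parameters stand in for the owner/event queries used in the real code, and the helper itself is not part of Geode.

// Illustrative restatement of the retainForConcurrency decision in ARM.destroy().
static boolean retainForConcurrency(boolean haveTombstone, boolean withReplication,
    boolean fromServer, boolean concurrencyChecksEnabled, boolean originRemote,
    boolean fromWanAndVersioned, boolean bridgeEvent) {
  return !haveTombstone
      && (withReplication || fromServer)
      && concurrencyChecksEnabled
      && (originRemote           // destroy from another member must create a tombstone
          || fromWanAndVersioned // WAN event must create a tombstone
          || bridgeEvent);       // client event must create a tombstone so the client has a version #
}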
Use of org.apache.geode.internal.cache.versions.VersionTag in project geode by apache.
The class AbstractRegionEntry, method destroy.
/**
* @throws EntryNotFoundException if expectedOldValue is not null and is not equal to current
* value
*/
@Override
@Released
public boolean destroy(LocalRegion region, EntryEventImpl event, boolean inTokenMode, boolean cacheWrite, @Unretained Object expectedOldValue, boolean forceDestroy, boolean removeRecoveredEntry) throws CacheWriterException, EntryNotFoundException, TimeoutException, RegionClearedException {
// A design decision was made to not retrieve the old value from the disk
// if the entry has been evicted to only have the CacheListener afterDestroy
// method ignore it. We don't want to pay the performance penalty. The
// getValueInVM method does not retrieve the value from disk if it has been
// evicted. Instead, it uses the NotAvailable token.
//
// If the region is a WAN queue region, the old value is actually used by the
// afterDestroy callback on a secondary. It is not needed on a primary.
// Since the destroy that sets WAN_QUEUE_TOKEN always originates on the primary
// we only pay attention to WAN_QUEUE_TOKEN if the event is originRemote.
//
// We also read old value from disk or buffer
// in the case where there is a non-null expectedOldValue
// see PartitionedRegion#remove(Object key, Object value)
ReferenceCountHelper.skipRefCountTracking();
@Retained @Released Object curValue = _getValueRetain(region, true);
ReferenceCountHelper.unskipRefCountTracking();
boolean proceed;
try {
if (curValue == null) {
curValue = Token.NOT_AVAILABLE;
}
if (curValue == Token.NOT_AVAILABLE) {
// the state of the transmitting cache's entry & should be used here
if (event.getCallbackArgument() != null && event.getCallbackArgument().equals(RegionQueue.WAN_QUEUE_TOKEN) && event.isOriginRemote()) {
// check originRemote for bug 40508
// curValue = getValue(region); can cause deadlock if GII is occurring
curValue = getValueOnDiskOrBuffer(region);
} else {
FilterProfile fp = region.getFilterProfile();
if (fp != null && (fp.getCqCount() > 0 || expectedOldValue != null)) {
// curValue = getValue(region); can cause deadlock will fault in the value
// and will confuse LRU.
curValue = getValueOnDiskOrBuffer(region);
}
}
}
if (expectedOldValue != null) {
if (!checkExpectedOldValue(expectedOldValue, curValue, region)) {
throw new EntryNotFoundException(LocalizedStrings.AbstractRegionEntry_THE_CURRENT_VALUE_WAS_NOT_EQUAL_TO_EXPECTED_VALUE.toLocalizedString());
}
}
if (inTokenMode && event.hasOldValue()) {
proceed = true;
} else {
proceed = event.setOldValue(curValue, curValue instanceof GatewaySenderEventImpl) || removeRecoveredEntry || forceDestroy || region.getConcurrencyChecksEnabled() || (event.getOperation() == Operation.REMOVE && (curValue == null || curValue == Token.LOCAL_INVALID || curValue == Token.INVALID));
}
} finally {
OffHeapHelper.releaseWithNoTracking(curValue);
}
if (proceed) {
// Generate the version tag if needed. This method should only be called if we are in fact
// going to destroy the entry, so it must be after the entry not found exception above.
if (!removeRecoveredEntry) {
region.generateAndSetVersionTag(event, this);
}
if (cacheWrite) {
region.cacheWriteBeforeDestroy(event, expectedOldValue);
if (event.getRegion().getServerProxy() != null) {
// server will return a version tag
// update version information (may throw ConcurrentCacheModificationException)
VersionStamp stamp = getVersionStamp();
if (stamp != null) {
stamp.processVersionTag(event);
}
}
}
region.recordEvent(event);
// RegionEntry (the old value) is invalid
if (!region.isProxy() && !isInvalid()) {
IndexManager indexManager = region.getIndexManager();
if (indexManager != null) {
try {
if (isValueNull()) {
@Released Object value = getValueOffHeapOrDiskWithoutFaultIn(region);
try {
Object preparedValue = prepareValueForCache(region, value, false);
_setValue(preparedValue);
releaseOffHeapRefIfRegionBeingClosedOrDestroyed(region, preparedValue);
} finally {
OffHeapHelper.release(value);
}
}
indexManager.updateIndexes(this, IndexManager.REMOVE_ENTRY, IndexProtocol.OTHER_OP);
} catch (QueryException e) {
throw new IndexMaintenanceException(e);
}
}
}
boolean removeEntry = false;
VersionTag v = event.getVersionTag();
if (region.concurrencyChecksEnabled && !removeRecoveredEntry && !event.isFromRILocalDestroy()) {
// Destroy will write a tombstone instead
if (v == null || !v.hasValidVersion()) {
// localDestroy and eviction and ops received with no version tag
// should create a tombstone using the existing version stamp, as should
// (bug #45245) responses from servers that do not have valid version information
VersionStamp stamp = this.getVersionStamp();
if (stamp != null) {
// proxy has no stamps
v = stamp.asVersionTag();
event.setVersionTag(v);
}
}
removeEntry = v == null || !v.hasValidVersion();
} else {
removeEntry = true;
}
if (removeEntry) {
boolean isThisTombstone = isTombstone();
if (inTokenMode && !event.getOperation().isEviction()) {
setValue(region, Token.DESTROYED);
} else {
removePhase1(region, false);
}
if (isThisTombstone) {
region.unscheduleTombstone(this);
}
} else {
makeTombstone(region, v);
}
return true;
} else {
return false;
}
}
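At the end of AbstractRegionEntry.destroy, the entry is either physically removed or turned into a tombstone, depending on whether concurrency checks apply and whether a valid VersionTag is available (taken from the event, or borrowed from the entry's VersionStamp when the event has none). The sketch below restates that decision; the helper name is invented, it assumes the caller has already substituted the stamp's tag where needed, and it uses only the VersionTag.hasValidVersion() call shown above.

// Sketch of the remove-vs-tombstone decision: with concurrency checks in force, a destroy
// that carries a valid version tag leaves a tombstone; otherwise the entry is removed.
static boolean shouldRemoveEntry(boolean concurrencyChecksEnabled, boolean removeRecoveredEntry,
    boolean fromRILocalDestroy, VersionTag tag) {
  if (concurrencyChecksEnabled && !removeRecoveredEntry && !fromRILocalDestroy) {
    return tag == null || !tag.hasValidVersion();
  }
  return true;
}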
Use of org.apache.geode.internal.cache.versions.VersionTag in project geode by apache.
The class AbstractRegionEntry, method processVersionTag.
protected void processVersionTag(EntryEvent cacheEvent, boolean conflictCheck) {
EntryEventImpl event = (EntryEventImpl) cacheEvent;
VersionTag tag = event.getVersionTag();
if (tag == null) {
return;
}
try {
if (tag.isGatewayTag()) {
// this may throw ConcurrentCacheModificationException or modify the event
if (processGatewayTag(cacheEvent)) {
return;
}
assert false : "processGatewayTag failure - returned false";
}
if (!tag.isFromOtherMember()) {
if (!event.getOperation().isNetSearch()) {
// except for netsearch, all locally-generated tags can be ignored
return;
}
}
final InternalDistributedMember originator = (InternalDistributedMember) event.getDistributedMember();
final VersionSource dmId = event.getRegion().getVersionMember();
LocalRegion r = event.getLocalRegion();
boolean eventHasDelta = event.getDeltaBytes() != null && event.getRawNewValue() == null;
VersionStamp stamp = getVersionStamp();
// an event received from a peer or a server may be from a different distributed system than
// the last modification made to this entry, so we must perform a gateway conflict check
if (stamp != null && !tag.isAllowedByResolver()) {
int stampDsId = stamp.getDistributedSystemId();
int tagDsId = tag.getDistributedSystemId();
if (stampDsId != 0 && stampDsId != tagDsId && stampDsId != -1) {
StringBuilder verbose = null;
if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
verbose = new StringBuilder();
verbose.append("processing tag for key " + getKey() + ", stamp=" + stamp.asVersionTag() + ", tag=").append(tag);
}
long stampTime = stamp.getVersionTimeStamp();
long tagTime = tag.getVersionTimeStamp();
if (stampTime > 0 && (tagTime > stampTime || (tagTime == stampTime && tag.getDistributedSystemId() >= stamp.getDistributedSystemId()))) {
if (verbose != null) {
verbose.append(" - allowing event");
logger.trace(LogMarker.TOMBSTONE, verbose);
}
// Update the stamp with event's version information.
applyVersionTag(r, stamp, tag, originator);
return;
}
if (stampTime > 0) {
if (verbose != null) {
verbose.append(" - disallowing event");
logger.trace(LogMarker.TOMBSTONE, verbose);
}
r.getCachePerfStats().incConflatedEventsCount();
persistConflictingTag(r, tag);
throw new ConcurrentCacheModificationException("conflicting event detected");
}
}
}
if (r.getVersionVector() != null && r.getServerProxy() == null && (r.getDataPolicy().withPersistence() || !r.getScope().isLocal())) {
// bug #45258 - perf degradation for local regions and RVV
VersionSource who = tag.getMemberID();
if (who == null) {
who = originator;
}
r.getVersionVector().recordVersion(who, tag);
}
assert !tag.isFromOtherMember() || tag.getMemberID() != null : "remote tag is missing memberID";
// for a long time I had conflict checks turned off in clients when
// receiving a response from a server and applying it to the cache. This lowered
// the CPU cost of versioning but eventually had to be pulled for bug #45453
// events coming from servers while a local sync is held on the entry
// do not require a conflict check. Conflict checks were already
// performed on the server and here we just consume whatever was sent back.
// Event.isFromServer() returns true for client-update messages and
// for putAll/getAll, which do not hold syncs during the server operation.
// for a very long time we had conflict checks turned off for PR buckets.
// Bug 45669 showed a primary dying in the middle of distribution. This caused
// one backup bucket to have a v2. The other bucket was promoted to primary and
// generated a conflicting v2. We need to do the check so that if this second
// v2 loses to the original one in the delta-GII operation that the original v2
// will be the winner in both buckets.
// The new value in the event is not from GII, even though it could be a tombstone
basicProcessVersionTag(r, tag, false, eventHasDelta, dmId, originator, conflictCheck);
} catch (ConcurrentCacheModificationException ex) {
event.isConcurrencyConflict(true);
throw ex;
}
}
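The gateway conflict check in processVersionTag reduces to a timestamp comparison with the distributed system id as a tie-breaker, and the same rule reappears in processGatewayTag below. The standalone method here restates that rule purely for illustration; it is not a Geode API.

// A tag wins if its timestamp is newer, or if the timestamps are equal and its
// distributed system id is greater than or equal to the stamp's.
static boolean tagWins(long stampTime, int stampDsid, long tagTime, int tagDsid) {
  return tagTime > stampTime || (tagTime == stampTime && tagDsid >= stampDsid);
}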
Use of org.apache.geode.internal.cache.versions.VersionTag in project geode by apache.
The class AbstractRegionEntry, method processGatewayTag.
private boolean processGatewayTag(EntryEvent cacheEvent) {
// Gateway tags are installed in the server-side LocalRegion cache
// modification methods. They do not have version numbers or distributed
// member IDs. Instead they only have timestamps and distributed system IDs.
// If there is a resolver plug-in, invoke it. Otherwise we use the timestamps and
// distributed system IDs to determine whether to allow the event to proceed.
final boolean isDebugEnabled = logger.isDebugEnabled();
if (this.isRemoved() && !this.isTombstone()) {
// no conflict on a new entry
return true;
}
EntryEventImpl event = (EntryEventImpl) cacheEvent;
VersionTag tag = event.getVersionTag();
long stampTime = getVersionStamp().getVersionTimeStamp();
long tagTime = tag.getVersionTimeStamp();
int stampDsid = getVersionStamp().getDistributedSystemId();
int tagDsid = tag.getDistributedSystemId();
if (isDebugEnabled) {
logger.debug("processing gateway version information for {}. Stamp dsid={} time={} Tag dsid={} time={}", event.getKey(), stampDsid, stampTime, tagDsid, tagTime);
}
if (tagTime == VersionTag.ILLEGAL_VERSION_TIMESTAMP) {
// no timestamp received from other system - just apply it
return true;
}
if (tagDsid == stampDsid || stampDsid == -1) {
return true;
}
GatewayConflictResolver resolver = event.getRegion().getCache().getGatewayConflictResolver();
if (resolver != null) {
if (isDebugEnabled) {
logger.debug("invoking gateway conflict resolver");
}
final boolean[] disallow = new boolean[1];
final Object[] newValue = new Object[] { this };
GatewayConflictHelper helper = new GatewayConflictHelper() {
@Override
public void disallowEvent() {
disallow[0] = true;
}
@Override
public void changeEventValue(Object value) {
newValue[0] = value;
}
};
@Released TimestampedEntryEventImpl timestampedEvent = (TimestampedEntryEventImpl) event.getTimestampedEvent(tagDsid, stampDsid, tagTime, stampTime);
// gateway conflict resolvers will usually want to see the old value
if (!timestampedEvent.hasOldValue() && isRemoved()) {
// OFFHEAP: since isRemoved I think getValue will never be stored off heap in this case
timestampedEvent.setOldValue(getValue(timestampedEvent.getRegion()));
}
Throwable thr = null;
try {
resolver.onEvent(timestampedEvent, helper);
} catch (CancelException cancelled) {
throw cancelled;
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned now, so don't let this thread continue.
throw err;
} catch (Throwable t) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
logger.error(LocalizedMessage.create(LocalizedStrings.LocalRegion_EXCEPTION_OCCURRED_IN_CONFLICTRESOLVER), t);
thr = t;
} finally {
timestampedEvent.release();
}
if (isDebugEnabled) {
logger.debug("done invoking resolver", thr);
}
if (thr == null) {
if (disallow[0]) {
if (isDebugEnabled) {
logger.debug("conflict resolver rejected the event for {}", event.getKey());
}
throw new ConcurrentCacheModificationException("WAN conflict resolver rejected the operation");
}
tag.setAllowedByResolver(true);
if (newValue[0] != this) {
if (isDebugEnabled) {
logger.debug("conflict resolver changed the value of the event for {}", event.getKey());
}
// the resolver changed the event value!
event.setNewValue(newValue[0]);
}
// if nothing was done then we allow the event
if (isDebugEnabled) {
logger.debug("change was allowed by conflict resolver: {}", tag);
}
return true;
}
}
if (isDebugEnabled) {
logger.debug("performing normal WAN conflict check");
}
if (tagTime > stampTime || tagTime == stampTime && tagDsid >= stampDsid) {
if (isDebugEnabled) {
logger.debug("allowing event");
}
return true;
}
if (isDebugEnabled) {
logger.debug("disallowing event for {}", event.getKey());
}
throw new ConcurrentCacheModificationException("conflicting WAN event detected");
}
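processGatewayTag consults an optional GatewayConflictResolver, handing it a TimestampedEntryEvent together with a GatewayConflictHelper whose disallowEvent() and changeEventValue() callbacks are wired up above. A minimal resolver might look like the sketch below; the "protected/" key policy is invented for illustration, and the import paths assume Geode's public org.apache.geode.cache.util package.

import org.apache.geode.cache.util.GatewayConflictHelper;
import org.apache.geode.cache.util.GatewayConflictResolver;
import org.apache.geode.cache.util.TimestampedEntryEvent;

// Illustrative resolver: vetoes WAN changes to keys it considers protected and otherwise
// stays silent, letting the default timestamp/dsid comparison shown above decide.
public class ProtectKeysResolver implements GatewayConflictResolver {
  @Override
  public void onEvent(TimestampedEntryEvent event, GatewayConflictHelper helper) {
    if (String.valueOf(event.getKey()).startsWith("protected/")) {
      helper.disallowEvent(); // corresponds to the disallow[0] flag handled above
    }
  }
}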