Use of org.apache.geode.internal.cache.versions.VersionSource in project geode by apache.
From the class AbstractRegionEntry, method checkForDeltaConflict:
/**
 * For an event containing a delta we must check to see if the tag's previous member id is the
 * stamp's member id and ensure that the version is only incremented by 1. Otherwise the delta is
 * being applied to a value that does not match the source of the delta.
 */
private void checkForDeltaConflict(LocalRegion region, long stampVersion, long tagVersion,
    VersionStamp stamp, VersionTag tag, VersionSource dmId, InternalDistributedMember sender,
    StringBuilder verbose) {
  if (tagVersion != stampVersion + 1) {
    if (verbose != null) {
      verbose.append("\ndelta requires full value due to version mismatch");
    }
    region.getCachePerfStats().incDeltaFailedUpdates();
    throw new InvalidDeltaException("delta cannot be applied due to version mismatch");
  } else {
    // make sure the tag was based on the value in this entry by checking the
    // tag's previous-changer ID against this stamp's current ID
    VersionSource stampID = stamp.getMemberID();
    if (stampID == null) {
      stampID = dmId;
    }
    VersionSource tagID = tag.getPreviousMemberID();
    if (tagID == null) {
      tagID = sender;
    }
    if (!tagID.equals(stampID)) {
      if (verbose != null) {
        verbose.append("\ndelta requires full value. tag.previous=").append(tagID)
            .append(" but stamp.current=").append(stampID);
      }
      region.getCachePerfStats().incDeltaFailedUpdates();
      throw new InvalidDeltaException("delta cannot be applied due to version ID mismatch");
    }
  }
}
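To make the rule concrete, the following is a minimal standalone sketch of the same delta-conflict check, reduced to the two conditions described in the comment above. EntryStamp, DeltaTag, and DeltaCheck are simplified illustrative types, not Geode classes; they carry only the fields the check needs.

public class DeltaCheck {
  static final class EntryStamp { long version; String memberId; }       // current entry state
  static final class DeltaTag { long version; String previousMemberId; } // incoming delta

  // A delta applies only if it was produced against exactly this entry state:
  // the version advances by one and the delta's previous changer matches the stamp's member.
  static boolean deltaApplies(EntryStamp stamp, DeltaTag tag) {
    if (tag.version != stamp.version + 1) {
      return false; // version gap: the receiver needs the full value, not a delta
    }
    return tag.previousMemberId.equals(stamp.memberId);
  }

  public static void main(String[] args) {
    EntryStamp stamp = new EntryStamp();
    stamp.version = 41;
    stamp.memberId = "member-A";
    DeltaTag tag = new DeltaTag();
    tag.version = 42;
    tag.previousMemberId = "member-A";
    System.out.println(deltaApplies(stamp, tag)); // true: version +1 and matching member
    tag.version = 43;
    System.out.println(deltaApplies(stamp, tag)); // false: version jumped by more than 1
  }
}

Either failing condition corresponds to an InvalidDeltaException path above, where the receiver falls back to requesting the full value.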
Use of org.apache.geode.internal.cache.versions.VersionSource in project geode by apache.
From the class AbstractRegionEntry, method processVersionTag:
protected void processVersionTag(EntryEvent cacheEvent, boolean conflictCheck) {
  EntryEventImpl event = (EntryEventImpl) cacheEvent;
  VersionTag tag = event.getVersionTag();
  if (tag == null) {
    return;
  }
  try {
    if (tag.isGatewayTag()) {
      // this may throw ConcurrentCacheModificationException or modify the event
      if (processGatewayTag(cacheEvent)) {
        return;
      }
      assert false : "processGatewayTag failure - returned false";
    }
    if (!tag.isFromOtherMember()) {
      if (!event.getOperation().isNetSearch()) {
        // except for netsearch, all locally-generated tags can be ignored
        return;
      }
    }
    final InternalDistributedMember originator =
        (InternalDistributedMember) event.getDistributedMember();
    final VersionSource dmId = event.getRegion().getVersionMember();
    LocalRegion r = event.getLocalRegion();
    boolean eventHasDelta = event.getDeltaBytes() != null && event.getRawNewValue() == null;
    VersionStamp stamp = getVersionStamp();
    // perform a gateway conflict check
    if (stamp != null && !tag.isAllowedByResolver()) {
      int stampDsId = stamp.getDistributedSystemId();
      int tagDsId = tag.getDistributedSystemId();
      if (stampDsId != 0 && stampDsId != tagDsId && stampDsId != -1) {
        StringBuilder verbose = null;
        if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
          verbose = new StringBuilder();
          verbose.append("processing tag for key " + getKey() + ", stamp=" + stamp.asVersionTag()
              + ", tag=").append(tag);
        }
        long stampTime = stamp.getVersionTimeStamp();
        long tagTime = tag.getVersionTimeStamp();
        if (stampTime > 0 && (tagTime > stampTime || (tagTime == stampTime
            && tag.getDistributedSystemId() >= stamp.getDistributedSystemId()))) {
          if (verbose != null) {
            verbose.append(" - allowing event");
            logger.trace(LogMarker.TOMBSTONE, verbose);
          }
          // Update the stamp with event's version information.
          applyVersionTag(r, stamp, tag, originator);
          return;
        }
        if (stampTime > 0) {
          if (verbose != null) {
            verbose.append(" - disallowing event");
            logger.trace(LogMarker.TOMBSTONE, verbose);
          }
          r.getCachePerfStats().incConflatedEventsCount();
          persistConflictingTag(r, tag);
          throw new ConcurrentCacheModificationException("conflicting event detected");
        }
      }
    }
    if (r.getVersionVector() != null && r.getServerProxy() == null
        && (r.getDataPolicy().withPersistence() || !r.getScope().isLocal())) {
      // bug #45258 - perf degradation for local regions and RVV
      VersionSource who = tag.getMemberID();
      if (who == null) {
        who = originator;
      }
      r.getVersionVector().recordVersion(who, tag);
    }
    assert !tag.isFromOtherMember() || tag.getMemberID() != null : "remote tag is missing memberID";
    // For a long time conflict checks were turned off in clients when receiving a response
    // from a server and applying it to the cache. This lowered the CPU cost of versioning
    // but eventually had to be pulled for bug #45453.
    // Events coming from servers while a local sync is held on the entry do not require a
    // conflict check. Conflict checks were already performed on the server and here we just
    // consume whatever was sent back. Event.isFromServer() returns true for client-update
    // messages and for putAll/getAll, which do not hold syncs during the server operation.
    // For a very long time conflict checks were also turned off for PR buckets. Bug 45669
    // showed a primary dying in the middle of distribution. This caused one backup bucket to
    // have a v2. The other bucket was promoted to primary and generated a conflicting v2. We
    // need to do the check so that if this second v2 loses to the original one in the
    // delta-GII operation, the original v2 will be the winner in both buckets.
    // The new value in the event is not from GII, even though it could be a tombstone.
    basicProcessVersionTag(r, tag, false, eventHasDelta, dmId, originator, conflictCheck);
  } catch (ConcurrentCacheModificationException ex) {
    event.isConcurrencyConflict(true);
    throw ex;
  }
}
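As a rough illustration of the gateway (WAN) conflict check above, the rule reduces to a timestamp comparison with the distributed-system id as tie-breaker. The sketch below assumes that simplification; in the real method a stamp time of zero or less simply falls through to normal version processing rather than being returned as allowed, and the disallowed case raises ConcurrentCacheModificationException.

public class GatewayConflictRule {
  // Returns true when the incoming tag should win over the existing stamp.
  static boolean allowEvent(long stampTime, int stampDsId, long tagTime, int tagDsId) {
    if (stampTime <= 0) {
      // no usable stamp time; the real method falls through to normal version processing here
      return true;
    }
    return tagTime > stampTime || (tagTime == stampTime && tagDsId >= stampDsId);
  }

  public static void main(String[] args) {
    System.out.println(allowEvent(1000L, 1, 1500L, 2)); // true: newer timestamp wins
    System.out.println(allowEvent(1000L, 2, 1000L, 1)); // false: tie is lost on the lower DS id
  }
}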
Use of org.apache.geode.internal.cache.versions.VersionSource in project geode by apache.
From the class AbstractRegionEntry, method initialImageInit:
@Override
public boolean initialImageInit(final LocalRegion region, final long lastModified,
    final Object newValue, final boolean create, final boolean wasRecovered,
    final boolean acceptedVersionTag) throws RegionClearedException {
  // note that the caller has already write synced this RegionEntry
  boolean result = false;
  // if it has been destroyed then don't do anything
  Token vTok = getValueAsToken();
  if (acceptedVersionTag || create || (vTok != Token.DESTROYED || vTok != Token.TOMBSTONE)) {
    // OFFHEAP noop
    Object newValueToWrite = newValue;
    // OFFHEAP noop
    boolean putValue = acceptedVersionTag || create || (newValueToWrite != Token.LOCAL_INVALID
        && (wasRecovered || (vTok == Token.LOCAL_INVALID)));
    if (region.isUsedForPartitionedRegionAdmin() && newValueToWrite instanceof CachedDeserializable) {
      // Special case for partitioned region meta data: we do not need the RegionEntry in this
      // case, because the PR meta data region will not have an LRU.
      newValueToWrite = ((CachedDeserializable) newValueToWrite).getDeserializedValue(region, null);
      if (!create && newValueToWrite instanceof Versionable) {
        // The heap value should always be deserialized at this point (an OFFHEAP value will not
        // be deserialized).
        final Object oldValue = getValueInVM(region);
        // BUGFIX for 35029: if oldValue is null the newValue should be put.
        if (oldValue == null) {
          putValue = true;
        } else if (oldValue instanceof Versionable) {
          Versionable nv = (Versionable) newValueToWrite;
          Versionable ov = (Versionable) oldValue;
          putValue = nv.isNewerThan(ov);
        }
      }
    }
    if (putValue) {
      // change the value to INVALID below if the region itself has been invalidated
      // and the current value is recovered
      if (create || acceptedVersionTag) {
        // At this point, since we now always recover from disk first, we only care about
        // "create" since a recovered value is impossible if we had a regionInvalidate or
        // regionClear.
        ImageState imageState = region.getImageState();
        // this method is called during loadSnapshot as well as getInitialImage
        if (imageState.getRegionInvalidated()) {
          if (newValueToWrite != Token.TOMBSTONE) {
            newValueToWrite = Token.INVALID;
          }
        } else if (imageState.getClearRegionFlag()) {
          boolean entryOK = false;
          RegionVersionVector rvv = imageState.getClearRegionVersionVector();
          if (rvv != null) {
            // a filtered clear
            VersionSource id = getVersionStamp().getMemberID();
            if (id == null) {
              id = region.getVersionMember();
            }
            if (!rvv.contains(id, getVersionStamp().getRegionVersion())) {
              entryOK = true;
            }
          }
          if (!entryOK) {
            // If the region was cleared during the GII, entries loaded before this one have
            // already been removed from the map by the clear operation; this entry, whose key
            // may have escaped the clear, is cleansed by the destroy token.
            // TODO: never used
            newValueToWrite = Token.DESTROYED;
            imageState.addDestroyedEntry(this.getKey());
            throw new RegionClearedException(
                LocalizedStrings.AbstractRegionEntry_DURING_THE_GII_PUT_OF_ENTRY_THE_REGION_GOT_CLEARED_SO_ABORTING_THE_OPERATION
                    .toLocalizedString());
          }
        }
      }
      setValue(region, this.prepareValueForCache(region, newValueToWrite, false));
      result = true;
      if (newValueToWrite != Token.TOMBSTONE) {
        if (create) {
          region.getCachePerfStats().incCreates();
        }
        region.updateStatsForPut(this, lastModified, false);
      }
      if (logger.isTraceEnabled()) {
        if (newValueToWrite instanceof CachedDeserializable) {
          logger.trace("ProcessChunk: region={}; put a CachedDeserializable ({},{})",
              region.getFullPath(), getKey(),
              ((CachedDeserializable) newValueToWrite).getStringForm());
        } else {
          logger.trace("ProcessChunk: region={}; put({},{})", region.getFullPath(), getKey(),
              StringUtils.forceToString(newValueToWrite));
        }
      }
    }
  }
  return result;
}
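The filtered-clear branch above turns on whether the clear operation's RegionVersionVector already covers the entry's {member id, region version} pair. Below is a simplified sketch of that containment test, with a plain Map standing in for the RegionVersionVector; Geode's real contains() also handles version holes (exceptions), which are omitted here.

import java.util.HashMap;
import java.util.Map;

public class FilteredClearCheck {
  // clearRvv maps member id -> highest region version covered by the clear operation
  static boolean entrySurvivesClear(Map<String, Long> clearRvv, String memberId, long regionVersion) {
    Long covered = clearRvv.get(memberId);
    return covered == null || regionVersion > covered; // not contained, so the entry is kept
  }

  public static void main(String[] args) {
    Map<String, Long> clearRvv = new HashMap<>();
    clearRvv.put("member-A", 10L);
    System.out.println(entrySurvivesClear(clearRvv, "member-A", 12L)); // true: newer than the clear
    System.out.println(entrySurvivesClear(clearRvv, "member-A", 7L)); // false: the clear covers it
  }
}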
Use of org.apache.geode.internal.cache.versions.VersionSource in project geode by apache.
From the class AbstractRegionEntry, method applyVersionTag:
private void applyVersionTag(LocalRegion region, VersionStamp stamp, VersionTag tag,
    InternalDistributedMember sender) {
  VersionSource mbr = tag.getMemberID();
  if (mbr == null) {
    mbr = sender;
  }
  mbr = region.getVersionVector().getCanonicalId(mbr);
  tag.setMemberID(mbr);
  stamp.setVersions(tag);
  if (tag.hasPreviousMemberID()) {
    if (tag.getPreviousMemberID() == null) {
      tag.setPreviousMemberID(stamp.getMemberID());
    } else {
      tag.setPreviousMemberID(region.getVersionVector().getCanonicalId(tag.getPreviousMemberID()));
    }
  }
}
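applyVersionTag routes member ids through the region version vector's getCanonicalId before storing them in the tag and stamp. The sketch below shows the general interning idea that such canonicalization suggests: map every equal id to one shared instance so repeated ids do not accumulate duplicate objects. CanonicalIds is purely illustrative and is not how Geode implements getCanonicalId.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class CanonicalIds<T> {
  private final ConcurrentMap<T, T> ids = new ConcurrentHashMap<>();

  // Returns the single shared instance that is equal to the given id.
  public T canonical(T id) {
    T existing = ids.putIfAbsent(id, id);
    return existing != null ? existing : id;
  }

  public static void main(String[] args) {
    CanonicalIds<String> pool = new CanonicalIds<>();
    String a = new String("member-A");
    String b = new String("member-A");
    // equal ids collapse to one shared instance
    System.out.println(pool.canonical(a) == pool.canonical(b)); // true
  }
}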
Use of org.apache.geode.internal.cache.versions.VersionSource in project geode by apache.
From the class TombstoneService, method gcTombstones:
/**
 * Remove tombstones from the given region that have region-versions <= those in the given
 * removal map.
 *
 * @return a collection of keys removed (only if the region is a bucket - empty otherwise)
 */
@SuppressWarnings("rawtypes")
public Set<Object> gcTombstones(LocalRegion r, Map<VersionSource, Long> regionGCVersions,
    boolean needsKeys) {
  synchronized (getBlockGCLock()) {
    int count = getGCBlockCount();
    if (count > 0) {
      // if any delta GII is ongoing with this member as provider, do not do tombstone GC
      if (logger.isDebugEnabled()) {
        logger.debug("gcTombstones skipped due to {} Delta GII on going", count);
      }
      return null;
    }
    if (logger.isDebugEnabled()) {
      logger.debug("gcTombstones invoked for region {} and version map {}", r, regionGCVersions);
    }
    final VersionSource myId = r.getVersionMember();
    final TombstoneSweeper sweeper = getSweeper(r);
    final List<Tombstone> removals = new ArrayList<Tombstone>();
    sweeper.removeUnexpiredIf(t -> {
      if (t.region == r) {
        VersionSource destroyingMember = t.getMemberID();
        if (destroyingMember == null) {
          destroyingMember = myId;
        }
        Long maxReclaimedRV = regionGCVersions.get(destroyingMember);
        if (maxReclaimedRV != null && t.getRegionVersion() <= maxReclaimedRV) {
          removals.add(t);
          return true;
        }
      }
      return false;
    });
    // Record the GC versions now, so that we can persist them
    for (Map.Entry<VersionSource, Long> entry : regionGCVersions.entrySet()) {
      r.getVersionVector().recordGCVersion(entry.getKey(), entry.getValue());
    }
    // Remove any exceptions from the RVV that are older than the GC version
    r.getVersionVector().pruneOldExceptions();
    // GV RVV.
    if (r.getDataPolicy().withPersistence()) {
      // Update the version vector which reflects what has been persisted on disk.
      r.getDiskRegion().writeRVVGC(r);
    }
    Set<Object> removedKeys = needsKeys ? new HashSet<Object>() : Collections.emptySet();
    for (Tombstone t : removals) {
      boolean tombstoneWasStillInRegionMap =
          t.region.getRegionMap().removeTombstone(t.entry, t, false, true);
      if (needsKeys && tombstoneWasStillInRegionMap) {
        removedKeys.add(t.entry.getKey());
      }
    }
    return removedKeys;
  } // sync on deltaGIILock
}
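The sweeper predicate above can be restated as a simple rule: a tombstone is reclaimable when the GC map has an entry for the member that destroyed it and that member's reclaimed region version is at least the tombstone's region version. The sketch below uses plain types; TombstoneInfo and the String member ids are stand-ins, not Geode's Tombstone or VersionSource.

import java.util.HashMap;
import java.util.Map;

public class TombstoneGcRule {
  static final class TombstoneInfo {
    String destroyingMember;
    long regionVersion;
  }

  // Mirrors the predicate passed to the sweeper: only tombstones covered by the GC map are removed.
  static boolean reclaimable(Map<String, Long> regionGcVersions, TombstoneInfo t) {
    Long maxReclaimed = regionGcVersions.get(t.destroyingMember);
    return maxReclaimed != null && t.regionVersion <= maxReclaimed;
  }

  public static void main(String[] args) {
    Map<String, Long> gcVersions = new HashMap<>();
    gcVersions.put("member-A", 100L);
    TombstoneInfo t = new TombstoneInfo();
    t.destroyingMember = "member-A";
    t.regionVersion = 99L;
    System.out.println(reclaimable(gcVersions, t)); // true: covered by the GC version
    t.regionVersion = 150L;
    System.out.println(reclaimable(gcVersions, t)); // false: newer than the GC version
  }
}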