Use of org.apache.geode.internal.cache.versions.VersionSource in project geode by apache.
The class DistributionAdvisor, method syncForCrashedMember.
/** perform a delta-GII for the given lost member */
public void syncForCrashedMember(final InternalDistributedMember id, final Profile profile) {
  final DistributedRegion dr = getRegionForDeltaGII();
  if (dr == null) {
    return;
  }
  final boolean isDebugEnabled = logger.isDebugEnabled();
  if (isDebugEnabled) {
    logger.debug("da.syncForCrashedMember will sync region in waiting thread pool: {}", dr);
  }
  dr.getDistributionManager().getWaitingThreadPool().execute(new Runnable() {
    // bug #49601 - don't synchronize until GII has been performed
    public void run() {
      while (!dr.isInitialized()) {
        if (dr.isDestroyed()) {
          return;
        } else {
          try {
            if (isDebugEnabled) {
              logger.debug("da.syncForCrashedMember waiting for region to finish initializing: {}", dr);
            }
            Thread.sleep(100);
          } catch (InterruptedException e) {
            return;
          }
        }
      }
      CacheProfile cp = (CacheProfile) profile;
      PersistentMemberID persistentId = cp.persistentID;
      if (dr.getDataPolicy().withPersistence() && persistentId == null) {
        // the crashed member is not a persistent member, so there is nothing to sync against
        if (isDebugEnabled) {
          logger.debug("da.syncForCrashedMember skipping sync because crashed member is not persistent: {}", id);
        }
        return;
      }
      VersionSource lostVersionID;
      if (persistentId != null) {
        lostVersionID = persistentId.getVersionMember();
      } else {
        lostVersionID = id;
      }
      dr.synchronizeForLostMember(id, lostVersionID);
    }
  });
}
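The VersionSource decision in the snippet above is which identity the delta-GII should key on: a persistent member's on-disk identity (PersistentMemberID.getVersionMember()) when one exists, otherwise the member's runtime InternalDistributedMember id. Below is a minimal standalone sketch of that selection, using hypothetical stand-in types (VersionSourceId, MemberId, PersistentId) rather than Geode's internal classes:

import java.util.Optional;

// Hypothetical stand-ins for Geode's VersionSource, InternalDistributedMember
// and PersistentMemberID; the real classes live in org.apache.geode.internal.*.
interface VersionSourceId {}

record MemberId(String name) implements VersionSourceId {}

record PersistentId(String diskStoreId) {
  // the identity under which this member's versions were persisted
  VersionSourceId versionMember() {
    return new MemberId("disk:" + diskStoreId);
  }
}

public class LostMemberVersionSource {

  /**
   * Chooses the version source to synchronize against after a member crash:
   * prefer the persistent identity when one exists, otherwise fall back to
   * the member's runtime id.
   */
  static VersionSourceId lostVersionId(MemberId runtimeId, Optional<PersistentId> persistentId) {
    return persistentId.map(PersistentId::versionMember).orElse(runtimeId);
  }

  public static void main(String[] args) {
    MemberId crashed = new MemberId("server-42");
    System.out.println(lostVersionId(crashed, Optional.empty()));                      // runtime id
    System.out.println(lostVersionId(crashed, Optional.of(new PersistentId("ds-7")))); // persistent id
  }
}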
Use of org.apache.geode.internal.cache.versions.VersionSource in project geode by apache.
The class LocalRegion, method clearRegionLocally.
/**
 * Common code used by both clear and localClear. Along the lines of destroyRegion, this method is
 * invoked to clear the local cache. The cmnClearRegion will be overridden in the derived class
 * DistributedRegion too. For a clear operation, no CacheWriter is invoked; it only has the
 * afterClear callback. Also, like destroyRegion and invalidateRegion, the clear operation does not
 * take a distributed lock. The clear operation also clears the local transactional entries and has
 * an immediately committed state.
 */
void clearRegionLocally(RegionEventImpl regionEvent, boolean cacheWrite, RegionVersionVector vector) {
  final boolean isRvvDebugEnabled = logger.isTraceEnabled(LogMarker.RVV);
  RegionVersionVector rvv = vector;
  if (this.serverRegionProxy != null) {
    // clients and local regions do not maintain a full RVV. can't use it with clear()
    rvv = null;
  }
  if (rvv != null && this.dataPolicy.withStorage()) {
    if (isRvvDebugEnabled) {
      logger.trace(LogMarker.RVV, "waiting for my version vector to dominate{}mine={}{} other={}", getLineSeparator(), getLineSeparator(), this.versionVector.fullToString(), rvv);
    }
    boolean result = this.versionVector.waitToDominate(rvv, this);
    if (!result) {
      if (isRvvDebugEnabled) {
        logger.trace(LogMarker.RVV, "incrementing clearTimeouts for {} rvv={}", getName(), this.versionVector.fullToString());
      }
      getCachePerfStats().incClearTimeouts();
    }
  }
  // If the initial image operation is still in progress we will have to do the clear at the end
  // of the GII. To detect this we try to acquire the GII lock; if the boolean returned is true,
  // the lock was obtained, which also means that GII is still in progress.
  boolean isGIIinProgress = lockGII();
  if (isGIIinProgress) {
    // Also we should try & abort the GII
    try {
      getImageState().setClearRegionFlag(true, /* Clear region */ rvv);
    } finally {
      unlockGII();
    }
  }
  if (cacheWrite && !isGIIinProgress) {
    this.cacheWriteBeforeRegionClear(regionEvent);
  }
  RegionVersionVector myVector = getVersionVector();
  if (myVector != null) {
    if (isRvvDebugEnabled) {
      logger.trace(LogMarker.RVV, "processing version information for {}", regionEvent);
    }
    if (!regionEvent.isOriginRemote() && !regionEvent.getOperation().isLocal()) {
      // generate a new version for the operation
      VersionTag tag = VersionTag.create(getVersionMember());
      tag.setVersionTimeStamp(cacheTimeMillis());
      tag.setRegionVersion(myVector.getNextVersionWhileLocked());
      if (isRvvDebugEnabled) {
        logger.trace(LogMarker.RVV, "generated version tag for clear: {}", tag);
      }
      regionEvent.setVersionTag(tag);
    } else {
      VersionTag tag = regionEvent.getVersionTag();
      if (tag != null) {
        if (isRvvDebugEnabled) {
          logger.trace(LogMarker.RVV, "recording version tag for clear: {}", tag);
        }
        // clear() events always have the ID in the tag
        myVector.recordVersion(tag.getMemberID(), tag);
      }
    }
  }
  // Clear the expiration task for all the entries. It is possible that after clearing them some
  // new entries may be added before clear is issued on the map, but that should be OK, as the
  // expiration thread will silently move on if an entry to be expired no longer exists.
  this.cancelAllEntryExpiryTasks();
  if (this.entryUserAttributes != null) {
    this.entryUserAttributes.clear();
  }
  // with no RVV the clear removes everything, so old version information can be discarded and
  // the GC versions can be set to the current vector versions
  if (rvv == null && myVector != null) {
    myVector.removeOldVersions();
  }
  // clear the disk region if present
  if (this.diskRegion != null) {
    // persist the current rvv and rvvgc, which contain the version for clear() itself
    if (this.getDataPolicy().withPersistence()) {
      // null means not to change dr.rvvTrust
      if (isRvvDebugEnabled) {
        logger.trace(LogMarker.RVV, "Clear: Saved current rvv: {}", this.diskRegion.getRegionVersionVector());
      }
      this.diskRegion.writeRVV(this, null);
      this.diskRegion.writeRVVGC(this);
    }
    // clear the entries in disk
    this.diskRegion.clear(this, rvv);
  } else {
    // the following is done in diskRegion.clear when the disk region is not null, so here it has
    // to be done explicitly
    // Now remove the tx entries for this region
    txClearRegion();
    // Now clear the map of committed entries
    Set<VersionSource> remainingIDs = clearEntries(rvv);
    if (!this.dataPolicy.withPersistence()) {
      // persistent regions do not reap IDs
      if (myVector != null) {
        myVector.removeOldMembers(remainingIDs);
      }
    }
  }
  if (!isProxy()) {
    // TODO: the indexManager variable was made volatile. Is that necessary?
    if (this.indexManager != null) {
      try {
        this.indexManager.rerunIndexCreationQuery();
      } catch (QueryException qe) {
        // TODO: never throw an anonymous class (and the outer class is not serializable)
        throw new CacheRuntimeException(LocalizedStrings.LocalRegion_EXCEPTION_OCCURRED_WHILE_RE_CREATING_INDEX_DATA_ON_CLEARED_REGION.toLocalizedString(), qe) {
          private static final long serialVersionUID = 0L;
        };
      }
    }
  }
  if (ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
    CacheObserverHolder.getInstance().afterRegionClear(regionEvent);
  }
  if (isGIIinProgress) {
    return;
  }
  regionEvent.setEventType(EnumListenerEvent.AFTER_REGION_CLEAR);
  // Issue a callback to afterClear if the region is initialized
  boolean hasListener = hasListener();
  if (hasListener) {
    dispatchListenerEvent(EnumListenerEvent.AFTER_REGION_CLEAR, regionEvent);
  }
}
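The versioning branch in the middle of clearRegionLocally is where VersionSource-related state is touched: a locally originated clear generates a fresh region version and tags the event, while a remote clear records the tag carried by the event into the local version vector. The following is a minimal sketch of that branch only, with toy stand-ins (ToyVersionVector, ToyVersionTag) in place of Geode's RegionVersionVector and VersionTag, which are far richer:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Toy region version vector keyed by member id; it mirrors only the two
// operations clearRegionLocally uses: generating the next local version and
// recording a remote member's version.
class ToyVersionVector {
  private final Map<String, Long> versions = new ConcurrentHashMap<>();
  private final String localMember;
  private long localVersion;

  ToyVersionVector(String localMember) {
    this.localMember = localMember;
  }

  synchronized long nextVersion() {
    localVersion++;
    versions.put(localMember, localVersion);
    return localVersion;
  }

  void recordVersion(String member, long version) {
    versions.merge(member, version, Long::max);
  }

  @Override
  public String toString() {
    return versions.toString();
  }
}

record ToyVersionTag(String member, long regionVersion, long timestamp) {}

public class ClearVersioningSketch {

  /** Local clears generate a new tag; remote clears record the tag they carry. */
  static ToyVersionTag versionForClear(ToyVersionVector vector, String localMember,
      ToyVersionTag remoteTag /* null when the clear originated locally */) {
    if (remoteTag == null) {
      return new ToyVersionTag(localMember, vector.nextVersion(), System.currentTimeMillis());
    }
    vector.recordVersion(remoteTag.member(), remoteTag.regionVersion());
    return remoteTag;
  }

  public static void main(String[] args) {
    ToyVersionVector vector = new ToyVersionVector("member-A");
    System.out.println(versionForClear(vector, "member-A", null));               // locally generated
    versionForClear(vector, "member-A", new ToyVersionTag("member-B", 7, 123L)); // remote tag recorded
    System.out.println(vector);
  }
}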
Use of org.apache.geode.internal.cache.versions.VersionSource in project geode by apache.
The class PRBucketSynchronizationDUnitTest, method doBucketsSyncOnPrimaryLoss.
/**
* We hit this problem in bug #45669. A primary was lost and we did not see secondary buckets
* perform a delta-GII.
*/
public void doBucketsSyncOnPrimaryLoss(TestType typeOfTest) {
  IgnoredException.addIgnoredException("killing member's ds");
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  Set<VM> verifyVMs = new HashSet<VM>();
  final String name = this.getUniqueName() + "Region";
  verifyVMs.add(vm0);
  verifyVMs.add(vm1);
  verifyVMs.add(vm2);
  disconnectAllFromDS();
  try {
    createRegion(vm0, name, typeOfTest);
    createRegion(vm1, name, typeOfTest);
    createRegion(vm2, name, typeOfTest);
    createEntry1(vm0);
    VM primaryOwner;
    if (isPrimaryForBucket0(vm0)) {
      primaryOwner = vm0;
    } else if (isPrimaryForBucket0(vm1)) {
      primaryOwner = vm1;
    } else {
      primaryOwner = vm2;
    }
    verifyVMs.remove(primaryOwner);
    // cause one of the VMs to throw away the next operation
    VM creatorVM = null;
    InternalDistributedMember primaryID = getID(primaryOwner);
    VersionSource primaryVersionID = getVersionID(primaryOwner);
    for (VM vm : verifyVMs) {
      creatorVM = vm;
      createEntry2(creatorVM, primaryID, primaryVersionID);
      break;
    }
    verifyVMs.remove(creatorVM);
    // Now we crash the primary bucket owner, simulating death during distribution.
    // The backup buckets should perform a delta-GII for the lost member and get back in sync.
    DistributedTestUtils.crashDistributedSystem(primaryOwner);
    for (VM vm : verifyVMs) {
      verifySynchronized(vm, primaryID);
    }
  } finally {
    disconnectAllFromDS();
  }
}
Use of org.apache.geode.internal.cache.versions.VersionSource in project geode by apache.
The class UpdateVersionJUnitTest, method testUpdateVersionAfterUpdateOnPR.
@Test
public void testUpdateVersionAfterUpdateOnPR() {
  Cache cache = new CacheFactory().set(MCAST_PORT, "0").create();
  Region region = cache.createRegionFactory(RegionShortcut.PARTITION).create(regionName);
  try {
    region.create("key-1", "value-1");
    try {
      Thread.sleep(10);
    } catch (InterruptedException e) {
      // ignored - the sleep only ensures the next operation gets a later timestamp
    }
    region.put("key-1", "value-2");
    Entry entry = region.getEntry("key-1");
    assertTrue(entry instanceof EntrySnapshot);
    RegionEntry regionEntry = ((EntrySnapshot) entry).getRegionEntry();
    VersionStamp stamp = regionEntry.getVersionStamp();
    // Create a duplicate entry version tag from the stamp with a newer time-stamp.
    VersionTag tag = VersionTag.create(stamp.getMemberID());
    int entryVersion = stamp.getEntryVersion();
    VersionSource member = stamp.getMemberID();
    int dsid = stamp.getDistributedSystemId();
    long time = System.currentTimeMillis();
    tag.setEntryVersion(entryVersion);
    tag.setDistributedSystemId(dsid);
    tag.setVersionTimeStamp(time);
    tag.setIsGatewayTag(true);
    assertTrue(region instanceof PartitionedRegion);
    EntryEventImpl event = createNewEvent((PartitionedRegion) region, tag, entry.getKey());
    ((PartitionedRegion) region).basicUpdateEntryVersion(event);
    // Verify the new stamp
    entry = region.getEntry("key-1");
    assertTrue(entry instanceof EntrySnapshot);
    regionEntry = ((EntrySnapshot) entry).getRegionEntry();
    stamp = regionEntry.getVersionStamp();
    assertEquals("Time stamp did NOT get updated by UPDATE_VERSION operation on PartitionedRegion", time, stamp.getVersionTimeStamp());
    assertEquals(++entryVersion, stamp.getEntryVersion());
    assertEquals(member, stamp.getMemberID());
    assertEquals(dsid, stamp.getDistributedSystemId());
  } finally {
    region.destroyRegion();
    cache.close();
  }
}
Use of org.apache.geode.internal.cache.versions.VersionSource in project geode by apache.
The class UpdateVersionJUnitTest, method testUpdateVersionAfterUpdate.
@Test
public void testUpdateVersionAfterUpdate() {
  Cache cache = new CacheFactory().set(MCAST_PORT, "0").create();
  Region region = cache.createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
  try {
    region.create("key-1", "value-1");
    try {
      Thread.sleep(10);
    } catch (InterruptedException e) {
      // ignored - the sleep only ensures the next operation gets a later timestamp
    }
    region.put("key-1", "value-2");
    Entry entry = region.getEntry("key-1");
    assertTrue(entry instanceof NonTXEntry);
    RegionEntry regionEntry = ((NonTXEntry) entry).getRegionEntry();
    VersionStamp stamp = regionEntry.getVersionStamp();
    // Create a duplicate entry version tag from the stamp with a newer time-stamp.
    VersionTag tag = VersionTag.create(stamp.getMemberID());
    int entryVersion = stamp.getEntryVersion();
    VersionSource member = stamp.getMemberID();
    int dsid = stamp.getDistributedSystemId();
    // Just in case the clock hasn't ticked.
    long time = System.currentTimeMillis() + 1;
    tag.setEntryVersion(entryVersion);
    tag.setDistributedSystemId(dsid);
    tag.setVersionTimeStamp(time);
    tag.setIsGatewayTag(true);
    assertTrue(region instanceof LocalRegion);
    EntryEventImpl event = createNewEvent((LocalRegion) region, tag, entry.getKey());
    ((LocalRegion) region).basicUpdateEntryVersion(event);
    // Verify the new stamp
    entry = region.getEntry("key-1");
    assertTrue(entry instanceof NonTXEntry);
    regionEntry = ((NonTXEntry) entry).getRegionEntry();
    stamp = regionEntry.getVersionStamp();
    assertEquals("Time stamp did NOT get updated by UPDATE_VERSION operation on LocalRegion", time, stamp.getVersionTimeStamp());
    assertEquals(++entryVersion, stamp.getEntryVersion());
    assertEquals(member, stamp.getMemberID());
    assertEquals(dsid, stamp.getDistributedSystemId());
  } finally {
    region.destroyRegion();
    cache.close();
  }
}
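Both UpdateVersionJUnitTest methods follow the same recipe: read the entry's current VersionStamp, copy its member id, entry version, and distributed system id into a new gateway VersionTag whose only difference is a later timestamp, and apply it through basicUpdateEntryVersion. Below is a stripped-down sketch of just the tag-building step, using hypothetical stand-in records instead of Geode's VersionStamp and VersionTag:

// Hypothetical stand-ins for Geode's VersionStamp and VersionTag; only the
// fields the two tests copy across are modelled here.
record StampView(String memberId, int entryVersion, int distributedSystemId, long timestamp) {}

record GatewayTag(String memberId, int entryVersion, int distributedSystemId,
    long timestamp, boolean gatewayTag) {}

public class DuplicateGatewayTagSketch {

  /**
   * Builds a tag identical to the current stamp except for a strictly newer
   * timestamp, which is what lets an UPDATE_VERSION-style operation advance
   * the stored version time without changing the value or entry version.
   */
  static GatewayTag newerDuplicate(StampView stamp) {
    long newerTime = Math.max(System.currentTimeMillis(), stamp.timestamp() + 1);
    return new GatewayTag(stamp.memberId(), stamp.entryVersion(),
        stamp.distributedSystemId(), newerTime, true);
  }

  public static void main(String[] args) {
    StampView current = new StampView("member-A", 2, 1, System.currentTimeMillis() - 10);
    System.out.println(newerDuplicate(current));
  }
}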