Example use of org.apache.geode.internal.cache.LocalRegion.NonTXEntry in project geode by apache, taken from the class GIIDeltaDUnitTest, method verifyTombstoneExist:
/**
 * Verifies in the given VM that the entry for {@code key} is (or is not) present as a
 * tombstone, optionally also checking that the tombstone's version timestamp has passed the
 * replicate tombstone expiration timeout. Polls for up to 10 seconds before asserting.
 *
 * @param vm the VM in which to perform the verification
 * @param key the region entry key to inspect
 * @param expectExist whether a tombstone is expected to exist for the key
 * @param expectExpired whether the tombstone is expected to have passed its expiration timeout
 */
protected void verifyTombstoneExist(VM vm, final String key, final boolean expectExist, final boolean expectExpired) {
  SerializableRunnable verify = new SerializableRunnable() {
    private boolean doneVerify() {
      Cache cache = getCache();
      LocalRegion lr = (LocalRegion) getCache().getRegion(REGION_NAME);
      // getEntry(key, true) returns the entry even when it is a tombstone.
      NonTXEntry entry = (NonTXEntry) lr.getEntry(key, true);
      if (expectExist) {
        assertTrue(entry != null && entry.getRegionEntry().isTombstone());
      }
      if (entry == null || !entry.getRegionEntry().isTombstone()) {
        // No tombstone present: this is correct only if none was expected.
        return !expectExist;
      }
      long ts = entry.getRegionEntry().getVersionStamp().getVersionTimeStamp();
      if (expectExpired) {
        // A tombstone is considered expired once its version timestamp plus the
        // replicate tombstone timeout is at or before the cache's current time.
        return ts + TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT <= ((GemFireCacheImpl) cache).cacheTimeMillis();
      }
      return expectExist;
    }

    public void run() {
      WaitCriterion ev = new WaitCriterion() {
        public boolean done() {
          return doneVerify();
        }

        public String description() {
          // Meaningful message for the timeout failure (the original returned null).
          return "tombstone state for key " + key + " did not reach expected state (expectExist="
              + expectExist + ", expectExpired=" + expectExpired + ")";
        }
      };
      Wait.waitForCriterion(ev, 10 * 1000, 200, true);
      assertTrue(doneVerify());
    }
  };
  vm.invoke(verify);
}
Example use of org.apache.geode.internal.cache.LocalRegion.NonTXEntry in project geode by apache, taken from the class UpdateVersionJUnitTest, method testUpdateVersionAfterUpdate:
/**
 * Verifies that an UPDATE_VERSION operation applied after a normal update replaces the
 * entry's version timestamp with the timestamp from the (gateway) version tag, while the
 * entry version, member id and distributed system id advance/remain as expected.
 */
@Test
public void testUpdateVersionAfterUpdate() {
  Cache cache = new CacheFactory().set(MCAST_PORT, "0").create();
  Region region = cache.createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
  try {
    region.create("key-1", "value-1");
    try {
      // Let the clock tick so the update gets a distinct version timestamp.
      Thread.sleep(10);
    } catch (InterruptedException e) {
      // Restore the interrupt status instead of silently swallowing it.
      Thread.currentThread().interrupt();
    }
    region.put("key-1", "value-2");
    Entry entry = region.getEntry("key-1");
    assertTrue(entry instanceof NonTXEntry);
    RegionEntry regionEntry = ((NonTXEntry) entry).getRegionEntry();
    VersionStamp stamp = regionEntry.getVersionStamp();

    // Create a duplicate entry version tag from stamp with newer time-stamp.
    VersionTag tag = VersionTag.create(stamp.getMemberID());
    int entryVersion = stamp.getEntryVersion();
    VersionSource member = stamp.getMemberID();
    int dsid = stamp.getDistributedSystemId();
    // Just in case if clock hasn't ticked.
    long time = System.currentTimeMillis() + 1;
    tag.setEntryVersion(entryVersion);
    tag.setDistributedSystemId(dsid);
    tag.setVersionTimeStamp(time);
    tag.setIsGatewayTag(true);

    assertTrue(region instanceof LocalRegion);
    EntryEventImpl event = createNewEvent((LocalRegion) region, tag, entry.getKey());
    ((LocalRegion) region).basicUpdateEntryVersion(event);

    // Verify the new stamp: timestamp taken from the tag, entry version bumped by one.
    entry = region.getEntry("key-1");
    assertTrue(entry instanceof NonTXEntry);
    regionEntry = ((NonTXEntry) entry).getRegionEntry();
    stamp = regionEntry.getVersionStamp();
    assertEquals("Time stamp did NOT get updated by UPDATE_VERSION operation on LocalRegion", time, stamp.getVersionTimeStamp());
    assertEquals(++entryVersion, stamp.getEntryVersion());
    assertEquals(member, stamp.getMemberID());
    assertEquals(dsid, stamp.getDistributedSystemId());
  } finally {
    region.destroyRegion();
    cache.close();
  }
}
Example use of org.apache.geode.internal.cache.LocalRegion.NonTXEntry in project geode by apache, taken from the class UpdateVersionJUnitTest, method testUpdateVersionAfterDestroy:
/**
 * Verifies that an UPDATE_VERSION operation applied after a destroy (i.e. on a tombstone)
 * replaces the tombstone's version timestamp with the timestamp from the (gateway) version
 * tag, while the entry version, member id and distributed system id advance/remain as expected.
 */
@Test
public void testUpdateVersionAfterDestroy() {
  Cache cache = new CacheFactory().set(MCAST_PORT, "0").create();
  Region region = cache.createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
  try {
    region.create("key-1", "value-1");
    try {
      // Let the clock tick so the destroy gets a distinct version timestamp.
      Thread.sleep(10);
    } catch (InterruptedException e) {
      // Restore the interrupt status instead of silently swallowing it.
      Thread.currentThread().interrupt();
    }
    region.destroy("key-1");

    assertTrue(region instanceof LocalRegion);
    // getEntry(key, true) returns the entry even though it is now a tombstone.
    Entry entry = ((LocalRegion) region).getEntry("key-1", true);
    assertTrue(entry instanceof NonTXEntry);
    RegionEntry regionEntry = ((NonTXEntry) entry).getRegionEntry();
    VersionStamp stamp = regionEntry.getVersionStamp();

    // Create a duplicate entry version tag from stamp with newer time-stamp.
    VersionTag tag = VersionTag.create(stamp.getMemberID());
    int entryVersion = stamp.getEntryVersion();
    VersionSource member = stamp.getMemberID();
    int dsid = stamp.getDistributedSystemId();
    // Just in case if clock hasn't ticked.
    long time = System.currentTimeMillis() + 1;
    tag.setEntryVersion(entryVersion);
    tag.setDistributedSystemId(dsid);
    tag.setVersionTimeStamp(time);
    tag.setIsGatewayTag(true);

    EntryEventImpl event = createNewEvent((LocalRegion) region, tag, "key-1");
    ((LocalRegion) region).basicUpdateEntryVersion(event);

    // Verify the new stamp: timestamp taken from the tag, entry version bumped by one.
    entry = ((LocalRegion) region).getEntry("key-1", true);
    assertTrue(entry instanceof NonTXEntry);
    regionEntry = ((NonTXEntry) entry).getRegionEntry();
    stamp = regionEntry.getVersionStamp();
    assertEquals("Time stamp did NOT get updated by UPDATE_VERSION operation on LocalRegion", time, stamp.getVersionTimeStamp());
    assertEquals(++entryVersion, stamp.getEntryVersion());
    assertEquals(member, stamp.getMemberID());
    assertEquals(dsid, stamp.getDistributedSystemId());
  } finally {
    region.destroyRegion();
    cache.close();
  }
}
Example use of org.apache.geode.internal.cache.LocalRegion.NonTXEntry in project geode by apache, taken from the class PersistentRVVRecoveryDUnitTest, method testWriteCorrectVersionToKrf:
/**
 * Test that when we generate a krf, we write the version tag that matches the entry in the crf.
 *
 * <p>Strategy: do async puts until the oplog rolls, wait (in the flusher thread) for the krf
 * to be created, then throw a DiskAccessException to shut down the disk store. Recover twice —
 * once from the krf and once (with value recovery forced) from the crf — and compare the
 * version tags seen by each recovery.
 */
@Test
public void testWriteCorrectVersionToKrf() throws Throwable {
  Host host = Host.getHost(0);
  final VM vm0 = host.getVM(1);
  final LocalRegion region = (LocalRegion) createAsyncRegionWithSmallQueue(vm0);

  // The idea here is to do a bunch of puts with async persistence
  // At some point the oplog will switch. At that time, we wait for a krf
  // to be created and then throw an exception to shutdown the disk store.
  //
  // This should cause us to create a krf with some entries that have been
  // modified since the crf was written and are still in the async queue.
  //
  // To avoid deadlocks, we need to mark that the oplog was switched,
  // and then do the wait in the flusher thread.

  // Setup the callbacks to wait for krf creation and throw an exception
  IgnoredException ex = IgnoredException.addIgnoredException("DiskAccessException");
  LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
  try {
    final CountDownLatch krfCreated = new CountDownLatch(1);
    final AtomicBoolean oplogSwitched = new AtomicBoolean(false);
    CacheObserverHolder.setInstance(new CacheObserverAdapter() {
      @Override
      public void afterKrfCreated() {
        krfCreated.countDown();
      }

      @Override
      public void afterWritingBytes() {
        if (oplogSwitched.get()) {
          try {
            // Original code waited 3000 SECONDS (~50 min) while the failure message
            // claimed 30 seconds; use the intended 30-second timeout.
            if (!krfCreated.await(30, TimeUnit.SECONDS)) {
              fail("KRF was not created in 30 seconds!");
            }
          } catch (InterruptedException e) {
            // Restore the interrupt status before failing the test.
            Thread.currentThread().interrupt();
            fail("interrupted");
          }
          // Force a failure
          throw new DiskAccessException();
        }
      }

      @Override
      public void afterSwitchingOplog() {
        oplogSwitched.set(true);
      }
    });
    // This is just to make sure the first oplog is not completely garbage.
    region.put("testkey", "key");

    // Do some puts to trigger an oplog roll.
    try {
      // Starting with a value of 1 means the value should match
      // the region version for easier debugging.
      int i = 1;
      while (krfCreated.getCount() > 0) {
        i++;
        region.put("key" + (i % 3), i);
        Thread.sleep(2);
      }
    } catch (CacheClosedException | DiskAccessException expected) {
      // do nothing -- the disk store shutdown we forced above closes the region
    }

    // Wait for the region to be destroyed. The region won't be destroyed
    // until the async flusher thread ends up switching oplogs
    Wait.waitForCriterion(new WaitCriterion() {
      @Override
      public boolean done() {
        return region.isDestroyed();
      }

      @Override
      public String description() {
        return "Region was not destroyed : " + region.isDestroyed();
      }
    }, 3000 * 1000, 100, true);
    closeCache();
  } finally {
    ex.remove();
    LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
    CacheObserverHolder.setInstance(null);
  }

  // Get the version tags from the krf
  LocalRegion recoveredRegion = (LocalRegion) createAsyncRegionWithSmallQueue(vm0);
  VersionTag[] tagsFromKrf = new VersionTag[3];
  for (int i = 0; i < 3; i++) {
    NonTXEntry entry = (NonTXEntry) recoveredRegion.getEntry("key" + i);
    tagsFromKrf[i] = entry.getRegionEntry().getVersionStamp().asVersionTag();
    LogWriterUtils.getLogWriter().info("krfTag[" + i + "]=" + tagsFromKrf[i] + ",value=" + entry.getValue());
  }
  closeCache();

  // Set a system property to skip recovering from the krf so we can
  // get the version tag from the crf.
  System.setProperty(DiskStoreImpl.RECOVER_VALUES_SYNC_PROPERTY_NAME, "true");
  try {
    // Get the version tags from the crf
    recoveredRegion = (LocalRegion) createAsyncRegionWithSmallQueue(vm0);
    VersionTag[] tagsFromCrf = new VersionTag[3];
    for (int i = 0; i < 3; i++) {
      NonTXEntry entry = (NonTXEntry) recoveredRegion.getEntry("key" + i);
      tagsFromCrf[i] = entry.getRegionEntry().getVersionStamp().asVersionTag();
      LogWriterUtils.getLogWriter().info("crfTag[" + i + "]=" + tagsFromCrf[i] + ",value=" + entry.getValue());
    }
    // Make sure the version tags from the krf and the crf match.
    for (int i = 0; i < 3; i++) {
      assertEquals(tagsFromCrf[i], tagsFromKrf[i]);
    }
  } finally {
    System.setProperty(DiskStoreImpl.RECOVER_VALUES_SYNC_PROPERTY_NAME, "false");
  }
}
Aggregations