Example usage of org.apache.geode.internal.cache.versions.RegionVersionVector in the Apache Geode project — class GIIDeltaDUnitTest, method testClearAfterChunkEntries.
/**
 * P and R are peers, each holds a DR. Each does a few operations to make RVV=P6,R6, RVVGC=P0,R0
 * for both members. R goes offline, then P applies P7. Restart R. It will trigger deltaGII to
 * chunk entry P7(key1). After the chunk has arrived, do clear(). Make sure R does not contain
 * key1 after GII.
 */
// GEODE-1068: time sensitive, SLOW_DISTRIBUTION_MS, waitForCriterion,
@Category(FlakyTest.class)
// possible thread unsafe test hooks, async actions, depends on stats
@Test
public void testClearAfterChunkEntries() throws Throwable {
  prepareForEachTest();
  final DiskStoreID memberP = getMemberID(P);
  final DiskStoreID memberR = getMemberID(R);
  // Sanity check: no artificial distribution delay left over from another test.
  assertEquals(0, DistributedCacheOperation.SLOW_DISTRIBUTION_MS);
  prepareCommonTestData(6);
  // let r4,r5,r6 succeed
  doOnePut(R, 4, "key4");
  doOneDestroy(R, 5, "key5");
  doOnePut(R, 6, "key1");
  // P's rvv=p6, gc=0
  waitForToVerifyRVV(P, memberP, 6, null, 0);
  // P's rvv=r6, gc=0
  waitForToVerifyRVV(P, memberR, 6, null, 0);
  // R's rvv=p6, gc=0
  waitForToVerifyRVV(R, memberP, 6, null, 0);
  // R's rvv=r6, gc=0
  waitForToVerifyRVV(R, memberR, 6, null, 0);
  // set test hook: pause R's GII after it has received the image reply (the chunk)
  R.invoke(() -> {
    Mycallback myAfterReceivedImageReply =
        new Mycallback(GIITestHookType.AfterReceivedImageReply, REGION_NAME);
    InitialImageOperation.setGIITestHook(myAfterReceivedImageReply);
  });
  // save R's RVV before shutdown so we can verify P would choose a delta (not full) GII
  byte[] R_rvv_bytes = getRVVByteArray(R, REGION_NAME);
  // shutdown R
  closeCache(R);
  // key1 will be the delta
  doOnePut(P, 7, "key1");
  // restart R
  checkIfFullGII(P, REGION_NAME, R_rvv_bytes, false);
  AsyncInvocation async3 = createDistributedRegionAsync(R);
  // when the chunk has arrived, do clear()
  waitForCallbackStarted(R, GIITestHookType.AfterReceivedImageReply);
  doOneClear(P, 8);
  // release the hook so R's GII can complete
  R.invoke(() -> InitialImageOperation.resetGIITestHook(GIITestHookType.AfterReceivedImageReply, true));
  // getResult (rather than join) rethrows any failure from the async region creation
  async3.getResult(MAX_WAIT);
  // clear() increased P's version by 1 to P8
  // after clear, P and R's RVVGC == RVV
  // P's rvv=p8, gc=8
  waitForToVerifyRVV(P, memberP, 8, null, 8);
  // P's rvv=r6, gc=6
  waitForToVerifyRVV(P, memberR, 6, null, 6);
  // restart R again to do fullGII
  closeCache(R);
  createDistributedRegion(R);
  RegionVersionVector p_rvv = getRVV(P);
  RegionVersionVector r_rvv = getRVV(R);
  // after gii, rvv should be the same
  assertSameRVV(p_rvv, r_rvv);
  // R's rvv=p8, gc=8
  waitForToVerifyRVV(R, memberP, 8, null, 8);
  // R's rvv=r6, gc=6
  waitForToVerifyRVV(R, memberR, 6, null, 6);
  // key1 must have been wiped out by the clear on both members
  waitToVerifyKey(P, "key1", null);
  waitToVerifyKey(R, "key1", null);
  verifyDeltaSizeFromStats(R, 0, 1);
}
Example usage of org.apache.geode.internal.cache.versions.RegionVersionVector in the Apache Geode project — class GIIDeltaDUnitTest, method testDeltaGIIWithSameRVV.
/**
 * vm0 and vm1 are peers, each holds a DR. Each does a few operations to make RVV=P7,R6,
 * RVVGC=P4,R0 for both members. vm1 becomes offline then restarts. The deltaGII should only
 * exchange RVV. No need to send data from vm0 to vm1.
 */
@Test
public void testDeltaGIIWithSameRVV() throws Throwable {
prepareForEachTest();
final DiskStoreID memberP = getMemberID(P);
final DiskStoreID memberR = getMemberID(R);
// Sanity check: no artificial distribution delay left over from another test.
assertEquals(0, DistributedCacheOperation.SLOW_DISTRIBUTION_MS);
prepareCommonTestData(6);
// force tombstone GC to let RVVGC become P4:R0
forceGC(P, 2);
// P's rvv=p6, gc=4
waitForToVerifyRVV(P, memberP, 6, null, 4);
// P's rvv=r3, gc=0
waitForToVerifyRVV(P, memberR, 3, null, 0);
// let r4,r5,r6 succeed
doOnePut(R, 4, "key4");
doOneDestroy(R, 5, "key5");
doOnePut(R, 6, "key1");
// let p7 succeed
doOnePut(P, 7, "key1");
// P's rvv=p7, gc=4
waitForToVerifyRVV(P, memberP, 7, null, 4);
// P's rvv=r6, gc=0
waitForToVerifyRVV(P, memberR, 6, null, 0);
// R's rvv=p7, gc=4
waitForToVerifyRVV(R, memberP, 7, null, 4);
// R's rvv=r6, gc=0
waitForToVerifyRVV(R, memberR, 6, null, 0);
// now the rvv and rvvgc at P and R should be the same
// save R's rvv in byte array, check if it will be fullGII
byte[] R_rvv_bytes = getRVVByteArray(R, REGION_NAME);
// shutdown R and restart
closeCache(R);
// with identical RVVs a delta GII is expected, so checkIfFullGII expects false
checkIfFullGII(P, REGION_NAME, R_rvv_bytes, false);
createDistributedRegion(R);
// R's rvv=p7, gc=4
waitForToVerifyRVV(R, memberP, 7, null, 4);
// R's rvv=r6, gc=0
waitForToVerifyRVV(R, memberR, 6, null, 0);
RegionVersionVector p_rvv = getRVV(P);
RegionVersionVector r_rvv = getRVV(R);
// after gii, rvv should be the same
assertSameRVV(p_rvv, r_rvv);
// If fullGII, the key size in gii chunk is 4, i.e. key1,key3,key4,key5(tombstone). key2 is
// GCed.
// If delta GII, the key size should be 0
verifyDeltaSizeFromStats(R, 0, 1);
}
Example usage of org.apache.geode.internal.cache.versions.RegionVersionVector in the Apache Geode project — class GIIDeltaDUnitTest, method testClearAfterSavedRVV.
/**
 * P and R are peers, each holds a DR. Each does a few operations to make RVV=P6,R6, RVVGC=P0,R0
 * for both members. R goes offline, then P applies P7. Restart R. When P's RVV arrives, do
 * clear(). It should trigger fullGII. Make sure R does not contain key1 after GII.
 */
@Test
public void testClearAfterSavedRVV() throws Throwable {
  prepareForEachTest();
  final DiskStoreID memberP = getMemberID(P);
  final DiskStoreID memberR = getMemberID(R);
  // Sanity check: no artificial distribution delay left over from another test.
  assertEquals(0, DistributedCacheOperation.SLOW_DISTRIBUTION_MS);
  prepareCommonTestData(6);
  // let r4,r5,r6 succeed
  doOnePut(R, 4, "key4");
  doOneDestroy(R, 5, "key5");
  doOnePut(R, 6, "key1");
  // P's rvv=p6, gc=0
  waitForToVerifyRVV(P, memberP, 6, null, 0);
  // P's rvv=r6, gc=0
  waitForToVerifyRVV(P, memberR, 6, null, 0);
  // R's rvv=p6, gc=0
  waitForToVerifyRVV(R, memberP, 6, null, 0);
  // R's rvv=r6, gc=0
  waitForToVerifyRVV(R, memberR, 6, null, 0);
  // set test hook: pause R's GII right after it has saved the received RVV
  R.invoke(() -> {
    Mycallback myAfterSavedReceivedRVV =
        new Mycallback(GIITestHookType.AfterSavedReceivedRVV, REGION_NAME);
    InitialImageOperation.setGIITestHook(myAfterSavedReceivedRVV);
  });
  // shutdown R
  closeCache(R);
  // key1 will be the delta
  doOnePut(P, 7, "key1");
  // restart R
  AsyncInvocation async3 = createDistributedRegionAsync(R);
  // when P's RVV has arrived, do clear()
  waitForCallbackStarted(R, GIITestHookType.AfterSavedReceivedRVV);
  doOneClear(P, 8);
  // release the hook so R's GII can complete
  R.invoke(() -> InitialImageOperation.resetGIITestHook(GIITestHookType.AfterSavedReceivedRVV, true));
  // getResult (rather than join) rethrows any failure from the async region creation,
  // matching testClearAfterChunkEntries; join alone would silently swallow GII errors
  async3.getResult(MAX_WAIT);
  // clear() increased P's version by 1 to P8
  // after clear, P and R's RVVGC == RVV
  // P's rvv=p8, gc=8
  waitForToVerifyRVV(P, memberP, 8, null, 8);
  // P's rvv=r6, gc=6
  waitForToVerifyRVV(P, memberR, 6, null, 6);
  RegionVersionVector p_rvv = getRVV(P);
  RegionVersionVector r_rvv = getRVV(R);
  // after gii, rvv should be the same
  assertSameRVV(p_rvv, r_rvv);
  // R's rvv=p8, gc=8
  waitForToVerifyRVV(R, memberP, 8, null, 8);
  // R's rvv=r6, gc=6
  waitForToVerifyRVV(R, memberR, 6, null, 6);
  // key1 must have been wiped out by the clear on both members
  waitToVerifyKey(P, "key1", null);
  waitToVerifyKey(R, "key1", null);
  verifyDeltaSizeFromStats(R, 0, 0);
}
Example usage of org.apache.geode.internal.cache.versions.RegionVersionVector in the Apache Geode project — class PersistentRecoveryOrderDUnitTest, method getRVV.
/**
 * Fetches the version vector of REGION_NAME from the given VM.
 *
 * <p>The vector is serialized in the remote VM with GemFire data serialization (a
 * RegionVersionVector is not java.io.Serializable) and reconstructed locally from the
 * returned byte array.
 *
 * @param vm the member whose region version vector to read
 * @return a local copy of the remote region's version vector
 */
protected RegionVersionVector getRVV(VM vm) throws IOException, ClassNotFoundException {
  SerializableCallable fetchRvvBytes = new SerializableCallable("getRVV") {
    public Object call() throws Exception {
      Cache cache = getCache();
      LocalRegion region = (LocalRegion) cache.getRegion(REGION_NAME);
      // Clone before serializing so a stable snapshot is transmitted.
      RegionVersionVector snapshot = region.getVersionVector().getCloneForTransmission();
      HeapDataOutputStream out = new HeapDataOutputStream(Version.CURRENT);
      // Using gemfire serialization because RegionVersionVector is not java serializable.
      DataSerializer.writeObject(snapshot, out);
      return out.toByteArray();
    }
  };
  byte[] serialized = (byte[]) vm.invoke(fetchRvvBytes);
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(serialized));
  return DataSerializer.readObject(in);
}
Example usage of org.apache.geode.internal.cache.versions.RegionVersionVector in the Apache Geode project — class MultiVMRegionTestCase, method versionTestConcurrentEvents.
/**
 * This tests the concurrency versioning system to ensure that event conflation happens correctly
 * and that the statistic is being updated properly
 */
public void versionTestConcurrentEvents() throws Exception {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
// create replicated regions in VM 0 and 1, then perform concurrent ops
// on the same key while creating the region in VM2. Afterward make
// sure that all three regions are consistent
final String name = this.getUniqueName() + "-CC";
SerializableRunnable createRegion = new SerializableRunnable("Create Region") {
@Override
public void run() {
try {
RegionFactory f = getCache().createRegionFactory(getRegionAttributes());
// CCRegion is a static field shared by the dunit VMs running this runnable
CCRegion = (LocalRegion) f.create(name);
} catch (CacheException ex) {
fail("While creating region", ex);
}
}
};
vm0.invoke(createRegion);
vm1.invoke(createRegion);
// run concurrent ops for ~5 seconds and assert the conflation stat moved
// (global scope serializes ops, so no conflation is expected there)
SerializableRunnable performOps = new SerializableRunnable("perform concurrent ops") {
@Override
public void run() {
try {
doOpsLoop(5000, false);
long events = CCRegion.getCachePerfStats().getConflatedEventsCount();
if (!CCRegion.getScope().isGlobal()) {
assertTrue("expected some event conflation", events > 0);
}
} catch (CacheException e) {
fail("while performing concurrent operations", e);
}
// } catch (InterruptedException e) {
// fail("someone interrupted my sleep");
// }
}
};
AsyncInvocation a0 = vm0.invokeAsync(performOps);
AsyncInvocation a1 = vm1.invokeAsync(performOps);
// let the ops get under way before creating the region in vm2, so vm2's
// GII happens while concurrent updates are in flight
try {
Thread.sleep(500);
} catch (InterruptedException e) {
fail("sleep was interrupted");
}
vm2.invoke(createRegion);
// NOTE(review): only one of the two members is required to observe conflation;
// the test fails only if neither did
boolean a0failed = waitForAsyncProcessing(a0, "expected some event conflation");
boolean a1failed = waitForAsyncProcessing(a1, "expected some event conflation");
if (a0failed && a1failed) {
fail("neither member saw event conflation - check stats for " + name);
}
// check consistency of the regions
Map r0Contents = (Map) vm0.invoke(() -> this.getCCRegionContents());
Map r1Contents = (Map) vm1.invoke(() -> this.getCCRegionContents());
Map r2Contents = (Map) vm2.invoke(() -> this.getCCRegionContents());
// keys cckey0..cckey9 plus subkeys cckey<i>-1, cckey<i>-2 must agree across all three VMs
for (int i = 0; i < 10; i++) {
String key = "cckey" + i;
assertEquals("region contents are not consistent for " + key, r0Contents.get(key), r1Contents.get(key));
assertEquals("region contents are not consistent for " + key, r1Contents.get(key), r2Contents.get(key));
for (int subi = 1; subi < 3; subi++) {
String subkey = key + "-" + subi;
if (r0Contents.containsKey(subkey)) {
assertEquals("region contents are not consistent for " + subkey, r0Contents.get(subkey), r1Contents.get(subkey));
assertEquals("region contents are not consistent for " + subkey, r1Contents.get(subkey), r2Contents.get(subkey));
} else {
// if vm0 lacks the subkey, vm1 must lack it too
assertTrue(!r1Contents.containsKey(subkey));
}
}
}
if (!getRegionAttributes().getScope().isDistributedNoAck()) {
// no-ack doesn't support deltas
vm0.invoke(() -> this.clearCCRegion());
// second phase: hammer the same keys with DeltaValue puts for ~5 seconds and
// assert that some delta updates failed (forcing full-value distribution)
performOps = new SerializableRunnable("perform concurrent delta ops") {
@Override
public void run() {
try {
long stopTime = System.currentTimeMillis() + 5000;
Random ran = new Random(System.currentTimeMillis());
while (System.currentTimeMillis() < stopTime) {
for (int i = 0; i < 10; i++) {
CCRegion.put("cckey" + i, new DeltaValue("ccvalue" + ran.nextInt()));
}
}
long events = CCRegion.getCachePerfStats().getDeltaFailedUpdates();
assertTrue("expected some failed deltas", events > 0);
} catch (CacheException e) {
fail("while performing concurrent operations", e);
}
}
};
a0 = vm0.invokeAsync(performOps);
a1 = vm1.invokeAsync(performOps);
a0failed = waitForAsyncProcessing(a0, "expected some failed deltas");
a1failed = waitForAsyncProcessing(a1, "expected some failed deltas");
if (a0failed && a1failed) {
fail("neither member saw failed deltas - check stats for " + name);
}
// check consistency of the regions
r0Contents = (Map) vm0.invoke(() -> this.getCCRegionContents());
r1Contents = (Map) vm1.invoke(() -> this.getCCRegionContents());
r2Contents = (Map) vm2.invoke(() -> this.getCCRegionContents());
for (int i = 0; i < 10; i++) {
String key = "cckey" + i;
assertEquals("region contents are not consistent", r0Contents.get(key), r1Contents.get(key));
assertEquals("region contents are not consistent", r1Contents.get(key), r2Contents.get(key));
for (int subi = 1; subi < 3; subi++) {
String subkey = key + "-" + subi;
if (r0Contents.containsKey(subkey)) {
assertEquals("region contents are not consistent", r0Contents.get(subkey), r1Contents.get(subkey));
assertEquals("region contents are not consistent", r1Contents.get(subkey), r2Contents.get(subkey));
} else {
assertTrue(!r1Contents.containsKey(subkey));
}
}
}
// The region version vectors should now all be consistent with the version stamps in the
// entries.
InternalDistributedMember vm0Id = (InternalDistributedMember) vm0.invoke(() -> this.getMemberId());
InternalDistributedMember vm1Id = (InternalDistributedMember) vm1.invoke(() -> this.getMemberId());
InternalDistributedMember vm2Id = (InternalDistributedMember) vm2.invoke(() -> this.getMemberId());
long start = System.currentTimeMillis();
RegionVersionVector vm0vv = getVersionVector(vm0);
long end = System.currentTimeMillis();
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("version vector transmission took " + (end - start) + " ms");
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("vm0 vector = " + vm0vv);
RegionVersionVector vm1vv = getVersionVector(vm1);
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("vm1 vector = " + vm1vv);
RegionVersionVector vm2vv = getVersionVector(vm2);
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("vm2 vector = " + vm2vv);
Map<String, VersionTag> vm0Versions = (Map<String, VersionTag>) vm0.invoke(() -> this.getCCRegionVersions());
Map<String, VersionTag> vm1Versions = (Map<String, VersionTag>) vm1.invoke(() -> this.getCCRegionVersions());
Map<String, VersionTag> vm2Versions = (Map<String, VersionTag>) vm2.invoke(() -> this.getCCRegionVersions());
// every version tag recorded on an entry in any VM must be covered by
// the version vector of all three VMs
for (Map.Entry<String, VersionTag> entry : vm0Versions.entrySet()) {
VersionTag tag = entry.getValue();
// a null member ID in a tag means "the member that owns this entry" — fill it in
tag.replaceNullIDs(vm0Id);
assertTrue(vm0Id + " should contain " + tag, vm0vv.contains(tag.getMemberID(), tag.getRegionVersion()));
assertTrue(vm1Id + " should contain " + tag, vm1vv.contains(tag.getMemberID(), tag.getRegionVersion()));
assertTrue(vm2Id + " should contain " + tag, vm2vv.contains(tag.getMemberID(), tag.getRegionVersion()));
}
for (Map.Entry<String, VersionTag> entry : vm1Versions.entrySet()) {
VersionTag tag = entry.getValue();
tag.replaceNullIDs(vm1Id);
assertTrue(vm0Id + " should contain " + tag, vm0vv.contains(tag.getMemberID(), tag.getRegionVersion()));
assertTrue(vm1Id + " should contain " + tag, vm1vv.contains(tag.getMemberID(), tag.getRegionVersion()));
assertTrue(vm2Id + " should contain " + tag, vm2vv.contains(tag.getMemberID(), tag.getRegionVersion()));
}
for (Map.Entry<String, VersionTag> entry : vm2Versions.entrySet()) {
VersionTag tag = entry.getValue();
tag.replaceNullIDs(vm2Id);
assertTrue(vm0Id + " should contain " + tag, vm0vv.contains(tag.getMemberID(), tag.getRegionVersion()));
assertTrue(vm1Id + " should contain " + tag, vm1vv.contains(tag.getMemberID(), tag.getRegionVersion()));
assertTrue(vm2Id + " should contain " + tag, vm2vv.contains(tag.getMemberID(), tag.getRegionVersion()));
}
}
}
Aggregations