Example 11 with DiskStoreID

Use of org.apache.geode.internal.cache.persistence.DiskStoreID in project geode by apache.

The class RegionVersionVectorJUnitTest, method test48066_1.

@Test
public void test48066_1() {
    DiskStoreID id0 = new DiskStoreID(0, 0);
    DiskRegionVersionVector rvv0 = new DiskRegionVersionVector(id0);
    for (int i = 1; i <= 3; i++) {
        rvv0.recordVersion(id0, i);
    }
    System.out.println("rvv0=" + rvv0.fullToString());
    DiskRegionVersionVector rvv1 = (DiskRegionVersionVector) rvv0.getCloneForTransmission();
    System.out.println("after clone, rvv1=" + rvv1.fullToString());
    DiskRegionVersionVector rvv2 = new DiskRegionVersionVector(id0);
    for (int i = 1; i <= 10; i++) {
        rvv2.recordVersion(id0, i);
    }
    rvv2.recordVersions(rvv1);
    System.out.println("after init, rvv2=" + rvv2.fullToString());
    rvv2.recordVersion(id0, 4);
    System.out.println("after record 4, rvv2=" + rvv2.fullToString());
    assertEquals(4, rvv2.getCurrentVersion());
    rvv2.recordVersion(id0, 7);
    System.out.println("after record 7, rvv2=" + rvv2.fullToString());
    assertEquals(7, rvv2.getCurrentVersion());
}
Also used: DiskStoreID(org.apache.geode.internal.cache.persistence.DiskStoreID), Test(org.junit.Test), UnitTest(org.apache.geode.test.junit.categories.UnitTest)
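
For orientation, the test above exercises the core DiskRegionVersionVector operations that recur throughout these examples: recording versions for a DiskStoreID, cloning the vector for transmission, and merging a received vector back in with recordVersions. Below is a minimal standalone sketch of that flow using only calls that appear in these examples; the class name and printed output are illustrative, not part of the Geode sources.

import org.apache.geode.internal.cache.persistence.DiskStoreID;
import org.apache.geode.internal.cache.versions.DiskRegionVersionVector;

public class DiskStoreIdSketch {

    public static void main(String[] args) {
        // A DiskStoreID can be built from explicit most/least significant bits
        // (as in the test above) or generated randomly (as in the later examples).
        DiskStoreID ownerId = new DiskStoreID(0, 0);
        DiskStoreID otherMember = DiskStoreID.random();

        // Each disk store owns a region version vector keyed by DiskStoreID.
        DiskRegionVersionVector rvv = new DiskRegionVersionVector(ownerId);
        for (int i = 1; i <= 3; i++) {
            // record versions originated by the owner itself
            rvv.recordVersion(ownerId, i);
        }
        // versions from other members are tracked per member
        rvv.recordVersion(otherMember, 1);

        // A clone is what gets shipped to peers; a receiver merges it with recordVersions.
        DiskRegionVersionVector received =
            (DiskRegionVersionVector) rvv.getCloneForTransmission();
        DiskRegionVersionVector receiverRvv = new DiskRegionVersionVector(otherMember);
        receiverRvv.recordVersions(received);

        System.out.println("sender=" + rvv.fullToString());
        System.out.println("receiver=" + receiverRvv.fullToString());
    }
}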

Example 12 with DiskStoreID

Use of org.apache.geode.internal.cache.persistence.DiskStoreID in project geode by apache.

The class OplogRVVJUnitTest, method testRecoverRVV.

@Test
public void testRecoverRVV() throws UnknownHostException {
    final DiskInitFile df = context.mock(DiskInitFile.class);
    final LogWriterI18n logger = context.mock(LogWriterI18n.class);
    final GemFireCacheImpl cache = context.mock(GemFireCacheImpl.class);
    // Create a mock disk store impl.
    final DiskStoreImpl parent = context.mock(DiskStoreImpl.class);
    final StatisticsFactory sf = context.mock(StatisticsFactory.class);
    final DiskStoreID ownerId = DiskStoreID.random();
    final DiskStoreID m1 = DiskStoreID.random();
    final DiskStoreID m2 = DiskStoreID.random();
    final DiskRecoveryStore drs = context.mock(DiskRecoveryStore.class);
    context.checking(new Expectations() {

        {
            ignoring(sf);
            allowing(df).getOrCreateCanonicalId(m1);
            will(returnValue(1));
            allowing(df).getOrCreateCanonicalId(m2);
            will(returnValue(2));
            allowing(df).getOrCreateCanonicalId(ownerId);
            will(returnValue(3));
            allowing(df).getCanonicalObject(1);
            will(returnValue(m1));
            allowing(df).getCanonicalObject(2);
            will(returnValue(m2));
            allowing(df).getCanonicalObject(3);
            will(returnValue(ownerId));
            ignoring(df);
        }
    });
    DirectoryHolder dirHolder = new DirectoryHolder(sf, testDirectory, 0, 0);
    context.checking(new Expectations() {

        {
            ignoring(logger);
            allowing(cache).getLoggerI18n();
            will(returnValue(logger));
            allowing(cache).cacheTimeMillis();
            will(returnValue(System.currentTimeMillis()));
            allowing(parent).getCache();
            will(returnValue(cache));
            allowing(parent).getMaxOplogSizeInBytes();
            will(returnValue(10000L));
            allowing(parent).getName();
            will(returnValue("test"));
            allowing(parent).getStats();
            will(returnValue(new DiskStoreStats(sf, "stats")));
            allowing(parent).getDiskInitFile();
            will(returnValue(df));
            allowing(parent).getDiskStoreID();
            will(returnValue(DiskStoreID.random()));
        }
    });
    final DiskRegionVersionVector rvv = new DiskRegionVersionVector(ownerId);
    rvv.recordVersion(m1, 0);
    rvv.recordVersion(m1, 1);
    rvv.recordVersion(m1, 2);
    rvv.recordVersion(m1, 10);
    rvv.recordVersion(m1, 7);
    rvv.recordVersion(m2, 0);
    rvv.recordVersion(m2, 1);
    rvv.recordVersion(m2, 2);
    rvv.recordGCVersion(m1, 1);
    rvv.recordGCVersion(m2, 0);
    // create the oplog
    final AbstractDiskRegion diskRegion = context.mock(AbstractDiskRegion.class);
    final PersistentOplogSet oplogSet = context.mock(PersistentOplogSet.class);
    final Map<Long, AbstractDiskRegion> map = new HashMap<Long, AbstractDiskRegion>();
    map.put(5L, diskRegion);
    context.checking(new Expectations() {

        {
            allowing(diskRegion).getRegionVersionVector();
            will(returnValue(rvv));
            allowing(diskRegion).getRVVTrusted();
            will(returnValue(true));
            allowing(parent).getAllDiskRegions();
            will(returnValue(map));
            allowing(oplogSet).getCurrentlyRecovering(5L);
            will(returnValue(drs));
            allowing(oplogSet).getParent();
            will(returnValue(parent));
            ignoring(oplogSet);
            ignoring(parent);
            allowing(diskRegion).getFlags();
            will(returnValue(EnumSet.of(DiskRegionFlag.IS_WITH_VERSIONING)));
        }
    });
    Map<Long, AbstractDiskRegion> regions = parent.getAllDiskRegions();
    Oplog oplog = new Oplog(1, oplogSet, dirHolder);
    oplog.close();
    context.checking(new Expectations() {

        {
            one(drs).recordRecoveredGCVersion(m1, 1);
            one(drs).recordRecoveredGCVersion(m2, 0);
            one(drs).recordRecoveredVersonHolder(ownerId, rvv.getMemberToVersion().get(ownerId), true);
            one(drs).recordRecoveredVersonHolder(m1, rvv.getMemberToVersion().get(m1), true);
            one(drs).recordRecoveredVersonHolder(m2, rvv.getMemberToVersion().get(m2), true);
            one(drs).setRVVTrusted(true);
        }
    });
    oplog = new Oplog(1, oplogSet);
    Collection<File> drfFiles = FileUtils.listFiles(testDirectory, new String[] { "drf" }, true);
    assertEquals(1, drfFiles.size());
    Collection<File> crfFiles = FileUtils.listFiles(testDirectory, new String[] { "crf" }, true);
    assertEquals(1, crfFiles.size());
    oplog.addRecoveredFile(drfFiles.iterator().next(), dirHolder);
    oplog.addRecoveredFile(crfFiles.iterator().next(), dirHolder);
    OplogEntryIdSet deletedIds = new OplogEntryIdSet();
    oplog.recoverDrf(deletedIds, false, true);
    oplog.recoverCrf(deletedIds, true, true, false, Collections.singleton(oplog), true);
    context.assertIsSatisfied();
}
Also used: Expectations(org.jmock.Expectations), DiskRegionVersionVector(org.apache.geode.internal.cache.versions.DiskRegionVersionVector), HashMap(java.util.HashMap), OplogEntryIdSet(org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet), DiskRecoveryStore(org.apache.geode.internal.cache.persistence.DiskRecoveryStore), LogWriterI18n(org.apache.geode.i18n.LogWriterI18n), StatisticsFactory(org.apache.geode.StatisticsFactory), DiskStoreID(org.apache.geode.internal.cache.persistence.DiskStoreID), File(java.io.File), Test(org.junit.Test), IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
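
Most of the test above is jMock setup rather than Oplog logic: mocks come from a Mockery, stubbing rules are declared with allowing(...) followed by will(returnValue(...)), strict expectations use one(...), and context.assertIsSatisfied() verifies them at the end. Here is a stripped-down sketch of that pattern, using a hypothetical Lookup interface in place of Geode's DiskInitFile; it is only meant to make the mocking idiom readable, not to reproduce the test.

import org.jmock.Expectations;
import org.jmock.Mockery;

public class JMockPatternSketch {

    // Hypothetical collaborator standing in for DiskInitFile in the test above.
    public interface Lookup {
        int getOrCreateCanonicalId(Object member);
        Object getCanonicalObject(int id);
    }

    public static void main(String[] args) {
        Mockery context = new Mockery();
        final Lookup lookup = context.mock(Lookup.class);
        final Object member = new Object();

        context.checking(new Expectations() {
            {
                // allowing(...) rules may be invoked any number of times, including zero
                allowing(lookup).getOrCreateCanonicalId(member);
                will(returnValue(1));
                // one(...) (the older alias of oneOf) must be invoked exactly once
                one(lookup).getCanonicalObject(1);
                will(returnValue(member));
            }
        });

        lookup.getOrCreateCanonicalId(member); // permitted by the allowing(...) rule
        lookup.getCanonicalObject(1);          // satisfies the one(...) expectation

        // Throws if any strict expectation was not met.
        context.assertIsSatisfied();
    }
}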

Example 13 with DiskStoreID

Use of org.apache.geode.internal.cache.persistence.DiskStoreID in project geode by apache.

The class OrderedTombstoneMapJUnitTest, method test.

@Test
public void test() {
    OrderedTombstoneMap<String> map = new OrderedTombstoneMap<String>();
    DiskStoreID id1 = DiskStoreID.random();
    DiskStoreID id2 = DiskStoreID.random();
    map.put(createVersionTag(id1, 1, 7), "one");
    map.put(createVersionTag(id1, 3, 2), "two");
    map.put(createVersionTag(id2, 3, 5), "three");
    map.put(createVersionTag(id1, 2, 3), "four");
    map.put(createVersionTag(id1, 0, 2), "five");
    map.put(createVersionTag(id2, 4, 4), "six");
    // Now make sure we get the entries in the order we expect (ordered by version tag within a member
    // and by timestamp otherwise).
    assertEquals("five", map.take().getValue());
    assertEquals("three", map.take().getValue());
    assertEquals("six", map.take().getValue());
    assertEquals("one", map.take().getValue());
    assertEquals("four", map.take().getValue());
    assertEquals("two", map.take().getValue());
}
Also used: DiskStoreID(org.apache.geode.internal.cache.persistence.DiskStoreID), UnitTest(org.apache.geode.test.junit.categories.UnitTest), Test(org.junit.Test)
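
createVersionTag is a private helper of the test class and is not shown in this snippet. A plausible sketch of what such a helper does follows; treat the VersionTag.create(...) factory call as an assumption rather than a quote of the real helper. The point is that OrderedTombstoneMap hands entries back ordered by region version within a member and, across members, roughly by timestamp, which is what the assertions above check.

import org.apache.geode.internal.cache.persistence.DiskStoreID;
import org.apache.geode.internal.cache.versions.VersionTag;

// Assumed shape of the helper used in the test above: build a tag for the given
// member with a region version and a timestamp, the two fields OrderedTombstoneMap
// orders on.
class VersionTagSketch {

    static VersionTag createVersionTag(DiskStoreID member, long regionVersion, long timeStamp) {
        VersionTag tag = VersionTag.create(member); // assumption: factory keyed by the member id
        tag.setRegionVersion(regionVersion);
        tag.setVersionTimeStamp(timeStamp);
        return tag;
    }
}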

Example 14 with DiskStoreID

Use of org.apache.geode.internal.cache.persistence.DiskStoreID in project geode by apache.

The class GIIDeltaDUnitTest, method testUnfinishedOpsWithoutExceptionList.

/**
   * Let R4 and R5 remain unfinished, with R5 being the last operation from R. P's RVV is therefore
   * still P:x,R3 with no exception list, even though R4 and R5 are in fact unfinished operations.
   */
@Test
public void testUnfinishedOpsWithoutExceptionList() throws Throwable {
    prepareForEachTest();
    final DiskStoreID memberP = getMemberID(P);
    final DiskStoreID memberR = getMemberID(R);
    assertEquals(0, DistributedCacheOperation.SLOW_DISTRIBUTION_MS);
    prepareCommonTestData(6);
    VersionTag expect_tag = getVersionTag(R, "key5");
    final long[] exceptionlist = { 4, 5 };
    R.invoke(() -> GIIDeltaDUnitTest.slowGII(exceptionlist));
    AsyncInvocation async1 = doOnePutAsync(R, 4, "key4");
    // R's rvv=r4, gc=0
    waitForToVerifyRVV(R, memberR, 4, null, 0);
    AsyncInvocation async2 = doOneDestroyAsync(R, 5, "key5");
    // R's rvv=r5, gc=0
    waitForToVerifyRVV(R, memberR, 5, null, 0);
    // P should have unfinished ops R4,R5, but they did not show up in exception list
    // P's rvv=r3, gc=0
    waitForToVerifyRVV(P, memberR, 3, null, 0);
    // let p7 succeed
    doOnePut(P, 7, "key1");
    // P's rvv=p7, gc=0
    waitForToVerifyRVV(P, memberP, 7, null, 0);
    // P's rvv=r3, gc=0
    waitForToVerifyRVV(P, memberR, 3, null, 0);
    // R's rvv=p7, gc=0
    waitForToVerifyRVV(R, memberP, 7, null, 0);
    // R's rvv=r5, gc=0
    waitForToVerifyRVV(R, memberR, 5, null, 0);
    // now P's rvv=P7,R3, R's RVV=P7,R5
    // shutdown R and restart
    byte[] R_rvv_bytes = getRVVByteArray(R, REGION_NAME);
    closeCache(R);
    checkIfFullGII(P, REGION_NAME, R_rvv_bytes, false);
    createDistributedRegion(R);
    // R's rvv=p7, gc=0
    waitForToVerifyRVV(R, memberP, 7, null, 0);
    // R's rvv=r3, gc=0
    waitForToVerifyRVV(R, memberR, 3, exceptionlist, 0);
    RegionVersionVector p_rvv = getRVV(P);
    RegionVersionVector r_rvv = getRVV(R);
    // after gii, rvv should be the same
    assertSameRVV(p_rvv, r_rvv);
    // full GII chunk has 4 keys: key1,2(tombstone),3,5
    // delta GII chunk has 1 key, i.e. key5(T), which is an unfinished operation
    verifyDeltaSizeFromStats(R, 1, 1);
    // verify unfinished op for key5 is revoked
    waitToVerifyKey(R, "key5", generateValue(R));
    VersionTag tag = getVersionTag(R, "key5");
    assertTrue(expect_tag.equals(tag));
    // shutdown R again and restart, to verify localVersion=5 will be saved and recovered
    closeCache(R);
    createDistributedRegion(R);
    // P will receive R6 and have exception R6(3-6)
    // r6 will pass
    doOnePut(R, 6, "key1");
    // R's rvv=r6, gc=0
    waitForToVerifyRVV(R, memberR, 6, exceptionlist, 0);
    // P's rvv=r6(3-6), gc=0
    waitForToVerifyRVV(P, memberR, 6, exceptionlist, 0);
}
Also used: VersionTag(org.apache.geode.internal.cache.versions.VersionTag), RegionVersionVector(org.apache.geode.internal.cache.versions.RegionVersionVector), DiskStoreID(org.apache.geode.internal.cache.persistence.DiskStoreID), FlakyTest(org.apache.geode.test.junit.categories.FlakyTest), Test(org.junit.Test), DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
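
A note on the RVV notation used in the comments above: an entry like R6(3-6) means R's versions have been seen up to 6, but the range between 3 and 6 (versions 4 and 5) is missing and is tracked as an exception until those operations arrive. Below is a small sketch of how such a gap arises, assuming the contains(member, version) query behaves as it does in RegionVersionVectorJUnitTest; the expected values in the comments are illustrative.

import org.apache.geode.internal.cache.persistence.DiskStoreID;
import org.apache.geode.internal.cache.versions.DiskRegionVersionVector;

public class RvvExceptionSketch {

    public static void main(String[] args) {
        DiskStoreID owner = DiskStoreID.random();
        DiskStoreID memberR = DiskStoreID.random();
        DiskRegionVersionVector rvv = new DiskRegionVersionVector(owner);

        // Versions 1..3 from R arrive in order, then 6 arrives before 4 and 5.
        rvv.recordVersion(memberR, 1);
        rvv.recordVersion(memberR, 2);
        rvv.recordVersion(memberR, 3);
        rvv.recordVersion(memberR, 6);

        // The vector now reads roughly as R6(3-6): 6 is the highest version seen,
        // while 4 and 5 sit in the exception (gap) list until they are recorded.
        System.out.println(rvv.fullToString());
        System.out.println("contains 3? " + rvv.contains(memberR, 3)); // expected: true
        System.out.println("contains 4? " + rvv.contains(memberR, 4)); // expected: false
        System.out.println("contains 6? " + rvv.contains(memberR, 6)); // expected: true
    }
}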

Example 15 with DiskStoreID

Use of org.apache.geode.internal.cache.persistence.DiskStoreID in project geode by apache.

The class GIIDeltaDUnitTest, method testDeltaGIIWithOnlyUnfinishedOp.

/**
   * vm0 and vm1 are peers, each holding a DR. Create some exception list. Before GII, P's RVV is
   * P6,R6(3-6) and R's RVV is P6,R6; the RVVGCs are both P4,R0. vm1 goes offline and then restarts.
   * The delta GII should send a delta containing only the unfinished operations R4 and R5.
   */
@Test
public void testDeltaGIIWithOnlyUnfinishedOp() throws Throwable {
    prepareForEachTest();
    final DiskStoreID memberP = getMemberID(P);
    final DiskStoreID memberR = getMemberID(R);
    final long[] exceptionlist = { 4, 5 };
    assertEquals(0, DistributedCacheOperation.SLOW_DISTRIBUTION_MS);
    prepareCommonTestData(6);
    VersionTag expect_tag = getVersionTag(R, "key5");
    // force tombstone GC to let the RVVGC become P4:R0
    forceGC(P, 2);
    // P's rvv=p6, gc=4
    waitForToVerifyRVV(P, memberP, 6, null, 4);
    // P's rvv=r3, gc=0
    waitForToVerifyRVV(P, memberR, 3, null, 0);
    createUnfinishedOperationsR4R5();
    // now P's cache still only has key1, key3, key5
    byte[] R_rvv_bytes = getRVVByteArray(R, REGION_NAME);
    closeCache(R);
    // restart and gii
    checkIfFullGII(P, REGION_NAME, R_rvv_bytes, false);
    createDistributedRegion(R);
    // P's rvv=p6, gc=4
    waitForToVerifyRVV(P, memberP, 6, null, 4);
    // P's rvv=r6, gc=0
    waitForToVerifyRVV(P, memberR, 6, exceptionlist, 0);
    // R's rvv=p6, gc=4
    waitForToVerifyRVV(R, memberP, 6, null, 4);
    // R's rvv=r6, gc=0
    waitForToVerifyRVV(R, memberR, 6, exceptionlist, 0);
    RegionVersionVector p_rvv = getRVV(P);
    RegionVersionVector r_rvv = getRVV(R);
    assertSameRVV(p_rvv, r_rvv);
    // If fullGII, the key size in gii chunk is 3, i.e. key1,key3,key5. key2 is GCed.
    // If delta GII, the key size should be 1: key5(T), which is an unfinished operation
    verifyDeltaSizeFromStats(R, 1, 1);
    // verify unfinished op for key5 is revoked
    waitToVerifyKey(R, "key5", generateValue(R));
    VersionTag tag = getVersionTag(R, "key5");
    assertTrue(expect_tag.equals(tag));
    // restart P, since R has received exceptionlist R4,R5 from P
    closeCache(P);
    createDistributedRegion(P);
    // P's rvv=p6, gc=4
    waitForToVerifyRVV(P, memberP, 6, null, 4);
    // P's rvv=r6, gc=0
    waitForToVerifyRVV(P, memberR, 6, exceptionlist, 0);
    // If fullGII, the key size in gii chunk is 3, i.e. key1,key3,key5. key4 is removed as
    // unfinished op
    // If deltaGII, the key size should be 0
    verifyDeltaSizeFromStats(P, 0, 1);
    // restart R, to make sure the unfinished op is handled correctly
    // for bug 47616
    forceGC(R, 1);
    // P's rvv=R6, gc=5
    waitForToVerifyRVV(P, memberR, 6, null, 5);
    // R's rvv=R6, gc=5
    waitForToVerifyRVV(R, memberR, 6, null, 5);
    closeCache(R);
    createDistributedRegion(R);
    // If fullGII, the key size in gii chunk is 3, i.e. key1,key3,key5. key4 is removed as
    // unfinished op
    // If deltaGII, the key size should be 0
    verifyDeltaSizeFromStats(R, 0, 1);
    // verify unfinished op for key5 is revoked
    waitToVerifyKey(R, "key5", generateValue(R));
    tag = getVersionTag(R, "key5");
    assertTrue(expect_tag.equals(tag));
}
Also used: VersionTag(org.apache.geode.internal.cache.versions.VersionTag), RegionVersionVector(org.apache.geode.internal.cache.versions.RegionVersionVector), DiskStoreID(org.apache.geode.internal.cache.persistence.DiskStoreID), FlakyTest(org.apache.geode.test.junit.categories.FlakyTest), Test(org.junit.Test), DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Aggregations

DiskStoreID (org.apache.geode.internal.cache.persistence.DiskStoreID): 53
Test (org.junit.Test): 38
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 26
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 26
RegionVersionVector (org.apache.geode.internal.cache.versions.RegionVersionVector): 19
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 11
VersionTag (org.apache.geode.internal.cache.versions.VersionTag): 10
HashMap (java.util.HashMap): 5
Category (org.junit.experimental.categories.Category): 4
Map (java.util.Map): 3
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 3
DiskAccessException (org.apache.geode.cache.DiskAccessException): 3
PersistentMemberID (org.apache.geode.internal.cache.persistence.PersistentMemberID): 3
Int2ObjectMap (it.unimi.dsi.fastutil.ints.Int2ObjectMap): 2
Int2ObjectOpenHashMap (it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap): 2
IOException (java.io.IOException): 2
HashSet (java.util.HashSet): 2
Set (java.util.Set): 2
HeapDataOutputStream (org.apache.geode.internal.HeapDataOutputStream): 2
PersistentMemberPattern (org.apache.geode.internal.cache.persistence.PersistentMemberPattern): 2