
Example 1 with DiskRegionVersionVector

Use of org.apache.geode.internal.cache.versions.DiskRegionVersionVector in project geode by apache.

The class OplogRVVJUnitTest, method testRecoverRVV:

@Test
public void testRecoverRVV() throws UnknownHostException {
    final DiskInitFile df = context.mock(DiskInitFile.class);
    final LogWriterI18n logger = context.mock(LogWriterI18n.class);
    final GemFireCacheImpl cache = context.mock(GemFireCacheImpl.class);
    // Create a mock disk store impl.
    final DiskStoreImpl parent = context.mock(DiskStoreImpl.class);
    final StatisticsFactory sf = context.mock(StatisticsFactory.class);
    final DiskStoreID ownerId = DiskStoreID.random();
    final DiskStoreID m1 = DiskStoreID.random();
    final DiskStoreID m2 = DiskStoreID.random();
    final DiskRecoveryStore drs = context.mock(DiskRecoveryStore.class);
    context.checking(new Expectations() {

        {
            ignoring(sf);
            allowing(df).getOrCreateCanonicalId(m1);
            will(returnValue(1));
            allowing(df).getOrCreateCanonicalId(m2);
            will(returnValue(2));
            allowing(df).getOrCreateCanonicalId(ownerId);
            will(returnValue(3));
            allowing(df).getCanonicalObject(1);
            will(returnValue(m1));
            allowing(df).getCanonicalObject(2);
            will(returnValue(m2));
            allowing(df).getCanonicalObject(3);
            will(returnValue(ownerId));
            ignoring(df);
        }
    });
    DirectoryHolder dirHolder = new DirectoryHolder(sf, testDirectory, 0, 0);
    context.checking(new Expectations() {

        {
            ignoring(logger);
            allowing(cache).getLoggerI18n();
            will(returnValue(logger));
            allowing(cache).cacheTimeMillis();
            will(returnValue(System.currentTimeMillis()));
            allowing(parent).getCache();
            will(returnValue(cache));
            allowing(parent).getMaxOplogSizeInBytes();
            will(returnValue(10000L));
            allowing(parent).getName();
            will(returnValue("test"));
            allowing(parent).getStats();
            will(returnValue(new DiskStoreStats(sf, "stats")));
            allowing(parent).getDiskInitFile();
            will(returnValue(df));
            allowing(parent).getDiskStoreID();
            will(returnValue(DiskStoreID.random()));
        }
    });
    // Record versions for two members, including an out-of-order version for m1
    // (10 before 7) so the recovered holder also carries exception (gap) information.
    final DiskRegionVersionVector rvv = new DiskRegionVersionVector(ownerId);
    rvv.recordVersion(m1, 0);
    rvv.recordVersion(m1, 1);
    rvv.recordVersion(m1, 2);
    rvv.recordVersion(m1, 10);
    rvv.recordVersion(m1, 7);
    rvv.recordVersion(m2, 0);
    rvv.recordVersion(m2, 1);
    rvv.recordVersion(m2, 2);
    rvv.recordGCVersion(m1, 1);
    rvv.recordGCVersion(m2, 0);
    // create the oplog
    final AbstractDiskRegion diskRegion = context.mock(AbstractDiskRegion.class);
    final PersistentOplogSet oplogSet = context.mock(PersistentOplogSet.class);
    final Map<Long, AbstractDiskRegion> map = new HashMap<Long, AbstractDiskRegion>();
    map.put(5L, diskRegion);
    context.checking(new Expectations() {

        {
            allowing(diskRegion).getRegionVersionVector();
            will(returnValue(rvv));
            allowing(diskRegion).getRVVTrusted();
            will(returnValue(true));
            allowing(parent).getAllDiskRegions();
            will(returnValue(map));
            allowing(oplogSet).getCurrentlyRecovering(5L);
            will(returnValue(drs));
            allowing(oplogSet).getParent();
            will(returnValue(parent));
            ignoring(oplogSet);
            ignoring(parent);
            allowing(diskRegion).getFlags();
            will(returnValue(EnumSet.of(DiskRegionFlag.IS_WITH_VERSIONING)));
        }
    });
    Map<Long, AbstractDiskRegion> regions = parent.getAllDiskRegions();
    Oplog oplog = new Oplog(1, oplogSet, dirHolder);
    oplog.close();
    // Expect recovery to replay the persisted GC versions and version holders
    // into the DiskRecoveryStore and to mark the recovered RVV as trusted.
    context.checking(new Expectations() {

        {
            one(drs).recordRecoveredGCVersion(m1, 1);
            one(drs).recordRecoveredGCVersion(m2, 0);
            one(drs).recordRecoveredVersonHolder(ownerId, rvv.getMemberToVersion().get(ownerId), true);
            one(drs).recordRecoveredVersonHolder(m1, rvv.getMemberToVersion().get(m1), true);
            one(drs).recordRecoveredVersonHolder(m2, rvv.getMemberToVersion().get(m2), true);
            one(drs).setRVVTrusted(true);
        }
    });
    // Re-open the oplog for recovery and replay the persisted drf/crf files.
    oplog = new Oplog(1, oplogSet);
    Collection<File> drfFiles = FileUtils.listFiles(testDirectory, new String[] { "drf" }, true);
    assertEquals(1, drfFiles.size());
    Collection<File> crfFiles = FileUtils.listFiles(testDirectory, new String[] { "crf" }, true);
    assertEquals(1, crfFiles.size());
    oplog.addRecoveredFile(drfFiles.iterator().next(), dirHolder);
    oplog.addRecoveredFile(crfFiles.iterator().next(), dirHolder);
    OplogEntryIdSet deletedIds = new OplogEntryIdSet();
    oplog.recoverDrf(deletedIds, false, true);
    oplog.recoverCrf(deletedIds, true, true, false, Collections.singleton(oplog), true);
    context.assertIsSatisfied();
}
Also used : Expectations (org.jmock.Expectations), DiskRegionVersionVector (org.apache.geode.internal.cache.versions.DiskRegionVersionVector), HashMap (java.util.HashMap), OplogEntryIdSet (org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet), DiskRecoveryStore (org.apache.geode.internal.cache.persistence.DiskRecoveryStore), LogWriterI18n (org.apache.geode.i18n.LogWriterI18n), StatisticsFactory (org.apache.geode.StatisticsFactory), DiskStoreID (org.apache.geode.internal.cache.persistence.DiskStoreID), File (java.io.File), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
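
The test drives DiskRegionVersionVector indirectly through Oplog recovery, so the version bookkeeping itself is easy to miss. Below is a minimal standalone sketch of the recording calls the test relies on. The class name RvvRecordingSketch and the version values are illustrative only; it assumes the geode-core internal classes used above are on the classpath and uses only methods that appear in these examples.

import org.apache.geode.internal.cache.persistence.DiskStoreID;
import org.apache.geode.internal.cache.versions.DiskRegionVersionVector;
import org.apache.geode.internal.cache.versions.RegionVersionHolder;

public class RvvRecordingSketch {

    public static void main(String[] args) {
        DiskStoreID owner = DiskStoreID.random();
        DiskStoreID member = DiskStoreID.random();
        // The vector is owned by this disk store's ID, as in the test above.
        DiskRegionVersionVector rvv = new DiskRegionVersionVector(owner);
        // Record versions out of order: after 10 is seen, versions 3-9 are missing,
        // and recording 7 afterwards narrows that gap.
        rvv.recordVersion(member, 1);
        rvv.recordVersion(member, 2);
        rvv.recordVersion(member, 10);
        rvv.recordVersion(member, 7);
        // Versions at or below the GC version are considered collectable.
        rvv.recordGCVersion(member, 1);
        // This is the per-member holder the recovery path hands to
        // DiskRecoveryStore.recordRecoveredVersonHolder(...) in the test.
        RegionVersionHolder<DiskStoreID> holder = rvv.getMemberToVersion().get(member);
        System.out.println("Highest version recorded for member: " + holder.getVersion());
    }
}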

Example 2 with DiskRegionVersionVector

Use of org.apache.geode.internal.cache.versions.DiskRegionVersionVector in project geode by apache.

The class DiskInitFile, method cmnClearRegion:

public void cmnClearRegion(long drId, ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>> memberToVersion) {
    DiskRegionView drv = getDiskRegionById(drId);
    if (drv.getClearRVV() == null) {
        this.ifLiveRecordCount++;
    }
    // otherwise previous clear is cancelled so don't change liveRecordCount
    this.ifTotalRecordCount++;
    DiskStoreID ownerId = parent.getDiskStoreID();
    // Create a fake RVV for clear purposes. We only need the memberToVersion information.
    RegionVersionHolder<DiskStoreID> ownerExceptions = memberToVersion.remove(ownerId);
    long ownerVersion = ownerExceptions == null ? 0 : ownerExceptions.getVersion();
    RegionVersionVector rvv = new DiskRegionVersionVector(ownerId, memberToVersion, ownerVersion, new ConcurrentHashMap<>(), 0L, false, ownerExceptions);
    drv.setClearRVV(rvv);
}
Also used : DiskRegionVersionVector (org.apache.geode.internal.cache.versions.DiskRegionVersionVector), RegionVersionVector (org.apache.geode.internal.cache.versions.RegionVersionVector), DiskStoreID (org.apache.geode.internal.cache.persistence.DiskStoreID), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView)
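
The interesting step in cmnClearRegion is how the incoming memberToVersion map becomes a clear RVV: the owner's holder is removed from the map, its version becomes the vector's own version, and the holder is passed back in as the owner's exceptions. The following is a minimal sketch of that assembly. The class name ClearRvvSketch and the version values are made up; it assumes RegionVersionHolder's public long-valued constructor, while the final constructor call is the same one cmnClearRegion uses above.

import java.util.concurrent.ConcurrentHashMap;

import org.apache.geode.internal.cache.persistence.DiskStoreID;
import org.apache.geode.internal.cache.versions.DiskRegionVersionVector;
import org.apache.geode.internal.cache.versions.RegionVersionHolder;

public class ClearRvvSketch {

    public static void main(String[] args) {
        DiskStoreID ownerId = DiskStoreID.random();
        DiskStoreID remote = DiskStoreID.random();
        // Versions as they might arrive in the memberToVersion argument above.
        ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>> memberToVersion =
            new ConcurrentHashMap<>();
        memberToVersion.put(ownerId, new RegionVersionHolder<>(5L));
        memberToVersion.put(remote, new RegionVersionHolder<>(3L));
        // Pull the owner's holder out of the map: its version becomes the vector's
        // own version and the holder itself carries the owner's exceptions.
        RegionVersionHolder<DiskStoreID> ownerExceptions = memberToVersion.remove(ownerId);
        long ownerVersion = ownerExceptions == null ? 0 : ownerExceptions.getVersion();
        // Same constructor call cmnClearRegion uses; GC versions are left empty.
        DiskRegionVersionVector rvv = new DiskRegionVersionVector(ownerId, memberToVersion,
            ownerVersion, new ConcurrentHashMap<>(), 0L, false, ownerExceptions);
        System.out.println("Built clear RVV owned by " + ownerId);
    }
}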

Aggregations

DiskStoreID (org.apache.geode.internal.cache.persistence.DiskStoreID): 2 usages
DiskRegionVersionVector (org.apache.geode.internal.cache.versions.DiskRegionVersionVector): 2 usages
File (java.io.File): 1 usage
HashMap (java.util.HashMap): 1 usage
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1 usage
StatisticsFactory (org.apache.geode.StatisticsFactory): 1 usage
LogWriterI18n (org.apache.geode.i18n.LogWriterI18n): 1 usage
OplogEntryIdSet (org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet): 1 usage
DiskRecoveryStore (org.apache.geode.internal.cache.persistence.DiskRecoveryStore): 1 usage
DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView): 1 usage
RegionVersionVector (org.apache.geode.internal.cache.versions.RegionVersionVector): 1 usage
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 1 usage
Expectations (org.jmock.Expectations): 1 usage
Test (org.junit.Test): 1 usage