Example 1 with OplogEntryIdSet

Use of org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet in project geode by apache.

From class OplogRVVJUnitTest, method testRecoverRVV.

@Test
public void testRecoverRVV() throws UnknownHostException {
    final DiskInitFile df = context.mock(DiskInitFile.class);
    final LogWriterI18n logger = context.mock(LogWriterI18n.class);
    final GemFireCacheImpl cache = context.mock(GemFireCacheImpl.class);
    // Create a mock disk store impl.
    final DiskStoreImpl parent = context.mock(DiskStoreImpl.class);
    final StatisticsFactory sf = context.mock(StatisticsFactory.class);
    final DiskStoreID ownerId = DiskStoreID.random();
    final DiskStoreID m1 = DiskStoreID.random();
    final DiskStoreID m2 = DiskStoreID.random();
    final DiskRecoveryStore drs = context.mock(DiskRecoveryStore.class);
    context.checking(new Expectations() {

        {
            ignoring(sf);
            allowing(df).getOrCreateCanonicalId(m1);
            will(returnValue(1));
            allowing(df).getOrCreateCanonicalId(m2);
            will(returnValue(2));
            allowing(df).getOrCreateCanonicalId(ownerId);
            will(returnValue(3));
            allowing(df).getCanonicalObject(1);
            will(returnValue(m1));
            allowing(df).getCanonicalObject(2);
            will(returnValue(m2));
            allowing(df).getCanonicalObject(3);
            will(returnValue(ownerId));
            ignoring(df);
        }
    });
    DirectoryHolder dirHolder = new DirectoryHolder(sf, testDirectory, 0, 0);
    context.checking(new Expectations() {

        {
            ignoring(logger);
            allowing(cache).getLoggerI18n();
            will(returnValue(logger));
            allowing(cache).cacheTimeMillis();
            will(returnValue(System.currentTimeMillis()));
            allowing(parent).getCache();
            will(returnValue(cache));
            allowing(parent).getMaxOplogSizeInBytes();
            will(returnValue(10000L));
            allowing(parent).getName();
            will(returnValue("test"));
            allowing(parent).getStats();
            will(returnValue(new DiskStoreStats(sf, "stats")));
            allowing(parent).getDiskInitFile();
            will(returnValue(df));
            allowing(parent).getDiskStoreID();
            will(returnValue(DiskStoreID.random()));
        }
    });
    final DiskRegionVersionVector rvv = new DiskRegionVersionVector(ownerId);
    rvv.recordVersion(m1, 0);
    rvv.recordVersion(m1, 1);
    rvv.recordVersion(m1, 2);
    rvv.recordVersion(m1, 10);
    rvv.recordVersion(m1, 7);
    rvv.recordVersion(m2, 0);
    rvv.recordVersion(m2, 1);
    rvv.recordVersion(m2, 2);
    rvv.recordGCVersion(m1, 1);
    rvv.recordGCVersion(m2, 0);
    // mock the disk region and oplog set, then create the oplog
    final AbstractDiskRegion diskRegion = context.mock(AbstractDiskRegion.class);
    final PersistentOplogSet oplogSet = context.mock(PersistentOplogSet.class);
    final Map<Long, AbstractDiskRegion> map = new HashMap<Long, AbstractDiskRegion>();
    map.put(5L, diskRegion);
    context.checking(new Expectations() {

        {
            allowing(diskRegion).getRegionVersionVector();
            will(returnValue(rvv));
            allowing(diskRegion).getRVVTrusted();
            will(returnValue(true));
            allowing(parent).getAllDiskRegions();
            will(returnValue(map));
            allowing(oplogSet).getCurrentlyRecovering(5L);
            will(returnValue(drs));
            allowing(oplogSet).getParent();
            will(returnValue(parent));
            ignoring(oplogSet);
            ignoring(parent);
            allowing(diskRegion).getFlags();
            will(returnValue(EnumSet.of(DiskRegionFlag.IS_WITH_VERSIONING)));
        }
    });
    Map<Long, AbstractDiskRegion> regions = parent.getAllDiskRegions();
    // creating the oplog writes the crf/drf files; close it so they can be recovered
    Oplog oplog = new Oplog(1, oplogSet, dirHolder);
    oplog.close();
    context.checking(new Expectations() {

        {
            one(drs).recordRecoveredGCVersion(m1, 1);
            one(drs).recordRecoveredGCVersion(m2, 0);
            one(drs).recordRecoveredVersonHolder(ownerId, rvv.getMemberToVersion().get(ownerId), true);
            one(drs).recordRecoveredVersonHolder(m1, rvv.getMemberToVersion().get(m1), true);
            one(drs).recordRecoveredVersonHolder(m2, rvv.getMemberToVersion().get(m2), true);
            one(drs).setRVVTrusted(true);
        }
    });
    // re-create the oplog for recovery and attach the files found on disk
    oplog = new Oplog(1, oplogSet);
    Collection<File> drfFiles = FileUtils.listFiles(testDirectory, new String[] { "drf" }, true);
    assertEquals(1, drfFiles.size());
    Collection<File> crfFiles = FileUtils.listFiles(testDirectory, new String[] { "crf" }, true);
    assertEquals(1, crfFiles.size());
    oplog.addRecoveredFile(drfFiles.iterator().next(), dirHolder);
    oplog.addRecoveredFile(crfFiles.iterator().next(), dirHolder);
    OplogEntryIdSet deletedIds = new OplogEntryIdSet();
    oplog.recoverDrf(deletedIds, false, true);
    oplog.recoverCrf(deletedIds, true, true, false, Collections.singleton(oplog), true);
    context.assertIsSatisfied();
}
Also used: Expectations (org.jmock.Expectations), DiskRegionVersionVector (org.apache.geode.internal.cache.versions.DiskRegionVersionVector), HashMap (java.util.HashMap), OplogEntryIdSet (org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet), DiskRecoveryStore (org.apache.geode.internal.cache.persistence.DiskRecoveryStore), LogWriterI18n (org.apache.geode.i18n.LogWriterI18n), StatisticsFactory (org.apache.geode.StatisticsFactory), DiskStoreID (org.apache.geode.internal.cache.persistence.DiskStoreID), File (java.io.File), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
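
The test above builds its jMock expectations in stages: stubbing blocks (allowing/ignoring) are registered before each collaborator is exercised, and a final one(...) block states the calls that must happen exactly once, verified by context.assertIsSatisfied(). A minimal, self-contained sketch of that pattern, using a hypothetical Store interface rather than any Geode type:

import static org.junit.Assert.assertEquals;

import org.jmock.Expectations;
import org.jmock.Mockery;
import org.junit.Test;

public class StagedExpectationsSketch {

    // Hypothetical interface, invented for illustration only.
    interface Store {
        long read(long id);
        void close();
    }

    private final Mockery context = new Mockery();

    @Test
    public void readsThenCloses() {
        final Store store = context.mock(Store.class);
        // Stage 1: stubbing. allowing(...) permits any number of calls.
        context.checking(new Expectations() {{
            allowing(store).read(1L);
            will(returnValue(42L));
        }});
        assertEquals(42L, store.read(1L));
        // Stage 2, added mid-test: one(...) requires exactly one call.
        context.checking(new Expectations() {{
            one(store).close();
        }});
        store.close();
        // Fails the test if any one(...) expectation was never met.
        context.assertIsSatisfied();
    }
}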

Example 2 with OplogEntryIdSet

Use of org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet in project geode by apache.

From class OplogEntryIdSetJUnitTest, method testBasics.

@Test
public void testBasics() {
    OplogEntryIdSet s = new OplogEntryIdSet();
    // ids in the 32-bit range are absent until added
    for (long i = 1; i < 777777; i++) {
        assertEquals(false, s.contains(i));
    }
    for (long i = 1; i < 777777; i++) {
        s.add(i);
    }
    for (long i = 1; i < 777777; i++) {
        assertEquals(true, s.contains(i));
    }
    // the reserved invalid id must be rejected
    try {
        s.add(DiskStoreImpl.INVALID_ID);
        fail("expected IllegalArgumentException");
    } catch (IllegalArgumentException expected) {
    }
    // 0 is never present; the largest 32-bit id is a legal member
    assertEquals(false, s.contains(0));
    assertEquals(false, s.contains(0x00000000FFFFFFFFL));
    s.add(0x00000000FFFFFFFFL);
    assertEquals(true, s.contains(0x00000000FFFFFFFFL));
    // ids just above the 32-bit boundary
    for (long i = 0x00000000FFFFFFFFL + 1; i < 0x00000000FFFFFFFFL + 777777; i++) {
        assertEquals(false, s.contains(i));
    }
    for (long i = 0x00000000FFFFFFFFL + 1; i < 0x00000000FFFFFFFFL + 777777; i++) {
        s.add(i);
    }
    for (long i = 0x00000000FFFFFFFFL + 1; i < 0x00000000FFFFFFFFL + 777777; i++) {
        assertEquals(true, s.contains(i));
    }
    // the earlier small ids are still present after the large adds
    for (long i = 1; i < 777777; i++) {
        assertEquals(true, s.contains(i));
    }
    // extreme long values are accepted as well
    assertEquals(false, s.contains(Long.MAX_VALUE));
    s.add(Long.MAX_VALUE);
    assertEquals(true, s.contains(Long.MAX_VALUE));
    assertEquals(false, s.contains(Long.MIN_VALUE));
    s.add(Long.MIN_VALUE);
    assertEquals(true, s.contains(Long.MIN_VALUE));
}
Also used: OplogEntryIdSet (org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet), UnitTest (org.apache.geode.test.junit.categories.UnitTest), Test (org.junit.Test)
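
The test exercises ids on both sides of the 32-bit boundary, which hints at why OplogEntryIdSet exists instead of a plain HashSet<Long>: ids that fit in 32 bits can be stored far more compactly. Below is a minimal sketch of that idea, assuming the int/long split suggested by the LongOpenHashSet import in Example 3; the class name EntryIdSetSketch is hypothetical and the real implementation inside DiskStoreImpl may differ in detail:

import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
import it.unimi.dsi.fastutil.longs.LongOpenHashSet;

// Sketch of a memory-lean set of long entry ids. Ids in [1, 0xFFFFFFFF] are
// narrowed into an IntOpenHashSet; everything else (including negatives)
// goes into a LongOpenHashSet. Not the actual Geode implementation.
class EntryIdSetSketch {
    // Assumption: 0 is the invalid id, matching the contains(0) == false
    // and add(INVALID_ID) behavior asserted in the test above.
    private static final long INVALID_ID = 0;

    private final IntOpenHashSet ints = new IntOpenHashSet();
    private final LongOpenHashSet longs = new LongOpenHashSet();

    void add(long id) {
        if (id == INVALID_ID) {
            throw new IllegalArgumentException("invalid id: " + id);
        }
        if (id > 0 && id <= 0x00000000FFFFFFFFL) {
            ints.add((int) id); // stored as a 32-bit value
        } else {
            longs.add(id);
        }
    }

    boolean contains(long id) {
        if (id > 0 && id <= 0x00000000FFFFFFFFL) {
            return ints.contains((int) id);
        }
        return id != INVALID_ID && longs.contains(id);
    }

    int size() {
        return ints.size() + longs.size();
    }
}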

Example 3 with OplogEntryIdSet

Use of org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet in project geode by apache.

From class PersistentOplogSet, method recoverOplogs.

private long recoverOplogs(long byteCount) {
    OplogEntryIdSet deletedIds = new OplogEntryIdSet();
    TreeSet<Oplog> oplogSet = getSortedOplogs();
    Set<Oplog> oplogsNeedingValueRecovery = new HashSet<Oplog>();
    if (!this.alreadyRecoveredOnce.get()) {
        if (getChild() != null && !getChild().hasBeenUsed()) {
            // Then remove the current child since it is empty
            // and does not need to be recovered from
            // and it is important to not call initAfterRecovery on it.
            oplogSet.remove(getChild());
        }
    }
    if (oplogSet.size() > 0) {
        long startOpLogRecovery = System.currentTimeMillis();
        // first figure out all entries that have been destroyed
        boolean latestOplog = true;
        for (Oplog oplog : oplogSet) {
            byteCount += oplog.recoverDrf(deletedIds, this.alreadyRecoveredOnce.get(), latestOplog);
            latestOplog = false;
            if (!this.alreadyRecoveredOnce.get()) {
                updateOplogEntryId(oplog.getMaxRecoveredOplogEntryId());
            }
        }
        parent.incDeadRecordCount(deletedIds.size());
        // now figure out live entries
        latestOplog = true;
        for (Oplog oplog : oplogSet) {
            long startOpLogRead = parent.getStats().startOplogRead();
            // @todo make recoverValues per region
            long bytesRead = oplog.recoverCrf(deletedIds, recoverValues(), recoverValuesSync(),
                    this.alreadyRecoveredOnce.get(), oplogsNeedingValueRecovery, latestOplog);
            latestOplog = false;
            if (!this.alreadyRecoveredOnce.get()) {
                updateOplogEntryId(oplog.getMaxRecoveredOplogEntryId());
            }
            byteCount += bytesRead;
            parent.getStats().endOplogRead(startOpLogRead, bytesRead);
            // Used for offline export
            for (DiskRecoveryStore drs : this.currentRecoveryMap.values()) {
                drs.getDiskRegionView().oplogRecovered(oplog.oplogId);
            }
        }
        long endOpLogRecovery = System.currentTimeMillis();
        long elapsed = endOpLogRecovery - startOpLogRecovery;
        logger.info(LocalizedMessage.create(LocalizedStrings.DiskRegion_OPLOG_LOAD_TIME, elapsed));
    }
    if (!parent.isOfflineCompacting()) {
        long startRegionInit = System.currentTimeMillis();
        // initialize the recovered entry count for each region in the recovery map
        for (DiskRecoveryStore drs : this.currentRecoveryMap.values()) {
            drs.getDiskRegionView().initRecoveredEntryCount();
        }
        if (!this.alreadyRecoveredOnce.get()) {
            for (Oplog oplog : oplogSet) {
                if (oplog != getChild()) {
                    oplog.initAfterRecovery(parent.isOffline());
                }
            }
            if (getChild() == null) {
                setFirstChild(getSortedOplogs(), false);
            }
        }
        if (!parent.isOffline()) {
            if (recoverValues() && !recoverValuesSync()) {
                // TODO DAN - should we defer compaction until after
                // value recovery is complete? Or at least until after
                // value recovery for a given oplog is complete?
                // Right now, that's effectively what we're doing
                // because this uses up the compactor thread.
                parent.scheduleValueRecovery(oplogsNeedingValueRecovery, this.currentRecoveryMap);
            }
            if (!this.alreadyRecoveredOnce.get()) {
                // Create krfs for oplogs that are missing them
                for (Oplog oplog : oplogSet) {
                    if (oplog.needsKrf()) {
                        oplog.createKrfAsync();
                    }
                }
                parent.scheduleCompaction();
            }
            long endRegionInit = System.currentTimeMillis();
            logger.info(LocalizedMessage.create(LocalizedStrings.DiskRegion_REGION_INIT_TIME, endRegionInit - startRegionInit));
        }
    }
    return byteCount;
}
Also used: DiskRecoveryStore (org.apache.geode.internal.cache.persistence.DiskRecoveryStore), OplogEntryIdSet (org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet), HashSet (java.util.HashSet), LongOpenHashSet (it.unimi.dsi.fastutil.longs.LongOpenHashSet)
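
recoverOplogs makes two ordered passes over the same sorted oplog set: every drf (delete) file is read first so that deletedIds is complete, and only then are the crf (create) files replayed with that set available for filtering. A condensed, self-contained skeleton of that control flow, using a hypothetical Log interface in place of Oplog and omitting the stats, entry-id bookkeeping, and offline handling shown above:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

class TwoPassRecoverySketch {

    // Hypothetical stand-in for the two Oplog recovery methods used above.
    interface Log {
        long recoverDeletes(Set<Long> deletedIds, boolean latest); // drf pass
        long recoverCreates(Set<Long> deletedIds, boolean latest); // crf pass
    }

    long recover(List<Log> sortedLogs) {
        long byteCount = 0;
        Set<Long> deletedIds = new HashSet<>();
        // Pass 1: read every delete record first, so the complete set of
        // destroyed entry ids exists before any create record is replayed.
        boolean latest = true;
        for (Log log : sortedLogs) {
            byteCount += log.recoverDeletes(deletedIds, latest);
            latest = false; // only the first log in the iteration is "latest"
        }
        // Pass 2: replay creates/updates, skipping ids found in deletedIds.
        latest = true;
        for (Log log : sortedLogs) {
            byteCount += log.recoverCreates(deletedIds, latest);
            latest = false;
        }
        return byteCount;
    }
}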

Aggregations

OplogEntryIdSet (org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet): 3 uses
DiskRecoveryStore (org.apache.geode.internal.cache.persistence.DiskRecoveryStore): 2 uses
Test (org.junit.Test): 2 uses
LongOpenHashSet (it.unimi.dsi.fastutil.longs.LongOpenHashSet): 1 use
File (java.io.File): 1 use
HashMap (java.util.HashMap): 1 use
HashSet (java.util.HashSet): 1 use
StatisticsFactory (org.apache.geode.StatisticsFactory): 1 use
LogWriterI18n (org.apache.geode.i18n.LogWriterI18n): 1 use
DiskStoreID (org.apache.geode.internal.cache.persistence.DiskStoreID): 1 use
DiskRegionVersionVector (org.apache.geode.internal.cache.versions.DiskRegionVersionVector): 1 use
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 1 use
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 1 use
Expectations (org.jmock.Expectations): 1 use