Example 76 with DiskAccessException

Use of org.apache.geode.cache.DiskAccessException in the Apache Geode project.

The example comes from the class PersistentRVVRecoveryDUnitTest, method testLotsOfTombstones.

/**
   * Test that we correctly recover and expire recovered tombstones, with compaction enabled
   */
@Test
public void testLotsOfTombstones() throws Throwable {
    Host host = Host.getHost(0);
    final VM vm0 = host.getVM(0);
    // I think we just need to assert the number of tombstones, maybe?
    // Bruce has code that won't even let the tombstones expire for 10 minutes
    // That means on recovery we need to recover them all? Or do we need to recover
    // any? We're doing a GII. Won't we GII tombstones anyway? Ahh, but we need
    // to know that we don't need to record the new tombstones.
    LocalRegion region = createRegion(vm0);
    int initialCount = getTombstoneCount(region);
    assertEquals(0, initialCount);
    final int entryCount = 20;
    for (int i = 0; i < entryCount; i++) {
        region.put(i, new byte[100]);
        // destroy each entry.
        region.destroy(i);
    }
    assertEquals(entryCount, getTombstoneCount(region));
    // roll to a new oplog
    region.getDiskStore().forceRoll();
    // Force a compaction. This should do nothing, because
    // The tombstones are not garbage, so only 50% of the oplog
    // is garbage (the creates).
    region.getDiskStore().forceCompaction();
    assertEquals(0, region.getDiskStore().numCompactableOplogs());
    assertEquals(entryCount, getTombstoneCount(region));
    getCache().close();
    region = createRegion(vm0);
    assertEquals(entryCount, getTombstoneCount(region));
    GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
    TombstoneService tombstoneService = cache.getTombstoneService();
    // Before expiring tombstones, no oplogs are available for compaction
    assertEquals(0, region.getDiskStore().numCompactableOplogs());
    region.getDiskStore().forceCompaction();
    assertTrue(tombstoneService.forceBatchExpirationForTests(entryCount / 2));
    assertEquals(entryCount / 2, getTombstoneCount(region));
    // After expiring, we should have an oplog available for compaction.
    assertEquals(1, region.getDiskStore().numCompactableOplogs());
    // Test after restart the tombstones are still missing
    getCache().close();
    region = createRegion(vm0);
    assertEquals(entryCount / 2, getTombstoneCount(region));
    // We should have an oplog available for compaction, because the tombstones
    // were garbage collected
    assertEquals(1, region.getDiskStore().numCompactableOplogs());
    // This should compact some oplogs
    region.getDiskStore().forceCompaction();
    assertEquals(0, region.getDiskStore().numCompactableOplogs());
    // Restart again, and make sure the compaction didn't mess up our tombstone
    // count
    getCache().close();
    region = createRegion(vm0);
    assertEquals(entryCount / 2, getTombstoneCount(region));
    cache = (GemFireCacheImpl) getCache();
    // Add a test hook that will shutdown the system as soon as we write a GC RVV record
    DiskStoreObserver.setInstance(new DiskStoreObserver() {

        @Override
        public void afterWriteGCRVV(DiskRegion dr) {
            // Throwing here will cause the disk store to shut down,
            // preventing us from writing any other records.
            throw new DiskAccessException();
        }
    });
    IgnoredException ex = IgnoredException.addIgnoredException("DiskAccessException");
    try {
        // Force expiration, with our test hook that should close the cache
        tombstoneService = cache.getTombstoneService();
        tombstoneService.forceBatchExpirationForTests(entryCount / 4);
        getCache().close();
        assertTrue(cache.isClosed());
        // Restart again, and make sure the tombstones are in fact removed
        region = createRegion(vm0);
        assertEquals(entryCount / 4, getTombstoneCount(region));
    } finally {
        ex.remove();
    }
}
Also used:
DiskRegion (org.apache.geode.internal.cache.DiskRegion)
VM (org.apache.geode.test.dunit.VM)
TombstoneService (org.apache.geode.internal.cache.TombstoneService)
DiskAccessException (org.apache.geode.cache.DiskAccessException)
GemFireCacheImpl (org.apache.geode.internal.cache.GemFireCacheImpl)
IgnoredException (org.apache.geode.test.dunit.IgnoredException)
Host (org.apache.geode.test.dunit.Host)
DiskStoreObserver (org.apache.geode.internal.cache.DiskStoreObserver)
LocalRegion (org.apache.geode.internal.cache.LocalRegion)
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)
Test (org.junit.Test)
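
The test above provokes a DiskAccessException from inside a DiskStoreObserver hook. As a rough sketch of how the same exception surfaces to ordinary application code, the snippet below catches it around a put on a persistent region. The cache setup, region name, and handling here are illustrative assumptions and are not taken from the test above; Geode typically closes the cache (or the affected disk store) after a DiskAccessException, so the usual response is to log the failure and restart the member, letting it recover from its oplogs, rather than retry in place.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskAccessException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class DiskAccessExceptionHandlingSketch {

    public static void main(String[] args) {
        // Illustrative setup: a member with one persistent region. The region
        // name "exampleRegion" is hypothetical.
        Cache cache = new CacheFactory().create();
        Region<Integer, byte[]> region =
            cache.<Integer, byte[]>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
                .create("exampleRegion");
        try {
            region.put(1, new byte[100]);
        } catch (DiskAccessException dae) {
            // The write to the region's disk store failed. Geode generally shuts the
            // cache or disk store down after this, so we log and fall through to
            // cleanup instead of retrying the put.
            System.err.println("Persistent write failed: " + dae.getMessage());
        } finally {
            if (!cache.isClosed()) {
                cache.close();
            }
        }
    }
}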

Aggregations

DiskAccessException (org.apache.geode.cache.DiskAccessException): 76
IOException (java.io.IOException): 44
InterruptedIOException (java.io.InterruptedIOException): 17
StoredObject (org.apache.geode.internal.offheap.StoredObject): 13
HeapDataOutputStream (org.apache.geode.internal.HeapDataOutputStream): 11
ByteBuffer (java.nio.ByteBuffer): 9
Test (org.junit.Test): 8
Version (org.apache.geode.internal.Version): 6
File (java.io.File): 5
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 5
IndexManager (org.apache.geode.cache.query.internal.index.IndexManager): 5
UninterruptibleFileChannel (org.apache.geode.internal.cache.persistence.UninterruptibleFileChannel): 5
VersionTag (org.apache.geode.internal.cache.versions.VersionTag): 5
Released (org.apache.geode.internal.offheap.annotations.Released): 5
BufferedInputStream (java.io.BufferedInputStream): 4
FileInputStream (java.io.FileInputStream): 4
CancelException (org.apache.geode.CancelException): 4
BytesAndBits (org.apache.geode.internal.cache.persistence.BytesAndBits): 4
UninterruptibleRandomAccessFile (org.apache.geode.internal.cache.persistence.UninterruptibleRandomAccessFile): 4
EOFException (java.io.EOFException): 3