Search in sources :

Example 16 with DiskRegion

Example of using org.apache.geode.internal.cache.DiskRegion in the Apache Geode project.

Source: method testOverflowUpdatedValue of class DiskRegionDUnitTest.

/**
 * Tests that an updated value gets overflowed to disk correctly: after filling the
 * region past its LRU limit, the first key is updated in memory, evicted again by
 * faulting in the other entries, and then read back — the read must return the
 * updated value, proving the update (not the stale original) was written to disk.
 *
 * @throws Exception if region creation or cache access fails
 */
@Test
public void testOverflowUpdatedValue() throws Exception {
    final String name = this.getUniqueName();
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    // Overflow to disk once more than 100 entries are resident in memory.
    factory.setEvictionAttributes(
        EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
    File d = new File("DiskRegions" + OSProcess.getId());
    d.mkdirs();
    DiskStoreFactory dsf = getCache().createDiskStoreFactory();
    dsf.setDiskDirs(new File[] { d });
    DiskStore ds = dsf.create(name);
    factory.setDiskStoreName(ds.getName());
    Region region = createRegion(name, factory.create());
    LRUStatistics lruStats = getLRUStats(region);
    // Put in larger stuff until we start evicting (run a little past the first
    // eviction so key 0 is definitely overflowed to disk).
    int total;
    for (total = 0; lruStats.getEvictions() <= 10; total++) {
        int[] array = new int[250];
        array[0] = total;
        // Integer.valueOf instead of the deprecated new Integer(...) constructor.
        region.put(Integer.valueOf(total), array);
    }
    // Update the first key's value while its old value lives on disk.
    final Object newValue = "NEW VALUE";
    final Object key = Integer.valueOf(0);
    region.put(key, newValue);
    assertEquals(newValue, region.get(key));
    // Fault in every other entry so the updated key gets evicted (overflowed) again.
    for (int i = 1; i < total; i++) {
        region.get(Integer.valueOf(i));
    }
    // Make sure that the updated value got written to disk.
    assertEquals(newValue, region.get(key));
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) LRUStatistics(org.apache.geode.internal.cache.lru.LRUStatistics) DiskRegion(org.apache.geode.internal.cache.DiskRegion) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Example 17 with DiskRegion

Example of using org.apache.geode.internal.cache.DiskRegion in the Apache Geode project.

Source: method testDestroy of class DiskRegionDUnitTest.

/**
 * Tests destroying entries in an overflow region. Verifies two invariants:
 * destroying a memory-resident entry does not touch the disk store (no removes,
 * eviction count unchanged), while destroying a disk-resident entry performs
 * exactly one disk remove. Finally checks the region is empty.
 *
 * @throws Exception if region creation or cache access fails
 */
@Test
public void testDestroy() throws Exception {
    final String name = this.getUniqueName();
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    // Keep at most 100 entries in memory; overflow the rest to disk.
    factory.setEvictionAttributes(
        EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
    File d = new File("DiskRegions" + OSProcess.getId());
    d.mkdirs();
    DiskStoreFactory dsf = getCache().createDiskStoreFactory();
    dsf.setDiskDirs(new File[] { d });
    // Synchronous writes so disk stats are up to date after each operation.
    factory.setDiskSynchronous(true);
    DiskStore ds = dsf.create(name);
    factory.setDiskStoreName(ds.getName());
    Region region = createRegion(name, factory.create());
    DiskRegion dr = ((LocalRegion) region).getDiskRegion();
    DiskRegionStats diskStats = dr.getStats();
    LRUStatistics lruStats = getLRUStats(region);
    // Fill until at least 40 entries have been evicted to disk.
    int total;
    for (total = 0; lruStats.getEvictions() < 40; total++) {
        region.put(Integer.valueOf(total), new byte[1000]);
    }
    assertEquals(0, diskStats.getRemoves());
    long evictions = lruStats.getEvictions();
    LogWriterUtils.getLogWriter().info("Destroying memory resident entries");
    // The LRU evicts oldest first, so keys [evictions, total) are memory-resident.
    // Destroying each of these guys should have no effect on the disk.
    for (int i = total - 1; i >= evictions; i--) {
        region.destroy(Integer.valueOf(i));
        flush(region);
        assertEquals(0, diskStats.getRemoves());
        assertEquals(evictions, lruStats.getEvictions());
    }
    LogWriterUtils.getLogWriter().info("Destroying disk-resident entries.  evictions=" + evictions);
    // Keys [0, evictions) are on disk; destroying each should cause a removal from disk.
    for (int i = ((int) evictions) - 1; i >= 0; i--) {
        region.destroy(Integer.valueOf(i));
        flush(region);
        assertEquals((evictions - i), diskStats.getRemoves());
    }
    assertEquals(evictions, lruStats.getEvictions());
    LogWriterUtils.getLogWriter().info("keys remaining in region: " + region.keySet().size());
    assertEquals(0, region.keySet().size());
}
Also used : DiskRegionStats(org.apache.geode.internal.cache.DiskRegionStats) LocalRegion(org.apache.geode.internal.cache.LocalRegion) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) DiskRegion(org.apache.geode.internal.cache.DiskRegion) LRUStatistics(org.apache.geode.internal.cache.lru.LRUStatistics) DiskRegion(org.apache.geode.internal.cache.DiskRegion) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) File(java.io.File) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Example 18 with DiskRegion

Example of using org.apache.geode.internal.cache.DiskRegion in the Apache Geode project.

Source: method testLotsOfTombstones of class PersistentRVVRecoveryDUnitTest.

/**
   * Tests that we correctly recover and expire recovered tombstones, with compaction enabled.
   *
   * Sequence exercised: create then destroy a batch of entries (producing tombstones),
   * verify the tombstone count survives oplog roll, compaction, and repeated cache
   * restarts; verify compactable-oplog counts before and after tombstone expiration;
   * finally verify that a disk failure injected right after a GC RVV record is written
   * does not resurrect already-collected tombstones on the next recovery.
   */
@Test
public void testLotsOfTombstones() throws Throwable {
    Host host = Host.getHost(0);
    final VM vm0 = host.getVM(0);
    // I think we just need to assert the number of tombstones, maybe?
    // Bruce has code that won't even let the tombstones expire for 10 minutes
    // That means on recovery we need to recover them all? Or do we need to recover
    // any? We're doing a GII. Won't we GII tombstones anyway? Ahh, but we need
    // to know that we don't need to record the new tombstones.
    LocalRegion region = createRegion(vm0);
    // A fresh region must start with no tombstones.
    int initialCount = getTombstoneCount(region);
    assertEquals(0, initialCount);
    final int entryCount = 20;
    for (int i = 0; i < entryCount; i++) {
        region.put(i, new byte[100]);
        // destroy each entry, turning every create into a tombstone.
        region.destroy(i);
    }
    assertEquals(entryCount, getTombstoneCount(region));
    // roll to a new oplog
    region.getDiskStore().forceRoll();
    // Force a compaction. This should do nothing, because
    // The tombstones are not garbage, so only 50% of the oplog
    // is garbage (the creates).
    region.getDiskStore().forceCompaction();
    assertEquals(0, region.getDiskStore().numCompactableOplogs());
    assertEquals(entryCount, getTombstoneCount(region));
    // Restart and confirm all tombstones are recovered from disk.
    getCache().close();
    region = createRegion(vm0);
    assertEquals(entryCount, getTombstoneCount(region));
    GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
    TombstoneService tombstoneService = cache.getTombstoneService();
    // Before expiring tombstones, no oplogs are available for compaction
    assertEquals(0, region.getDiskStore().numCompactableOplogs());
    region.getDiskStore().forceCompaction();
    // Expire half the tombstones; expired tombstones become compactable garbage.
    assertTrue(tombstoneService.forceBatchExpirationForTests(entryCount / 2));
    assertEquals(entryCount / 2, getTombstoneCount(region));
    // After expiring, we should have an oplog available for compaction.
    assertEquals(1, region.getDiskStore().numCompactableOplogs());
    // Test after restart the tombstones are still missing
    getCache().close();
    region = createRegion(vm0);
    assertEquals(entryCount / 2, getTombstoneCount(region));
    // We should have an oplog available for compaction, because the tombstones
    // were garbage collected
    assertEquals(1, region.getDiskStore().numCompactableOplogs());
    // This should compact some oplogs
    region.getDiskStore().forceCompaction();
    assertEquals(0, region.getDiskStore().numCompactableOplogs());
    // Restart again, and make sure the compaction didn't mess up our tombstone
    // count
    getCache().close();
    region = createRegion(vm0);
    assertEquals(entryCount / 2, getTombstoneCount(region));
    cache = (GemFireCacheImpl) getCache();
    // Add a test hook that will shutdown the system as soon as we write a GC RVV record
    DiskStoreObserver.setInstance(new DiskStoreObserver() {

        @Override
        public void afterWriteGCRVV(DiskRegion dr) {
            // Simulate a disk failure immediately after the GC RVV record lands,
            // preventing us from writing any other records.
            throw new DiskAccessException();
        }
    });
    // The injected failure is expected; keep it from failing the dunit run.
    IgnoredException ex = IgnoredException.addIgnoredException("DiskAccessException");
    try {
        // Force expiration, with our test hook that should close the cache
        tombstoneService = cache.getTombstoneService();
        tombstoneService.forceBatchExpirationForTests(entryCount / 4);
        getCache().close();
        assertTrue(cache.isClosed());
        // Restart again, and make sure the tombstones are in fact removed
        // (the GC RVV record written before the crash must be honored on recovery).
        region = createRegion(vm0);
        assertEquals(entryCount / 4, getTombstoneCount(region));
    } finally {
        ex.remove();
    }
}
Also used : DiskRegion(org.apache.geode.internal.cache.DiskRegion) VM(org.apache.geode.test.dunit.VM) TombstoneService(org.apache.geode.internal.cache.TombstoneService) DiskAccessException(org.apache.geode.cache.DiskAccessException) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) IgnoredException(org.apache.geode.test.dunit.IgnoredException) Host(org.apache.geode.test.dunit.Host) DiskStoreObserver(org.apache.geode.internal.cache.DiskStoreObserver) LocalRegion(org.apache.geode.internal.cache.LocalRegion) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)

Aggregations

DiskRegion (org.apache.geode.internal.cache.DiskRegion)18 LocalRegion (org.apache.geode.internal.cache.LocalRegion)17 LRUStatistics (org.apache.geode.internal.cache.lru.LRUStatistics)15 AttributesFactory (org.apache.geode.cache.AttributesFactory)14 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)14 Test (org.junit.Test)14 File (java.io.File)13 DiskStore (org.apache.geode.cache.DiskStore)13 DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory)13 Region (org.apache.geode.cache.Region)11 DiskRegionStats (org.apache.geode.internal.cache.DiskRegionStats)8 Cache (org.apache.geode.cache.Cache)3 Host (org.apache.geode.test.dunit.Host)3 VM (org.apache.geode.test.dunit.VM)3 BitSet (java.util.BitSet)2 Iterator (java.util.Iterator)2 EntryEvent (org.apache.geode.cache.EntryEvent)2 ObjectSizer (org.apache.geode.cache.util.ObjectSizer)2 DistributedSystem (org.apache.geode.distributed.DistributedSystem)2 DirectoryHolder (org.apache.geode.internal.cache.DirectoryHolder)2