Usage example of org.apache.geode.internal.cache.DiskRegion in the Apache Geode project:
class DiskRegionDUnitTest, method testOverflowUpdatedValue.
/**
 * Tests that an updated value gets overflowed to disk.
 *
 * <p>Fills a LOCAL region configured with LRU-entry overflow until eviction
 * starts, updates one (likely disk-resident) entry, faults in every other key
 * so the updated entry is evicted again, and then verifies the updated value
 * is the one read back from disk.
 */
@Test
public void testOverflowUpdatedValue() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  // Keep at most 100 entries in memory; evict the rest to disk.
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] {d});
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  LRUStatistics lruStats = getLRUStats(region);

  // Put in larger values until we have clearly started evicting.
  int total;
  for (total = 0; lruStats.getEvictions() <= 10; total++) {
    int[] array = new int[250];
    array[0] = total;
    // Integer.valueOf instead of the deprecated new Integer(...) constructor;
    // region keys are compared with equals, so behavior is identical.
    region.put(Integer.valueOf(total), array);
  }

  // Update a value; key 0 was inserted first, so it is presumably on disk by now.
  final Object newValue = "NEW VALUE";
  final Object key = Integer.valueOf(0);
  region.put(key, newValue);
  assertEquals(newValue, region.get(key));

  // Touch every other key so the updated entry gets overflowed again.
  for (int i = 1; i < total; i++) {
    region.get(Integer.valueOf(i));
  }

  // Make sure that the updated value got written to disk.
  assertEquals(newValue, region.get(key));
}
Usage example of org.apache.geode.internal.cache.DiskRegion in the Apache Geode project:
class DiskRegionDUnitTest, method testDestroy.
/**
 * Tests destroying entries in an overflow region.
 *
 * <p>Fills a LOCAL overflow region until entries have been evicted to disk,
 * then verifies that destroying the memory-resident entries performs no disk
 * removals, while destroying the disk-resident entries removes each one from
 * disk, leaving the region empty.
 */
@Test
public void testDestroy() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  // Keep at most 100 entries in memory; evict the rest to disk.
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] {d});
  // Synchronous writes so the disk stats reflect each operation immediately.
  factory.setDiskSynchronous(true);
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);

  // Fill until at least 40 entries have been evicted to disk.
  int total;
  for (total = 0; lruStats.getEvictions() < 40; total++) {
    // Integer.valueOf instead of the deprecated new Integer(...) constructor;
    // region keys are compared with equals, so behavior is identical.
    region.put(Integer.valueOf(total), new byte[1000]);
  }
  assertEquals(0, diskStats.getRemoves());

  long evictions = lruStats.getEvictions();
  LogWriterUtils.getLogWriter().info("Destroying memory resident entries");
  // Destroying each of these guys should have no effect on the disk.
  // The highest-numbered keys were inserted last and are still in memory.
  for (int i = total - 1; i >= evictions; i--) {
    region.destroy(Integer.valueOf(i));
    flush(region);
    assertEquals(0, diskStats.getRemoves());
    assertEquals(evictions, lruStats.getEvictions());
  }

  LogWriterUtils.getLogWriter().info("Destroying disk-resident entries. evictions=" + evictions);
  // Destroying each of these guys should cause a removal from disk.
  for (int i = ((int) evictions) - 1; i >= 0; i--) {
    region.destroy(Integer.valueOf(i));
    flush(region);
    assertEquals((evictions - i), diskStats.getRemoves());
  }
  assertEquals(evictions, lruStats.getEvictions());

  LogWriterUtils.getLogWriter().info("keys remaining in region: " + region.keySet().size());
  assertEquals(0, region.keySet().size());
}
Usage example of org.apache.geode.internal.cache.DiskRegion in the Apache Geode project:
class PersistentRVVRecoveryDUnitTest, method testLotsOfTombstones.
/**
 * Test that we correctly recover and expire recovered tombstones, with compaction enabled.
 *
 * <p>Scenario: create and destroy entries to build up tombstones, then across several
 * cache restarts verify that (1) tombstones survive recovery, (2) expiring a batch of
 * tombstones turns the corresponding oplog records into compactable garbage, (3)
 * compaction does not change the tombstone count, and (4) a disk failure injected while
 * writing a GC RVV record does not resurrect already-expired tombstones on the next
 * recovery.
 */
@Test
public void testLotsOfTombstones() throws Throwable {
Host host = Host.getHost(0);
final VM vm0 = host.getVM(0);
// I think we just need to assert the number of tombstones, maybe?
// Bruce has code that won't even let the tombstones expire for 10 minutes
// That means on recovery we need to recover them all? Or do we need to recover
// any? We're doing a GII. Won't we GII tombstones anyway? Ahh, but we need
// to know that we don't need to record the new tombstones.
LocalRegion region = createRegion(vm0);
// A fresh region starts with no tombstones.
int initialCount = getTombstoneCount(region);
assertEquals(0, initialCount);
final int entryCount = 20;
// Create and immediately destroy each entry so every entry leaves a tombstone.
for (int i = 0; i < entryCount; i++) {
region.put(i, new byte[100]);
// destroy each entry.
region.destroy(i);
}
assertEquals(entryCount, getTombstoneCount(region));
// roll to a new oplog
region.getDiskStore().forceRoll();
// Force a compaction. This should do nothing, because
// The tombstones are not garbage, so only 50% of the oplog
// is garbage (the creates).
region.getDiskStore().forceCompaction();
assertEquals(0, region.getDiskStore().numCompactableOplogs());
assertEquals(entryCount, getTombstoneCount(region));
// Restart: all tombstones should be recovered from disk.
getCache().close();
region = createRegion(vm0);
assertEquals(entryCount, getTombstoneCount(region));
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
TombstoneService tombstoneService = cache.getTombstoneService();
// Before expiring tombstones, no oplogs are available for compaction
assertEquals(0, region.getDiskStore().numCompactableOplogs());
region.getDiskStore().forceCompaction();
// Expire half the tombstones; the expired ones should now count as garbage.
assertTrue(tombstoneService.forceBatchExpirationForTests(entryCount / 2));
assertEquals(entryCount / 2, getTombstoneCount(region));
// After expiring, we should have an oplog available for compaction.
assertEquals(1, region.getDiskStore().numCompactableOplogs());
// Test after restart the tombstones are still missing
getCache().close();
region = createRegion(vm0);
assertEquals(entryCount / 2, getTombstoneCount(region));
// We should have an oplog available for compaction, because the tombstones
// were garbage collected
assertEquals(1, region.getDiskStore().numCompactableOplogs());
// This should compact some oplogs
region.getDiskStore().forceCompaction();
assertEquals(0, region.getDiskStore().numCompactableOplogs());
// Restart again, and make sure the compaction didn't mess up our tombstone
// count
getCache().close();
region = createRegion(vm0);
assertEquals(entryCount / 2, getTombstoneCount(region));
cache = (GemFireCacheImpl) getCache();
// Add a test hook that will shutdown the system as soon as we write a GC RVV record
DiskStoreObserver.setInstance(new DiskStoreObserver() {
@Override
public void afterWriteGCRVV(DiskRegion dr) {
// Simulate a disk failure right after the GC RVV record is written,
// preventing us from writing any other records.
throw new DiskAccessException();
}
});
IgnoredException ex = IgnoredException.addIgnoredException("DiskAccessException");
try {
// Force expiration, with our test hook that should close the cache
tombstoneService = cache.getTombstoneService();
tombstoneService.forceBatchExpirationForTests(entryCount / 4);
getCache().close();
assertTrue(cache.isClosed());
// Restart again, and make sure the tombstones are in fact removed
// despite the injected failure during the GC RVV write.
region = createRegion(vm0);
assertEquals(entryCount / 4, getTombstoneCount(region));
} finally {
// Always clean up the ignored-exception registration.
ex.remove();
}
}
Aggregations