Use of org.apache.geode.internal.cache.DiskRegion in project geode by apache.
The class TestDiskRegion, method main.
public static void main(String[] args) throws Exception {
  DistributedSystem system = DistributedSystem.connect(new java.util.Properties());
  Cache cache = CacheFactory.create(system);
  AttributesFactory factory = new AttributesFactory();
  factory.setEvictionAttributes(EvictionAttributes.createLRUMemoryAttributes(2, (ObjectSizer) null, EvictionAction.OVERFLOW_TO_DISK));
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  File user_dir = new File(System.getProperty("user.dir"));
  if (!user_dir.exists()) {
    user_dir.mkdir();
  }
  File[] dirs1 = new File[] { user_dir };
  DiskStore ds1 = dsf.setDiskDirs(dirs1).create("TestDiskRegion");
  factory.setDiskStoreName("TestDiskRegion");
  LocalRegion region = (LocalRegion) cache.createRegion("TestDiskRegion", factory.create());
  DiskRegion dr = region.getDiskRegion();
  Assert.assertTrue(dr != null);
  DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  Assert.assertTrue(diskStats != null);
  Assert.assertTrue(lruStats != null);
  // Put some small stuff
  for (int i = 0; i < 10; i++) {
    region.put(new Integer(i), String.valueOf(i));
  }
  Assert.assertTrue(diskStats.getWrites() == 0);
  Assert.assertTrue(diskStats.getReads() == 0);
  Assert.assertTrue(lruStats.getEvictions() == 0);
  // // Make sure we can get them back okay
  // for (int i = 0; i < 10; i++) {
  // Object value = region.get(new Integer(i));
  // Assert.assertTrue(value != null);
  // Assert.assertTrue(String.valueOf(i).equals(value));
  // }
  // Put in larger stuff until we start evicting
  int total;
  for (total = 0; lruStats.getEvictions() <= 0; total++) {
    System.out.println("total puts " + total + ", evictions " + lruStats.getEvictions() + ", total entry size " + lruStats.getCounter());
    int[] array = new int[250];
    array[0] = total;
    region.put(new Integer(total), array);
  }
  Assert.assertTrue(diskStats.getWrites() == 1);
  Assert.assertTrue(diskStats.getReads() == 0);
  Assert.assertTrue(lruStats.getEvictions() == 1);
  System.out.println("---------- Finished Putting -------------");
  Object value = region.get(new Integer(0));
  Assert.assertTrue(value != null);
  Assert.assertTrue(((int[]) value)[0] == 0);
  Assert.assertTrue(diskStats.getWrites() == 2, String.valueOf(diskStats.getWrites()));
  Assert.assertTrue(diskStats.getReads() == 1);
  Assert.assertTrue(lruStats.getEvictions() == 2, String.valueOf(lruStats.getEvictions()));
  System.out.println("---------- Getting ALL -------------");
  for (int i = 0; i < total; i++) {
    System.out.println("total gets " + i + ", evictions " + lruStats.getEvictions() + ", total entry size " + lruStats.getCounter());
    int[] array = (int[]) region.get(new Integer(i));
    Assert.assertTrue(array != null);
    Assert.assertTrue(i == array[0]);
  }
  System.out.println("--------- Updating --------------");
  long startEvictions = lruStats.getEvictions();
  for (int i = 0; i < 10; i++) {
    region.put(new Integer(i), new int[251]);
    long expected = startEvictions + 1 + i;
    long actual = lruStats.getEvictions();
    Assert.assertTrue(expected == actual, "For " + i + " expected " + expected + ", got " + actual);
  }
  System.out.println("Done. Waiting for stats to be written...");
  Thread.sleep(5 * 1000);
}
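All of the snippets on this page share the same setup boilerplate: create a DiskStore, point the region's eviction attributes at it, and let LRU eviction overflow values to disk. For reference, here is a minimal, self-contained sketch of that pattern using only the public Geode API; the region name, disk-store name, and directory are illustrative, not taken from the tests above.

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class OverflowRegionSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    // Disk store that will receive evicted values.
    File overflowDir = new File("overflow");
    overflowDir.mkdirs();
    cache.createDiskStoreFactory()
        .setDiskDirs(new File[] { overflowDir })
        .create("overflowStore"); // store name is an assumption for this sketch
    // LOCAL region that evicts least-recently-used entries to disk
    // once it holds more than 100 entries.
    Region<Integer, int[]> region =
        cache.<Integer, int[]>createRegionFactory(RegionShortcut.LOCAL)
            .setEvictionAttributes(
                EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK))
            .setDiskStoreName("overflowStore")
            .create("example");
    region.put(1, new int[250]);
    cache.close();
  }
}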
Use of org.apache.geode.internal.cache.DiskRegion in project geode by apache.
The class DiskRegionDUnitTest, method testNoFaults.
/**
 * Overflows a region and makes sure that gets of recently-used objects do not cause faults.
 */
@Test
public void testNoFaults() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] { d });
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  // Put in larger stuff until we start evicting
  int total;
  for (total = 0; lruStats.getEvictions() <= 20; total++) {
    // System.out.println("total " + total + ", evictions " +
    // lruStats.getEvictions());
    int[] array = new int[250];
    array[0] = total;
    region.put(new Integer(total), array);
  }
  assertTrue(total > 40);
  long firstEvictions = lruStats.getEvictions();
  long firstReads = diskStats.getReads();
  for (int i = 1; i <= 40; i++) {
    int key = total - i;
    region.get(new Integer(key));
    assertEquals("Key " + key + " caused an eviction", firstEvictions, lruStats.getEvictions());
    assertEquals("Key " + key + " caused a disk read", firstReads, diskStats.getReads());
  }
}
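The final loop works because the LRU entry limit is 100: the 40 most recently written keys are still resident in memory, so getting them neither faults a value in from disk (reads stay constant) nor pushes another entry out (evictions stay constant).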
Use of org.apache.geode.internal.cache.DiskRegion in project geode by apache.
The class DiskRegionDUnitTest, method testCacheEvents.
/**
 * Tests cache listeners in an overflow region are invoked and that their events are reasonable.
 */
@Test
public void testCacheEvents() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  TestCacheListener listener = new TestCacheListener() {
    public void afterCreate2(EntryEvent event) {
    }
  };
  factory.addCacheListener(listener);
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] { d });
  factory.setDiskSynchronous(true);
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  // DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  // DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  int total;
  for (total = 0; lruStats.getEvictions() < 20; total++) {
    region.put(new Integer(total), String.valueOf(total));
    assertEquals(String.valueOf(total), region.get(new Integer(total)));
  }
  assertTrue(listener.wasInvoked());
  listener = new TestCacheListener() {
    public void close2() {
    }
  };
  region.getAttributesMutator().setCacheListener(listener);
  for (int i = 0; i < total; i++) {
    String value = (String) region.get(new Integer(i));
    assertNotNull(value);
    assertEquals(String.valueOf(i), value);
  }
  assertFalse(listener.wasInvoked());
  listener = new TestCacheListener() {
    public void afterUpdate2(EntryEvent event) {
      Integer key = (Integer) event.getKey();
      assertEquals(null, event.getOldValue());
      assertEquals(false, event.isOldValueAvailable());
      byte[] value = (byte[]) event.getNewValue();
      assertEquals(key.intValue(), value.length);
    }
  };
  region.getAttributesMutator().setCacheListener(listener);
  for (int i = 0; i < 20; i++) {
    region.put(new Integer(i), new byte[i]);
  }
  assertTrue(listener.wasInvoked());
}
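The last listener illustrates a subtlety of overflow regions that this test asserts: when an update hits an entry whose value has been evicted to disk, Geode does not fault the old value back in just to populate the event, so the listener sees a null, unavailable old value. A rough sketch of the same check written against the public API, using CacheListenerAdapter instead of the test helper TestCacheListener (class name illustrative):

import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.util.CacheListenerAdapter;

class OldValueCheckListener extends CacheListenerAdapter<Integer, byte[]> {
  @Override
  public void afterUpdate(EntryEvent<Integer, byte[]> event) {
    // For an entry whose old value had overflowed to disk, the old value
    // stays on disk and is not delivered with the event.
    if (event.isOldValueAvailable() || event.getOldValue() != null) {
      throw new AssertionError("unexpected old value for key " + event.getKey());
    }
  }
}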
Use of org.apache.geode.internal.cache.DiskRegion in project geode by apache.
The class DiskRegionDUnitTest, method testBackupStatistics.
/**
 * Tests that the disk region statistics are updated correctly for persistent backup regions.
 */
@Test
public void testBackupStatistics() throws CacheException {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  final int total = 10;
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] { d });
  factory.setDiskSynchronous(true);
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  assertNotNull(dr);
  DiskRegionStats diskStats = dr.getStats();
  assertEquals(0, diskStats.getWrites());
  assertEquals(0, diskStats.getNumEntriesInVM());
  assertEquals(0, diskStats.getReads());
  assertEquals(0, diskStats.getNumOverflowOnDisk());
  for (int i = 0; i < total; i++) {
    String s = String.valueOf(i);
    region.put(s, s);
    assertEquals(i + 1, diskStats.getWrites());
    assertEquals(i + 1, diskStats.getNumEntriesInVM());
    assertEquals(0, diskStats.getReads());
    assertEquals(0, diskStats.getNumOverflowOnDisk());
  }
  region.put("foobar", "junk");
  assertEquals(total + 1, diskStats.getWrites());
  assertEquals(total + 1, diskStats.getNumEntriesInVM());
  assertEquals(0, diskStats.getReads());
  assertEquals(0, diskStats.getNumOverflowOnDisk());
  region.localDestroy("foobar");
  // the destroy becomes a tombstone, which is itself written to disk
  assertEquals(total + 2, diskStats.getWrites());
  assertEquals(total, diskStats.getNumEntriesInVM());
  assertEquals(0, diskStats.getReads());
  region.put("foobar2", "junk");
  flush(region);
  region.localDestroy("foobar2");
  assertEquals(total, region.keySet().size());
}
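The arithmetic in the tail of this test hinges on tombstones: a localDestroy of a persistent entry is itself written to disk as a tombstone, so the write count climbs to total + 2 even though the in-VM entry count drops back to total. And because a PERSISTENT_REPLICATE region with no eviction keeps every live value in memory as well as on disk, the read count never moves off zero.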
Use of org.apache.geode.internal.cache.DiskRegion in project geode by apache.
The class DiskRegionDUnitTest, method testDiskRegionOverflow.
/**
 * Tests that data overflows correctly to a disk region.
 */
@Test
public void testDiskRegionOverflow() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] { d });
  factory.setDiskSynchronous(true);
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  assertNotNull(dr);
  DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  assertNotNull(diskStats);
  assertNotNull(lruStats);
  flush(region);
  assertEquals(0, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(0, lruStats.getEvictions());
  // Put in larger stuff until we start evicting
  int total;
  for (total = 0; lruStats.getEvictions() <= 0; total++) {
    // getLogWriter().info("DEBUG: total " + total + ", evictions " + lruStats.getEvictions());
    int[] array = new int[250];
    array[0] = total;
    region.put(new Integer(total), array);
  }
  flush(region);
  LogWriterUtils.getLogWriter().info("DEBUG: writes=" + diskStats.getWrites() + " reads=" + diskStats.getReads() + " evictions=" + lruStats.getEvictions() + " total=" + total + " numEntriesInVM=" + diskStats.getNumEntriesInVM() + " numOverflows=" + diskStats.getNumOverflowOnDisk());
  assertEquals(1, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(1, lruStats.getEvictions());
  assertEquals(1, diskStats.getNumOverflowOnDisk());
  assertEquals(total - 1, diskStats.getNumEntriesInVM());
  Object value = region.get(new Integer(0));
  flush(region);
  assertNotNull(value);
  assertEquals(0, ((int[]) value)[0]);
  LogWriterUtils.getLogWriter().info("DEBUG: writes=" + diskStats.getWrites() + " reads=" + diskStats.getReads() + " evictions=" + lruStats.getEvictions() + " total=" + total + " numEntriesInVM=" + diskStats.getNumEntriesInVM() + " numOverflows=" + diskStats.getNumOverflowOnDisk());
  assertEquals(2, diskStats.getWrites());
  assertEquals(1, diskStats.getReads());
  assertEquals(2, lruStats.getEvictions());
  for (int i = 0; i < total; i++) {
    int[] array = (int[]) region.get(new Integer(i));
    assertNotNull(array);
    assertEquals(i, array[0]);
  }
}
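Taken together, the assertions trace one value's round trip: the first eviction writes a single value to disk (writes = 1, numOverflowOnDisk = 1) while the rest stay in memory (numEntriesInVM = total - 1); the subsequent get of key 0 faults that value back in (reads = 1) and, because the region is already at its LRU capacity, pushes another entry out (writes = 2, evictions = 2).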