Use of org.apache.geode.internal.cache.DiskRegionStats in project geode by apache.
The class RegionStatsJUnitTest, method init().
protected void init() {
  cachePerfStats = new CachePerfStats(system);
  partitionedRegionStats = new PartitionedRegionStats(system, "/tests");
  diskRegionStats = new DiskRegionStats(system, "test-disk");
  bridge = new RegionMBeanBridge(cachePerfStats);
  parBridge = new PartitionedRegionBridge(partitionedRegionStats);
  diskBridge = new DiskRegionBridge(diskRegionStats);
}
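As a quick orientation to the getters the tests below rely on, a newly constructed DiskRegionStats reports zero for every counter. A minimal sanity check against the fields built in init() could look like the following sketch (the test method name is illustrative; the getters are the same ones used by the DUnit tests further down).

@Test
public void freshStatsStartAtZero() {
  // Sketch only: a freshly created DiskRegionStats has not recorded any activity yet.
  assertEquals(0, diskRegionStats.getWrites());
  assertEquals(0, diskRegionStats.getReads());
  assertEquals(0, diskRegionStats.getNumOverflowOnDisk());
  assertEquals(0, diskRegionStats.getNumEntriesInVM());
}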
Use of org.apache.geode.internal.cache.DiskRegionStats in project geode by apache.
The class DiskRegionDUnitTest, method testLRUCapacityController().
/**
 * Tests disk overflow with an entry-based {@link LRUCapacityController}.
 */
@Test
public void testLRUCapacityController() throws CacheException {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(1000, EvictionAction.OVERFLOW_TO_DISK));
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  factory.setDiskSynchronous(true);
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  dsf.setDiskDirs(new File[] { d });
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  flush(region);
  assertEquals(0, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(0, lruStats.getEvictions());
  // Fill the region up to the LRU entry limit; nothing should be evicted or written to disk yet
  for (int i = 1; i <= 1000; i++) {
    // System.out.println("total " + i + ", evictions " + lruStats.getEvictions());
    Object key = new Integer(i);
    Object value = String.valueOf(i);
    region.put(key, value);
    assertEquals(i, lruStats.getCounter());
    assertEquals(0, lruStats.getEvictions());
    assertEquals("On iteration " + i, 0, diskStats.getWrites());
    assertEquals(0, diskStats.getReads());
    assertEquals(0, diskStats.getNumOverflowOnDisk());
  }
  assertEquals(0, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(0, diskStats.getNumOverflowOnDisk());
  // Add a new value; the least recently used entry overflows to disk
  region.put(new Integer(1000 + 1), String.valueOf(1000 + 1));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(1, lruStats.getEvictions());
  assertEquals(1, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(1, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());
  // Add another new value; a second entry overflows
  region.put(new Integer(1000 + 2), String.valueOf(1000 + 2));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(2, lruStats.getEvictions());
  assertEquals(2, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(2, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());
  // Replace a value that is still in VM; no eviction or disk activity results
  region.put(new Integer(1000), String.valueOf(1000));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(2, lruStats.getEvictions());
  assertEquals(2, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(2, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());
}
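A compact way to read the assertions above: once the 1000-entry LRU limit is reached, each put of a new key evicts exactly one entry to disk, while the in-VM and on-disk counts always partition the region's entries. A hedged one-line check of that invariant, reusing the region and diskStats variables from the test above, would be:

// Sketch: every entry is accounted for either in VM or overflowed to disk.
assertEquals(((LocalRegion) region).entryCount(),
    diskStats.getNumEntriesInVM() + diskStats.getNumOverflowOnDisk());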
Use of org.apache.geode.internal.cache.DiskRegionStats in project geode by apache.
The class DiskRegionDUnitTest, method testTestHookStatistics().
/**
 * Tests that the "test hook" {@link DiskRegionStats} work as advertised.
 */
@Test
public void testTestHookStatistics() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  // factory.setConcurrencyChecksEnabled(false);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  factory.setDiskSynchronous(true);
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  dsf.setDiskDirs(new File[] { d });
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  LocalRegion region = (LocalRegion) createRegion(name, factory.create());
  DiskRegion dr = region.getDiskRegion();
  DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  // Put in stuff until we start evicting
  int total;
  for (total = 0; lruStats.getEvictions() <= 0; total++) {
    int[] array = new int[1];
    array[0] = total;
    region.put(new Integer(total), array);
    if (lruStats.getEvictions() <= 0) {
      assertEquals(total + 1, diskStats.getNumEntriesInVM());
    }
  }
  assertEquals(1, diskStats.getNumOverflowOnDisk());
  // Net change of zero
  region.get(new Integer(0));
  assertEquals(region.entryCount(), diskStats.getNumEntriesInVM() + diskStats.getNumOverflowOnDisk());
  assertEquals(total - 1, diskStats.getNumEntriesInVM());
  assertEquals(1, diskStats.getNumOverflowOnDisk());
  // Kick out 4 entries
  region.put(new Integer(total + 10), new int[1]);
  region.put(new Integer(total + 11), new int[1]);
  region.put(new Integer(total + 12), new int[1]);
  region.put(new Integer(total + 13), new int[1]);
  assertEquals(region.entryCount(), diskStats.getNumEntriesInVM() + diskStats.getNumOverflowOnDisk());
  assertEquals(total - 1, diskStats.getNumEntriesInVM());
  assertEquals(5, diskStats.getNumOverflowOnDisk());
  // Make sure invalidate of inVM entry changes inVM count but not disk
  region.invalidate(new Integer(total + 10));
  assertEquals(region.entryCount() - 1, diskStats.getNumEntriesInVM() + diskStats.getNumOverflowOnDisk());
  assertEquals(total - 2, diskStats.getNumEntriesInVM());
  assertEquals(5, diskStats.getNumOverflowOnDisk());
  // Make sure local-invalidate of inVM entry changes inVM count but not disk
  region.localInvalidate(new Integer(total + 11));
  assertEquals(region.entryCount() - 2, diskStats.getNumEntriesInVM() + diskStats.getNumOverflowOnDisk());
  assertEquals(total - 3, diskStats.getNumEntriesInVM());
  assertEquals(5, diskStats.getNumOverflowOnDisk());
  // Make sure destroy of invalid entry does not change inVM or onDisk but changes entry count
  region.destroy(new Integer(total + 10));
  // ((LocalRegion) region).dumpBackingMap();
  assertEquals(region.entryCount() - 1, diskStats.getNumEntriesInVM() + diskStats.getNumOverflowOnDisk());
  assertEquals(total - 3, diskStats.getNumEntriesInVM());
  assertEquals(5, diskStats.getNumOverflowOnDisk());
  // Make sure destroy of inVM entry does change inVM but not onDisk
  region.destroy(new Integer(total + 12));
  assertEquals(region.entryCount() - 1, diskStats.getNumEntriesInVM() + diskStats.getNumOverflowOnDisk());
  assertEquals(total - 4, diskStats.getNumEntriesInVM());
  assertEquals(5, diskStats.getNumOverflowOnDisk());
  // Destroy an entry that has been overflowed
  region.destroy(new Integer(3));
  assertEquals(region.entryCount() - 1, diskStats.getNumEntriesInVM() + diskStats.getNumOverflowOnDisk());
  assertEquals(total - 4, diskStats.getNumEntriesInVM());
  assertEquals(4, diskStats.getNumOverflowOnDisk());
}
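The accounting this test checks repeatedly could be factored into a small helper. The sketch below is hypothetical (the helper name and the invalidCount parameter are not part of the original test) but captures the rule being verified: every valid entry is counted either in VM or on disk, and invalidated entries drop out of both counts.

private static void assertEntryAccounting(LocalRegion region, DiskRegionStats diskStats, int invalidCount) {
  // invalidCount: entries that have been invalidated and therefore appear in neither counter
  assertEquals(region.entryCount() - invalidCount,
      diskStats.getNumEntriesInVM() + diskStats.getNumOverflowOnDisk());
}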
Use of org.apache.geode.internal.cache.DiskRegionStats in project geode by apache.
The class DiskManagementDUnitTest, method verifyRecoveryStats().
private void verifyRecoveryStats(final VM memberVM, final boolean localRecovery) {
  memberVM.invoke("verifyRecoveryStats", () -> {
    Cache cache = this.managementTestRule.getCache();
    Region region = cache.getRegion(REGION_NAME);
    DistributedRegion distributedRegion = (DistributedRegion) region;
    DiskRegionStats stats = distributedRegion.getDiskRegion().getStats();
    if (localRecovery) {
      assertThat(stats.getLocalInitializations()).isEqualTo(1);
      assertThat(stats.getRemoteInitializations()).isEqualTo(0);
    } else {
      assertThat(stats.getLocalInitializations()).isEqualTo(0);
      assertThat(stats.getRemoteInitializations()).isEqualTo(1);
    }
  });
}
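A typical call site for this helper (hypothetical, but consistent with how the boolean flag is interpreted above) would assert local recovery on the member that kept its own disk files and remote recovery on a member initialized from a peer:

// Hypothetical usage; memberVM and otherMemberVM are dUnit VM handles from the test.
verifyRecoveryStats(memberVM, true);        // recovered its values from its local disk store
verifyRecoveryStats(otherMemberVM, false);  // initialized remotely from another member (GII)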
Use of org.apache.geode.internal.cache.DiskRegionStats in project geode by apache.
The class TestDiskRegion, method main1().
public static void main1(String[] args) throws Exception {
  DistributedSystem system = DistributedSystem.connect(new java.util.Properties());
  Cache cache = CacheFactory.create(system);
  AttributesFactory factory = new AttributesFactory();
  // 2 MB memory-based LRU; the values below are ~200 KB each, so overflow to disk begins after a handful of puts
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUMemoryAttributes(2, (ObjectSizer) null, EvictionAction.OVERFLOW_TO_DISK));
  factory.setCacheListener(new CacheListenerAdapter() {
    public void afterUpdate(EntryEvent event) {
      System.out.println("UPDATE: " + event.getKey() + " -> (" + event.getOldValue() + " -> " + event.getNewValue() + ")");
    }
  });
  LocalRegion region = (LocalRegion) cache.createRegion("TestDiskRegion", factory.create());
  DiskRegion dr = region.getDiskRegion();
  DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
  System.out.println("Hit enter to perform action");
  for (int i = 0; true; i++) {
    br.readLine();
    // Thread.sleep(500);
    Object key = new Integer(i);
    Object value = new byte[200000];
    region.put(key, value);
    System.out.println(key + " -> " + value + " evictions = " + lruStats.getEvictions()
        + ", writes = " + diskStats.getWrites());
  }
}