use of org.apache.geode.internal.cache.DiskRegion in project geode by apache.
the class MemberMBeanBridge, method addRegion.
public void addRegion(Region region) {
  if (region.getAttributes().getPartitionAttributes() != null) {
    addPartionRegionStats(((PartitionedRegion) region).getPrStats());
  }
  LocalRegion l = (LocalRegion) region;
  if (l.getEvictionController() != null) {
    LRUStatistics stats = l.getEvictionController().getLRUHelper().getStats();
    if (stats != null) {
      addLRUStats(stats);
    }
  }
  DiskRegion dr = l.getDiskRegion();
  if (dr != null) {
    for (DirectoryHolder dh : dr.getDirectories()) {
      addDirectoryStats(dh.getDiskDirectoryStats());
    }
  }
}
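A minimal sketch (not part of the Geode source) showing the same null-safe access pattern in isolation: cast to LocalRegion, check getDiskRegion() for null, then walk the DirectoryHolders. The logRegionDiskDirectories name and the println output are illustrative assumptions; only the casts and accessors visible in addRegion above are relied on.

private void logRegionDiskDirectories(Region region) {
  LocalRegion localRegion = (LocalRegion) region; // same cast used by addRegion
  DiskRegion diskRegion = localRegion.getDiskRegion();
  if (diskRegion == null) {
    return; // region neither persists nor overflows to disk
  }
  for (DirectoryHolder holder : diskRegion.getDirectories()) {
    // each DirectoryHolder exposes the per-directory statistics object fed to addDirectoryStats above
    System.out.println("disk directory stats: " + holder.getDiskDirectoryStats());
  }
}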
use of org.apache.geode.internal.cache.DiskRegion in project geode by apache.
the class PersistentRecoveryOrderDUnitTest, method testFinishIncompleteInitializationNoSend.
@Test
public void testFinishIncompleteInitializationNoSend() throws Exception {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  // Add a hook which will disconnect the DS before sending a prepare message
  vm1.invoke(new SerializableRunnable() {
    public void run() {
      DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
        @Override
        public void beforeSendMessage(DistributionManager dm, DistributionMessage message) {
          if (message instanceof PrepareNewPersistentMemberMessage) {
            DistributionMessageObserver.setInstance(null);
            getSystem().disconnect();
          }
        }

        @Override
        public void afterProcessMessage(DistributionManager dm, DistributionMessage message) {
        }
      });
    }
  });
  createPersistentRegion(vm0);
  putAnEntry(vm0);
  updateTheEntry(vm0);
  try {
    createPersistentRegion(vm1);
  } catch (Exception e) {
    if (!(e.getCause() instanceof DistributedSystemDisconnectedException)) {
      throw e;
    }
  }
  closeRegion(vm0);
  // vm1's region creation will block until vm0 comes back online
  AsyncInvocation async1 = createPersistentRegionAsync(vm1);
  waitForBlockedInitialization(vm1);
  createPersistentRegion(vm0);
  async1.getResult();
  checkForEntry(vm1);
  vm0.invoke(new SerializableRunnable("check for offline members") {
    public void run() {
      Cache cache = getCache();
      DistributedRegion region = (DistributedRegion) cache.getRegion(REGION_NAME);
      PersistentMembershipView view = region.getPersistenceAdvisor().getMembershipView();
      DiskRegion dr = region.getDiskRegion();
      assertEquals(Collections.emptySet(), dr.getOfflineMembers());
      assertEquals(1, dr.getOnlineMembers().size());
    }
  });
}
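The essential hook mechanism in the test above is DistributionMessageObserver. A minimal sketch, using only the observer methods visible in the test: install an observer that logs message traffic, run the scenario, then clear the hook so later tests are unaffected. The log output is illustrative.

DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
  @Override
  public void beforeSendMessage(DistributionManager dm, DistributionMessage message) {
    System.out.println("about to send " + message.getClass().getSimpleName());
  }

  @Override
  public void afterProcessMessage(DistributionManager dm, DistributionMessage message) {
    System.out.println("processed " + message.getClass().getSimpleName());
  }
});
// ... exercise the cluster ...
DistributionMessageObserver.setInstance(null); // always remove the hook when done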
use of org.apache.geode.internal.cache.DiskRegion in project geode by apache.
the class DiskRegionDUnitTest, method testLRUCapacityController.
/**
* Tests disk overflow with an entry-based {@link LRUCapacityController}.
*/
@Test
public void testLRUCapacityController() throws CacheException {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(1000, EvictionAction.OVERFLOW_TO_DISK));
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  factory.setDiskSynchronous(true);
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  dsf.setDiskDirs(new File[] {d});
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  flush(region);
  assertEquals(0, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(0, lruStats.getEvictions());
  // Fill the region up to the LRU entry limit; nothing should be evicted or written to disk yet
  for (int i = 1; i <= 1000; i++) {
    // System.out.println("total " + i + ", evictions " + lruStats.getEvictions());
    Object key = new Integer(i);
    Object value = String.valueOf(i);
    region.put(key, value);
    assertEquals(i, lruStats.getCounter());
    assertEquals(0, lruStats.getEvictions());
    assertEquals("On iteration " + i, 0, diskStats.getWrites());
    assertEquals(0, diskStats.getReads());
    assertEquals(0, diskStats.getNumOverflowOnDisk());
  }
  assertEquals(0, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(0, diskStats.getNumOverflowOnDisk());
  // Add a new value
  region.put(new Integer(1000 + 1), String.valueOf(1000 + 1));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(1, lruStats.getEvictions());
  assertEquals(1, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(1, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());
  // Add another new value
  region.put(new Integer(1000 + 2), String.valueOf(1000 + 2));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(2, lruStats.getEvictions());
  assertEquals(2, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(2, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());
  // Replace a value
  region.put(new Integer(1000), String.valueOf(1000));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(2, lruStats.getEvictions());
  assertEquals(2, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(2, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());
}
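The disk-overflow setup in this test (and the two that follow) boils down to pairing entry-LRU eviction with a named DiskStore. A condensed sketch of that configuration, assuming an existing Cache referenced as cache; the directory, store, and region names are placeholders, and cache.createRegion(String, RegionAttributes) is used here as an assumption in place of the test's createRegion helper.

AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.LOCAL);
factory.setEvictionAttributes(
    EvictionAttributes.createLRUEntryAttributes(1000, EvictionAction.OVERFLOW_TO_DISK));
factory.setDiskSynchronous(true);

File dir = new File("exampleDiskDir"); // placeholder directory
dir.mkdirs();
DiskStoreFactory dsf = cache.createDiskStoreFactory();
dsf.setDiskDirs(new File[] {dir});
DiskStore store = dsf.create("exampleStore"); // placeholder store name
factory.setDiskStoreName(store.getName());

Region region = cache.createRegion("exampleRegion", factory.create()); // placeholder region name
DiskRegion diskRegion = ((LocalRegion) region).getDiskRegion();
DiskRegionStats diskStats = diskRegion.getStats(); // writes/reads/overflow counters asserted above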
use of org.apache.geode.internal.cache.DiskRegion in project geode by apache.
the class DiskRegionDUnitTest, method testValues.
/**
* Tests iterating over all of the values when some have been overflowed.
*/
@Test
public void testValues() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] {d});
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  // DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  // DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  // Put in larger stuff until we start evicting
  int total;
  for (total = 0; lruStats.getEvictions() <= 0; total++) {
    int[] array = new int[250];
    array[0] = total;
    region.put(new Integer(total), array);
  }
  BitSet bits = new BitSet();
  Collection values = region.values();
  assertEquals(total, values.size());
  for (Iterator iter = values.iterator(); iter.hasNext();) {
    Object value = iter.next();
    assertNotNull(value);
    int[] array = (int[]) value;
    int i = array[0];
    assertFalse("Bit " + i + " is already set", bits.get(i));
    bits.set(i);
  }
}
use of org.apache.geode.internal.cache.DiskRegion in project geode by apache.
the class DiskRegionDUnitTest, method testRegionEntryValues.
// testSwitchOut is no longer valid
// the test was not written correctly to recover
// and if it was it would now fail with a split brain
// testSwitchIn is no longer valid
// we no longer switchIn files if GII aborts.
/**
* Tests getting the {@linkplain org.apache.geode.cache.Region.Entry#getValue values} of region
* entries that have been overflowed.
*/
@Test
public void testRegionEntryValues() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] {d});
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  // DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  // DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  // Put in larger stuff until we start evicting
  int total;
  for (total = 0; lruStats.getEvictions() <= 0; total++) {
    int[] array = new int[250];
    array[0] = total;
    region.put(new Integer(total), array);
  }
  // BitSet bits = new BitSet();
  Set values = region.entrySet(false);
  assertEquals(total, values.size());
  for (Iterator iter = values.iterator(); iter.hasNext();) {
    Region.Entry entry = (Region.Entry) iter.next();
    Integer key = (Integer) entry.getKey();
    int[] value = (int[]) entry.getValue();
    assertNotNull(value);
    assertEquals("Key/value" + key, key.intValue(), value[0]);
  }
}