Use of org.apache.geode.cache.DiskStore in project geode by apache.
From the class DescribeDiskStoreFunctionJUnitTest: method createMockDiskStore.
/**
 * Builds a jMock {@link DiskStore} whose getters report the supplied configuration values.
 *
 * Cardinalities encode how the code under test is expected to read the store:
 * {@code oneOf} getters exactly once, {@code atLeast(1)} identity accessors one or more
 * times, and {@code allowing} getters any number of times (including zero).
 *
 * @param diskStoreId UUID returned by {@code getDiskStoreUUID()}
 * @param name mock name; also returned by {@code getName()}
 * @return the configured mock disk store
 */
private DiskStore createMockDiskStore(final UUID diskStoreId, final String name, final boolean allowForceCompaction, final boolean autoCompact, final int compactionThreshold, final long maxOplogSize, final int queueSize, final long timeInterval, final int writeBufferSize, final File[] diskDirs, final int[] diskDirSizes, final float warningPercentage, final float criticalPercentage) {
  final DiskStore diskStore = mockContext.mock(DiskStore.class, name);
  mockContext.checking(new Expectations() {
    {
      // Compaction-related getters: read exactly once each.
      oneOf(diskStore).getAllowForceCompaction();
      will(returnValue(allowForceCompaction));
      oneOf(diskStore).getAutoCompact();
      will(returnValue(autoCompact));
      oneOf(diskStore).getCompactionThreshold();
      will(returnValue(compactionThreshold));
      // Identity accessors: may be consulted repeatedly.
      atLeast(1).of(diskStore).getDiskStoreUUID();
      will(returnValue(diskStoreId));
      oneOf(diskStore).getMaxOplogSize();
      will(returnValue(maxOplogSize));
      atLeast(1).of(diskStore).getName();
      will(returnValue(name));
      // Buffering/queueing getters: read exactly once each.
      oneOf(diskStore).getQueueSize();
      will(returnValue(queueSize));
      oneOf(diskStore).getTimeInterval();
      will(returnValue(timeInterval));
      oneOf(diskStore).getWriteBufferSize();
      will(returnValue(writeBufferSize));
      // Directory and usage-threshold getters: no cardinality constraint.
      allowing(diskStore).getDiskDirs();
      will(returnValue(diskDirs));
      allowing(diskStore).getDiskDirSizes();
      will(returnValue(diskDirSizes));
      allowing(diskStore).getDiskUsageWarningPercentage();
      will(returnValue(warningPercentage));
      allowing(diskStore).getDiskUsageCriticalPercentage();
      will(returnValue(criticalPercentage));
    }
  });
  return diskStore;
}
Use of org.apache.geode.cache.DiskStore in project geode by apache.
From the class DiskRegionDUnitTest: method testBackup.
@Test
public void testBackup() throws Exception {
  final String name = this.getUniqueName();

  // Persistent region that overflows to disk past 100 entries.
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);

  File diskDir = new File("DiskRegions" + OSProcess.getId());
  diskDir.mkdirs();

  DiskStoreFactory diskStoreFactory = getCache().createDiskStoreFactory();
  diskStoreFactory.setDiskDirs(new File[] { diskDir });
  factory.setDiskSynchronous(true);
  DiskStore diskStore = diskStoreFactory.create(name);
  factory.setDiskStoreName(diskStore.getName());

  int expectedKeys = 10;
  {
    Region region = createRegion(name, factory.create());
    for (int i = 0; i < expectedKeys; i++) {
      String key = String.valueOf(i);
      region.put(key, key);
    }

    // Destroyed entries must not survive the backup.
    region.put("foobar", "junk");
    region.localDestroy("foobar");
    region.put("foobar2", "junk");
    flush(region);
    region.localDestroy("foobar2");

    // Invalidate: key remains, value is gone.
    region.put("invalid", "invalid");
    flush(region);
    region.invalidate("invalid");
    flush(region);
    assertTrue(region.containsKey("invalid") && !region.containsValueForKey("invalid"));
    expectedKeys++;

    // Local invalidate behaves the same way on a local-scope region.
    region.put("localinvalid", "localinvalid");
    flush(region);
    region.localInvalidate("localinvalid");
    flush(region);
    assertTrue(region.containsKey("localinvalid") && !region.containsValueForKey("localinvalid"));
    expectedKeys++;

    // Empty byte[] values must round-trip through disk.
    region.put("byteArray", new byte[0]);
    flush(region);
    assertArrayEquals(new byte[0], region.get("byteArray"));
    expectedKeys++;

    // An update must overwrite the previously persisted value.
    region.put("modified", "originalValue");
    flush(region);
    region.put("modified", "modified");
    flush(region);
    assertEquals("modified", region.get("modified"));
    expectedKeys++;

    assertEquals(expectedKeys, region.keySet().size());
  }

  // TODO: need a close that does not remove the disk files.
  closeCache();
  getCache();
  {
    // Recreate the disk store over the same directory and verify recovery.
    diskStoreFactory = getCache().createDiskStoreFactory();
    diskStoreFactory.setDiskDirs(new File[] { diskDir });
    diskStoreFactory.create(name);
    Region region = createRegion(name, factory.create());
    assertEquals(expectedKeys, region.keySet().size());

    assertTrue(region.containsKey("invalid") && !region.containsValueForKey("invalid"));
    region.localDestroy("invalid");
    expectedKeys--;
    assertTrue(region.containsKey("localinvalid") && !region.containsValueForKey("localinvalid"));
    region.localDestroy("localinvalid");
    expectedKeys--;
    assertArrayEquals(new byte[0], region.get("byteArray"));
    region.localDestroy("byteArray");
    expectedKeys--;
    assertEquals("modified", region.get("modified"));
    region.localDestroy("modified");
    expectedKeys--;
  }
}
Use of org.apache.geode.cache.DiskStore in project geode by apache.
From the class DiskRegionDUnitTest: method testRemoteUpdates.
/**
 * Makes sure that updates from other VMs cause existing entries to be written to disk.
 */
@Test
public void testRemoteUpdates() throws Exception {
  final String name = this.getUniqueName();

  // Each VM hosts the same distributed region, overflowing to disk past 2 MB.
  SerializableRunnable createRegionTask = new CacheSerializableRunnable("Create region") {
    public void run2() throws CacheException {
      AttributesFactory factory = new AttributesFactory();
      factory.setScope(Scope.DISTRIBUTED_NO_ACK);
      factory.setEvictionAttributes(
          EvictionAttributes.createLRUMemoryAttributes(2, null, EvictionAction.OVERFLOW_TO_DISK));
      File diskDir = new File("DiskRegions" + OSProcess.getId());
      diskDir.mkdirs();
      DiskStoreFactory diskStoreFactory = getCache().createDiskStoreFactory();
      diskStoreFactory.setDiskDirs(new File[] { diskDir });
      DiskStore diskStore = diskStoreFactory.create(name);
      factory.setDiskStoreName(diskStore.getName());
      createRegion(name, factory.create());
    }
  };

  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  vm0.invoke(createRegionTask);
  vm1.invoke(createRegionTask);

  vm0.invoke(new CacheSerializableRunnable("Fill Region") {
    public void run2() throws CacheException {
      LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
      LRUStatistics lruStats = getLRUStats(region);
      // Keep adding entries until the first eviction happens.
      int count;
      for (count = 0; lruStats.getEvictions() <= 0; count++) {
        region.put(Integer.valueOf(count), new short[250]);
      }
      assertTrue(count > 5);
    }
  });

  vm1.invoke(new CacheSerializableRunnable("Update Region") {
    public void run2() throws CacheException {
      LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
      // Overwrite the first few entries from the other member.
      for (int key = 0; key < 10; key++) {
        region.put(Integer.valueOf(key), new int[250]);
      }
    }
  });

  vm0.invoke(new CacheSerializableRunnable("Verify overflow") {
    public void run2() throws CacheException {
      LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
      final LRUStatistics lruStats = getLRUStats(region);
      // The remote updates should push more entries out to disk.
      WaitCriterion criterion = new WaitCriterion() {
        public boolean done() {
          return lruStats.getEvictions() > 6;
        }

        public String description() {
          return "waiting for evictions to exceed 6";
        }
      };
      Wait.waitForCriterion(criterion, 5 * 1000, 200, true);
    }
  });

  vm0.invoke(new CacheSerializableRunnable("Populate with byte[]") {
    public void run2() throws CacheException {
      LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
      for (int key = 0; key < 10000; key++) {
        region.put(String.valueOf(key), String.valueOf(key).getBytes());
      }
    }
  });

  vm1.invoke(new CacheSerializableRunnable("Get with byte[]") {
    public void run2() throws CacheException {
      LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
      // Every value written in vm0 must be readable here, possibly faulting in from disk.
      for (int key = 0; key < 10000; key++) {
        byte[] value = (byte[]) region.get(String.valueOf(key));
        assertEquals(String.valueOf(key), new String(value));
      }
    }
  });
}
Use of org.apache.geode.cache.DiskStore in project geode by apache.
From the class DiskRegionDUnitTest: method testEvictValueOnRegionWithEvictionAttributes.
/**
 * Tests that calling {@code evictValue()} is rejected on a region configured with
 * eviction attributes.
 */
@Test
public void testEvictValueOnRegionWithEvictionAttributes() throws Exception {
  final String name = this.getUniqueName() + "testRegionEvictValue";
  File diskDir = new File("DiskRegions" + OSProcess.getId());
  diskDir.mkdirs();

  // Persistent local region that overflows to disk past 100 entries.
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));

  DiskStoreFactory diskStoreFactory = getCache().createDiskStoreFactory();
  diskStoreFactory.setDiskDirs(new File[] { diskDir });
  DiskStore diskStore = diskStoreFactory.create(name);
  factory.setDiskStoreName(diskStore.getName());

  Region region = createRegion(name, factory.create());
  int entryCount = 200;
  for (int i = 0; i < entryCount; i++) {
    region.put("Key-" + i, Integer.valueOf(i));
  }

  // evictValue() must fail on every key while eviction attributes are in effect.
  for (int i = 0; i < entryCount / 4; i++) {
    try {
      ((LocalRegion) region).evictValue("Key-" + i);
      fail("Should have thrown exception with, evictValue not supported on region with eviction attributes.");
    } catch (Exception expected) {
      // Expected: continue with the next key.
    }
  }
}
Use of org.apache.geode.cache.DiskStore in project geode by apache.
From the class DiskRegionDUnitTest: method testLRUCapacityController.
/**
 * Tests disk overflow with an entry-based {@link LRUCapacityController}.
 */
@Test
public void testLRUCapacityController() throws CacheException {
  final String name = this.getUniqueName();

  // Local region limited to 1000 entries; extra entries overflow to disk synchronously.
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(1000, EvictionAction.OVERFLOW_TO_DISK));
  DiskStoreFactory diskStoreFactory = getCache().createDiskStoreFactory();
  factory.setDiskSynchronous(true);
  File diskDir = new File("DiskRegions" + OSProcess.getId());
  diskDir.mkdirs();
  diskStoreFactory.setDiskDirs(new File[] { diskDir });
  DiskStore diskStore = diskStoreFactory.create(name);
  factory.setDiskStoreName(diskStore.getName());

  Region region = createRegion(name, factory.create());
  DiskRegion diskRegion = ((LocalRegion) region).getDiskRegion();
  DiskRegionStats diskStats = diskRegion.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  flush(region);

  // Fresh region: no disk traffic, no evictions.
  assertEquals(0, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(0, lruStats.getEvictions());

  // Fill exactly to capacity: still no evictions and no disk activity.
  for (int i = 1; i <= 1000; i++) {
    region.put(Integer.valueOf(i), String.valueOf(i));
    assertEquals(i, lruStats.getCounter());
    assertEquals(0, lruStats.getEvictions());
    assertEquals("On iteration " + i, 0, diskStats.getWrites());
    assertEquals(0, diskStats.getReads());
    assertEquals(0, diskStats.getNumOverflowOnDisk());
  }
  assertEquals(0, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(0, diskStats.getNumOverflowOnDisk());

  // One entry past capacity: exactly one entry is evicted and written to disk.
  region.put(Integer.valueOf(1000 + 1), String.valueOf(1000 + 1));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(1, lruStats.getEvictions());
  assertEquals(1, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(1, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());

  // A second new entry: a second eviction and write.
  region.put(Integer.valueOf(1000 + 2), String.valueOf(1000 + 2));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(2, lruStats.getEvictions());
  assertEquals(2, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(2, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());

  // Replacing an in-VM entry causes neither an eviction nor a disk write.
  region.put(Integer.valueOf(1000), String.valueOf(1000));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(2, lruStats.getEvictions());
  assertEquals(2, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(2, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());
}
Aggregations