Use of org.apache.geode.cache.DiskStore in project geode by apache.
From the class DiskRegionDUnitTest, method fillUpOverflowRegion:
/**
* Tests that an {@link IllegalStateException} is thrown when the region is full of keys and
* entries.
*/
@Test
public void fillUpOverflowRegion() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] {d});
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  for (int i = 0; i < 10000; i++) {
    int[] array = new int[1000];
    array[0] = i;
    try {
      region.put(array, new Integer(i));
    } catch (IllegalStateException ex) {
      String message = ex.getMessage();
      assertTrue(message.indexOf("is full with") != -1);
      return;
    }
  }
  fail("Should have thrown an IllegalStateException");
}
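The snippet above shows the standard wiring for overflow-to-disk: create a DiskStoreFactory, point it at one or more directories, create the DiskStore, and attach it to the region by name. Outside the DUnit harness, the same setup looks roughly like the following sketch; the store, region, and directory names here are made up for illustration:

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.DiskStoreFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class OverflowRegionExample {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    // The disk store owns the files that overflowed entry values are written to.
    File dir = new File("overflowDir");
    dir.mkdirs();
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setDiskDirs(new File[] {dir});
    DiskStore store = dsf.create("exampleStore");
    // Keep at most 100 entries in memory; evict the coldest values to the disk store.
    Region<Integer, int[]> region = cache.<Integer, int[]>createRegionFactory(RegionShortcut.LOCAL)
        .setEvictionAttributes(
            EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK))
        .setDiskStoreName(store.getName())
        .create("exampleRegion");
    region.put(0, new int[1000]);
    cache.close();
  }
}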
Use of org.apache.geode.cache.DiskStore in project geode by apache.
From the class DiskRegionDUnitTest, method testDistributedInvalidate:
/**
* Tests that invalidates and updates received from different VMs are handled appropriately by
* overflow regions.
*/
@Test
public void testDistributedInvalidate() throws Exception {
  final String name = this.getUniqueName();
  SerializableRunnable create = new CacheSerializableRunnable("Create region") {
    public void run2() throws CacheException {
      AttributesFactory factory = new AttributesFactory();
      factory.setScope(Scope.DISTRIBUTED_ACK);
      factory.setEarlyAck(false);
      factory.setEvictionAttributes(
          EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
      File d = new File("DiskRegions" + OSProcess.getId());
      d.mkdirs();
      DiskStoreFactory dsf = getCache().createDiskStoreFactory();
      dsf.setDiskDirs(new File[] {d});
      DiskStore ds = dsf.create(name);
      factory.setDiskStoreName(ds.getName());
      createRegion(name, factory.create());
    }
  };
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  vm0.invoke(create);
  vm1.invoke(create);
  vm0.invoke(new CacheSerializableRunnable("Fill Region") {
    public void run2() throws CacheException {
      LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
      LRUStatistics lruStats = getLRUStats(region);
      for (int i = 0; lruStats.getEvictions() < 10; i++) {
        LogWriterUtils.getLogWriter().info("Put " + i);
        region.put(new Integer(i), new byte[1]);
      }
      assertEquals(10, lruStats.getEvictions());
    }
  });
  final Object key = new Integer(20);
  vm1.invoke(new CacheSerializableRunnable("Invalidate entry") {
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      assertNotNull(region.get(key));
      region.invalidate(key);
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Verify invalidate") {
    public void run2() throws CacheException {
      final Region region = getRootRegion().getSubregion(name);
      WaitCriterion ev = new WaitCriterion() {
        public boolean done() {
          return region.get(key) == null;
        }

        public String description() {
          return "value for key remains: " + key;
        }
      };
      Wait.waitForCriterion(ev, 500, 200, true);
    }
  });
  final String newValue = "NEW VALUE";
  vm1.invoke(new CacheSerializableRunnable("Update entry") {
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      region.put(key, newValue);
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Verify update") {
    public void run2() throws CacheException {
      final Region region = getRootRegion().getSubregion(name);
      WaitCriterion ev = new WaitCriterion() {
        public boolean done() {
          return newValue.equals(region.get(key));
        }

        public String description() {
          return "verify update";
        }
      };
      Wait.waitForCriterion(ev, 500, 200, true);
    }
  });
}
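The two verification steps poll with a WaitCriterion because the invalidate and the update arrive asynchronously from the other VM. The invalidate semantics the test depends on can be summarized in a few lines; a sketch, assuming a Region<Integer, String> named region configured for overflow as in the earlier example:

// Hypothetical region: Region<Integer, String> configured for overflow as above.
region.put(20, "v1");
region.invalidate(20);
assert region.containsKey(20);            // the entry itself survives the invalidate
assert !region.containsValueForKey(20);   // but its value is gone
assert region.get(20) == null;            // true whether the value was in memory or on disk
region.put(20, "NEW VALUE");              // a later update re-creates the value
assert "NEW VALUE".equals(region.get(20));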
Use of org.apache.geode.cache.DiskStore in project geode by apache.
From the class DiskRegionDUnitTest, method testInvalidate:
/**
* Tests that once an overflowed entry is {@linkplain Region#invalidate invalidated} its value is
* gone.
*/
@Test
public void testInvalidate() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] {d});
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  LRUStatistics lruStats = getLRUStats(region);
  // Put in larger stuff until we start evicting
  int total;
  for (total = 0; lruStats.getEvictions() <= 10; total++) {
    int[] array = new int[250];
    array[0] = total;
    region.put(new Integer(total), array);
  }
  region.invalidate(new Integer(0));
  assertNull(region.get(new Integer(0)));
}
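Entry 0 is the coldest entry, so by the end of the loop its value lives only in the disk store; the test thus checks that invalidate clears the on-disk copy rather than leaving a stale value that a later get() could fault back in. For contrast, a plain get() on an overflowed (but not invalidated) key transparently reads the value back from disk; a sketch, assuming the overflow region from the first example holds int[] values:

// Fill well past the 100-entry in-memory limit so the earliest keys overflow to disk.
for (int i = 0; i < 1000; i++) {
  region.put(i, new int[250]);
}
// A get() on an overflowed key faults the value back in from the disk store.
assertNotNull(region.get(0));
// invalidate removes the value everywhere, in memory and on disk.
region.invalidate(0);
assertNull(region.get(0));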
Use of org.apache.geode.cache.DiskStore in project geode by apache.
From the class CacheXml66DUnitTest, method testDiskStoreInTemplates:
/**
 * Tests that regions created from named region-attributes templates pick up the disk stores
 * configured in those templates.
 */
@Test
public void testDiskStoreInTemplates() throws Exception {
  File dir = new File("west");
  dir.mkdir();
  dir.deleteOnExit();
  dir = new File("east");
  dir.mkdir();
  dir.deleteOnExit();
  setXmlFile(findFile("ewtest.xml"));
  String regionName_west = "orders/west";
  String regionName_east = "orders/east";
  Cache cache = getCache();
  // verify disk stores
  DiskStore ds = cache.findDiskStore("persistentDiskStore1");
  assertNotNull(ds);
  assertEquals(500, ds.getQueueSize());
  File[] dirs = ds.getDiskDirs();
  assertEquals("west", dirs[0].getPath());
  ds = cache.findDiskStore("persistentDiskStore2");
  assertNotNull(ds);
  assertEquals(500, ds.getQueueSize());
  dirs = ds.getDiskDirs();
  assertEquals("east", dirs[0].getPath());
  // verify templates
  assertNotNull(cache.getRegionAttributes("nack"));
  RegionAttributes attrs = cache.getRegionAttributes("persistent");
  assertEquals(DataPolicy.PERSISTENT_REPLICATE, attrs.getDataPolicy());
  assertEquals(false, attrs.isDiskSynchronous());
  assertEquals("persistentDiskStore1", attrs.getDiskStoreName());
  Region region = cache.getRegion(regionName_west);
  assertNotNull(region);
  attrs = region.getAttributes();
  assertEquals(DataPolicy.PERSISTENT_REPLICATE, attrs.getDataPolicy());
  assertEquals(false, attrs.isDiskSynchronous());
  assertEquals("persistentDiskStore1", attrs.getDiskStoreName());
  region = cache.getRegion(regionName_east);
  assertNotNull(region);
  // Make sure that attributes can be "overridden"
  attrs = region.getAttributes();
  assertEquals(DataPolicy.PERSISTENT_REPLICATE, attrs.getDataPolicy());
  assertEquals(false, attrs.isDiskSynchronous());
  assertEquals("persistentDiskStore2", attrs.getDiskStoreName());
  // bug 41934
  String regionName_datap = "data-p";
  region = cache.getRegion(regionName_datap);
  assertNotNull(region);
  attrs = region.getAttributes();
  PartitionAttributes pa = attrs.getPartitionAttributes();
  assertEquals(1, pa.getRedundantCopies());
  assertEquals(3, pa.getTotalNumBuckets());
  assertEquals(DataPolicy.PERSISTENT_PARTITION, attrs.getDataPolicy());
}
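The queue-size assertions refer to the disk store's asynchronous write queue: because attrs.isDiskSynchronous() is false, writes are buffered and flushed in batches. The ewtest.xml file itself is not shown here, but a programmatic equivalent of one of the disk store / region pairs it declares would look roughly like the sketch below; the values mirror the test's assertions, and the names are taken from them:

// Hypothetical programmatic equivalent of what ewtest.xml is asserted to declare
// (the real file may differ); assumes a Cache named cache.
DiskStoreFactory dsf = cache.createDiskStoreFactory();
dsf.setDiskDirs(new File[] {new File("west")});
dsf.setQueueSize(500);                   // matches assertEquals(500, ds.getQueueSize())
DiskStore ds = dsf.create("persistentDiskStore1");
Region<String, Object> west = cache.<String, Object>createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT)
    .setDiskStoreName(ds.getName())
    .setDiskSynchronous(false)           // matches attrs.isDiskSynchronous() == false
    .create("west");                     // illustrative; the XML nests this as the subregion orders/west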
Use of org.apache.geode.cache.DiskStore in project geode by apache.
From the class PartitionedRegionEvictionDUnitTest, method testEntryLRUWithOverflowToDisk:
@Test
public void testEntryLRUWithOverflowToDisk() {
  final Host host = Host.getHost(0);
  final VM vm2 = host.getVM(2);
  final VM vm3 = host.getVM(3);
  final String uniqName = getUniqueName();
  final int redundantCopies = 1;
  final int maxBuckets = 8;
  final int maxEntries = 16;
  final String name = uniqName + "-PR";
  final SerializableRunnable create =
      new SerializableRunnable("Create Entry LRU with Overflow to disk partitioned Region") {
        public void run() {
          try {
            final AttributesFactory factory = new AttributesFactory();
            factory.setOffHeap(isOffHeap());
            factory.setPartitionAttributes(new PartitionAttributesFactory()
                .setRedundantCopies(redundantCopies).setTotalNumBuckets(maxBuckets).create());
            factory.setEvictionAttributes(EvictionAttributes
                .createLRUEntryAttributes(maxEntries, EvictionAction.OVERFLOW_TO_DISK));
            factory.setDiskSynchronous(true);
            DiskStoreFactory dsf = getCache().createDiskStoreFactory();
            final File[] diskDirs = new File[1];
            diskDirs[0] = new File("overflowDir/" + uniqName + "_" + OSProcess.getId());
            diskDirs[0].mkdirs();
            dsf.setDiskDirs(diskDirs);
            DiskStore ds = dsf.create(name);
            factory.setDiskStoreName(ds.getName());
            final PartitionedRegion pr = (PartitionedRegion) createRootRegion(name, factory.create());
            assertNotNull(pr);
          } catch (final CacheException ex) {
            Assert.fail("While creating Partitioned region", ex);
          }
        }
      };
  vm3.invoke(create);
  vm2.invoke(create);
  final int extraEntries = 4;
  final SerializableRunnable createBuckets = new SerializableRunnable("Create Buckets") {
    public void run() {
      final PartitionedRegion pr = (PartitionedRegion) getRootRegion(name);
      assertNotNull(pr);
      for (int counter = 1; counter <= maxEntries + extraEntries; counter++) {
        pr.put(new Integer(counter), new byte[1 * 1024 * 1024]);
      }
    }
  };
  vm3.invoke(createBuckets);
  final SerializableCallable assertBucketAttributesAndEviction =
      new SerializableCallable("Assert bucket attributes and eviction") {
        public Object call() throws Exception {
          final PartitionedRegion pr = (PartitionedRegion) getRootRegion(name);
          assertNotNull(pr);
          // assert overflow behavior in local buckets and count the entries overflowed
          int entriesEvicted = 0;
          for (final Iterator i = pr.getDataStore().getAllLocalBuckets().iterator(); i.hasNext();) {
            final Map.Entry entry = (Map.Entry) i.next();
            final BucketRegion bucketRegion = (BucketRegion) entry.getValue();
            if (bucketRegion == null) {
              continue;
            }
            assertTrue(bucketRegion.getAttributes().getEvictionAttributes().getAlgorithm().isLRUEntry());
            assertTrue(bucketRegion.getAttributes().getEvictionAttributes().getAction().isOverflowToDisk());
          }
          entriesEvicted += pr.getDiskRegionStats().getNumOverflowOnDisk();
          return new Integer(entriesEvicted);
        }
      };
  final Integer vm2i = (Integer) vm2.invoke(assertBucketAttributesAndEviction);
  final Integer vm3i = (Integer) vm3.invoke(assertBucketAttributesAndEviction);
  final int totalEvicted = vm2i.intValue() + vm3i.intValue();
  assertEquals(extraEntries * 2, totalEvicted);
}
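The arithmetic behind the final assertion: with redundantCopies = 1 and two members, each of the maxEntries + extraEntries = 20 entries is hosted by both VMs, so each member exceeds its 16-entry in-memory limit by 4 entries and overflows them, giving extraEntries * 2 = 8 overflowed entries in total. A standalone sketch of the same partitioned-region configuration, assuming the Cache from the first sketch and a disk store named overflowStore already exist:

// Partitioned region with one redundant copy, 8 buckets, and entry-LRU overflow;
// for partitioned regions the 16-entry LRU limit applies per member, not per bucket.
PartitionAttributes<Integer, byte[]> pa = new PartitionAttributesFactory<Integer, byte[]>()
    .setRedundantCopies(1)
    .setTotalNumBuckets(8)
    .create();
Region<Integer, byte[]> pr = cache.<Integer, byte[]>createRegionFactory(RegionShortcut.PARTITION)
    .setPartitionAttributes(pa)
    .setEvictionAttributes(
        EvictionAttributes.createLRUEntryAttributes(16, EvictionAction.OVERFLOW_TO_DISK))
    .setDiskStoreName("overflowStore")   // assumed to have been created beforehand
    .setDiskSynchronous(true)
    .create("example-pr");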