use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class ParallelGatewaySenderQueueOverflowDUnitTest method test_ValidateParallelGatewaySenderQueueAttributes_1.
/**
 * Test to validate the ParallelGatewaySenderQueue diskSynchronous attribute when persistence of
 * the sender is enabled.
 */
@Ignore("TODO: test is disabled")
@Test
public void test_ValidateParallelGatewaySenderQueueAttributes_1() {
  Integer localLocPort = (Integer) vm0.invoke(() -> WANTestBase.createFirstLocatorWithDSId(1));
  Integer remoteLocPort =
      (Integer) vm1.invoke(() -> WANTestBase.createFirstRemoteLocator(2, localLocPort));
  WANTestBase test = new WANTestBase();
  Properties props = test.getDistributedSystemProperties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "localhost[" + localLocPort + "]");
  InternalDistributedSystem ds = test.getSystem(props);
  cache = CacheFactory.create(ds);
  // unique disk directory per run and per VM so concurrent runs do not collide
  File directory =
      new File("TKSender" + "_disk_" + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
  directory.mkdir();
  File[] dirs1 = new File[] {directory};
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setDiskDirs(dirs1);
  DiskStore diskStore = dsf.create("FORNY");
  GatewaySenderFactory fact = cache.createGatewaySenderFactory();
  // set parallel to true
  fact.setParallel(true);
  fact.setBatchConflationEnabled(true);
  fact.setBatchSize(200);
  fact.setBatchTimeInterval(300);
  // enable the persistence
  fact.setPersistenceEnabled(true);
  fact.setDiskSynchronous(true);
  fact.setDiskStoreName("FORNY");
  fact.setMaximumQueueMemory(200);
  fact.setAlertThreshold(1200);
  GatewayEventFilter myEventFilter1 = new MyGatewayEventFilter1();
  fact.addGatewayEventFilter(myEventFilter1);
  GatewayTransportFilter myStreamFilter1 = new MyGatewayTransportFilter1();
  fact.addGatewayTransportFilter(myStreamFilter1);
  GatewayTransportFilter myStreamFilter2 = new MyGatewayTransportFilter2();
  fact.addGatewayTransportFilter(myStreamFilter2);
  final IgnoredException exTKSender = IgnoredException.addIgnoredException("Could not connect");
  try {
    GatewaySender sender1 = fact.create("TKSender", 2);
    AttributesFactory factory = new AttributesFactory();
    factory.addGatewaySenderId(sender1.getId());
    factory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    Region region = cache.createRegionFactory(factory.create())
        .create("test_ValidateGatewaySenderAttributes");
    Set<GatewaySender> senders = cache.getGatewaySenders();
    assertEquals(1, senders.size());
    GatewaySender gatewaySender = senders.iterator().next();
    Set<RegionQueue> regionQueues = ((AbstractGatewaySender) gatewaySender).getQueues();
    assertEquals(1, regionQueues.size());
    RegionQueue regionQueue = regionQueues.iterator().next();
    // the queue region must inherit the sender's disk-synchronous setting
    assertTrue(regionQueue.getRegion().getAttributes().isDiskSynchronous());
  } finally {
    exTKSender.remove();
  }
}
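Stripped of the dUnit scaffolding, the pattern this test exercises is small: create a named DiskStore, point a persistent gateway sender at it, and tie a region to the sender by id. A minimal sketch under assumed conditions (a locator already running on localhost[10334]; the store, sender, and region names here are invented):

// Sketch, not from the test: wire a persistent, parallel gateway sender to a named disk store.
Cache cache = new CacheFactory()
    .set("locators", "localhost[10334]")
    .create();

DiskStoreFactory dsf = cache.createDiskStoreFactory();
dsf.setDiskDirs(new File[] {new File("senderDisk")});
DiskStore store = dsf.create("senderStore");

GatewaySenderFactory gsf = cache.createGatewaySenderFactory();
gsf.setParallel(true);
gsf.setPersistenceEnabled(true); // queue contents survive member restarts
gsf.setDiskSynchronous(true);    // queue writes are flushed to disk synchronously
gsf.setDiskStoreName(store.getName());
GatewaySender sender = gsf.create("sketchSender", 2); // 2 = remote distributed system id

// A region must reference the sender id for its events to be queued.
Region<String, String> region = cache
    .<String, String>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
    .addGatewaySenderId(sender.getId())
    .create("sketchRegion");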
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class DiskRegionDUnitTest method testBackup.
@Test
public void testBackup() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] {d});
  factory.setDiskSynchronous(true);
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  int total = 10;
  {
    Region region = createRegion(name, factory.create());
    for (int i = 0; i < total; i++) {
      String s = String.valueOf(i);
      region.put(s, s);
    }
    region.put("foobar", "junk");
    region.localDestroy("foobar");
    region.put("foobar2", "junk");
    flush(region);
    region.localDestroy("foobar2");
    // test invalidate
    region.put("invalid", "invalid");
    flush(region);
    region.invalidate("invalid");
    flush(region);
    assertTrue(region.containsKey("invalid") && !region.containsValueForKey("invalid"));
    total++;
    // test local-invalidate
    region.put("localinvalid", "localinvalid");
    flush(region);
    region.localInvalidate("localinvalid");
    flush(region);
    assertTrue(region.containsKey("localinvalid") && !region.containsValueForKey("localinvalid"));
    total++;
    // test byte[] values
    region.put("byteArray", new byte[0]);
    flush(region);
    assertArrayEquals(new byte[0], (byte[]) region.get("byteArray"));
    total++;
    // test modification
    region.put("modified", "originalValue");
    flush(region);
    region.put("modified", "modified");
    flush(region);
    assertEquals("modified", region.get("modified"));
    total++;
    assertEquals(total, region.keySet().size());
  }
  // @todo need to do a close that does not remove disk files
  closeCache();
  getCache();
  {
    // Recreate the disk store under the same name and directory; the region's
    // persistent entries should then be recovered from disk.
    dsf = getCache().createDiskStoreFactory();
    dsf.setDiskDirs(new File[] {d});
    dsf.create(name);
    Region region = createRegion(name, factory.create());
    assertEquals(total, region.keySet().size());
    assertTrue(region.containsKey("invalid") && !region.containsValueForKey("invalid"));
    region.localDestroy("invalid");
    total--;
    assertTrue(region.containsKey("localinvalid") && !region.containsValueForKey("localinvalid"));
    region.localDestroy("localinvalid");
    total--;
    assertArrayEquals(new byte[0], (byte[]) region.get("byteArray"));
    region.localDestroy("byteArray");
    total--;
    assertEquals("modified", region.get("modified"));
    region.localDestroy("modified");
    total--;
  }
}
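The second half of testBackup is what makes the DiskStoreFactory usage interesting: recreating a disk store with the same name over the same directories, before the region is recreated, is what lets the persistent entries come back. A condensed sketch of that round trip outside the test harness (the directory and names are illustrative):

// First run: persist an entry, then close the cache normally.
File dir = new File("backupDisk");
dir.mkdirs();
Cache cache = new CacheFactory().create();
cache.createDiskStoreFactory().setDiskDirs(new File[] {dir}).create("backupStore");
Region<String, String> region = cache
    .<String, String>createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT)
    .setDiskStoreName("backupStore")
    .create("backupRegion");
region.put("k", "v");
cache.close(); // a normal close keeps the disk files

// Second run: same store name, same directories -> entries are recovered.
cache = new CacheFactory().create();
cache.createDiskStoreFactory().setDiskDirs(new File[] {dir}).create("backupStore");
region = cache
    .<String, String>createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT)
    .setDiskStoreName("backupStore")
    .create("backupRegion");
assert "v".equals(region.get("k"));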
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class DiskRegionDUnitTest method testRemoteUpdates.
/**
 * Makes sure that updates from other VMs cause existing entries to be written to disk.
 */
@Test
public void testRemoteUpdates() throws Exception {
  final String name = this.getUniqueName();
  SerializableRunnable create = new CacheSerializableRunnable("Create region") {
    public void run2() throws CacheException {
      AttributesFactory factory = new AttributesFactory();
      factory.setScope(Scope.DISTRIBUTED_NO_ACK);
      factory.setEvictionAttributes(
          EvictionAttributes.createLRUMemoryAttributes(2, null, EvictionAction.OVERFLOW_TO_DISK));
      File d = new File("DiskRegions" + OSProcess.getId());
      d.mkdirs();
      DiskStoreFactory dsf = getCache().createDiskStoreFactory();
      dsf.setDiskDirs(new File[] {d});
      DiskStore ds = dsf.create(name);
      factory.setDiskStoreName(ds.getName());
      createRegion(name, factory.create());
    }
  };
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  vm0.invoke(create);
  vm1.invoke(create);
  vm0.invoke(new CacheSerializableRunnable("Fill Region") {
    public void run2() throws CacheException {
      LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
      // DiskRegion dr = region.getDiskRegion();
      LRUStatistics lruStats = getLRUStats(region);
      int i;
      for (i = 0; lruStats.getEvictions() <= 0; i++) {
        region.put(new Integer(i), new short[250]);
      }
      assertTrue(i > 5);
    }
  });
  vm1.invoke(new CacheSerializableRunnable("Update Region") {
    public void run2() throws CacheException {
      LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
      // LRUStatistics lruStats = getLRUStats(region);
      for (int i = 0; i < 10; i++) {
        region.put(new Integer(i), new int[250]);
      }
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Verify overflow") {
    public void run2() throws CacheException {
      LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
      // DiskRegion dr = region.getDiskRegion();
      final LRUStatistics lruStats = getLRUStats(region);
      WaitCriterion ev = new WaitCriterion() {
        public boolean done() {
          return lruStats.getEvictions() > 6;
        }

        public String description() {
          return "waiting for evictions to exceed 6";
        }
      };
      Wait.waitForCriterion(ev, 5 * 1000, 200, true);
      // DiskRegionStats diskStats = dr.getStats();
      // assertTrue(diskStats.getWrites() > 6);
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Populate with byte[]") {
    public void run2() throws CacheException {
      LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
      // LRUStatistics lruStats = getLRUStats(region);
      for (int i = 0; i < 10000; i++) {
        region.put(String.valueOf(i), String.valueOf(i).getBytes());
      }
    }
  });
  vm1.invoke(new CacheSerializableRunnable("Get with byte[]") {
    public void run2() throws CacheException {
      LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
      // LRUStatistics lruStats = getLRUStats(region);
      for (int i = 0; i < 10000; i++) {
        byte[] bytes = (byte[]) region.get(String.valueOf(i));
        assertEquals(String.valueOf(i), new String(bytes));
      }
    }
  });
}
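The eviction controller here is memory-based: createLRUMemoryAttributes(2, null, OVERFLOW_TO_DISK) caps in-memory values at roughly 2 MB (null selects the default ObjectSizer for estimating entry sizes) and spills the least recently used values to the disk store while keys stay in memory, which is why updates arriving from the other VM still end up written to disk. A single-member sketch of the same configuration (region and store names are invented):

Cache cache = new CacheFactory().create();
cache.createDiskStoreFactory()
    .setDiskDirs(new File[] {new File("overflowDisk")})
    .create("overflowStore");

// Cap in-memory value storage at ~2 MB; overflow the LRU tail to disk.
Region<Integer, short[]> region = cache
    .<Integer, short[]>createRegionFactory(RegionShortcut.REPLICATE)
    .setEvictionAttributes(
        EvictionAttributes.createLRUMemoryAttributes(2, null, EvictionAction.OVERFLOW_TO_DISK))
    .setDiskStoreName("overflowStore")
    .create("overflowRegion");

for (int i = 0; i < 10_000; i++) {
  region.put(i, new short[250]); // older values migrate to disk as memory fills
}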
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class DiskRegionDUnitTest method testEvictValueOnRegionWithEvictionAttributes.
/**
 * Tests calling region.evictValue() on a region with eviction attributes set.
 */
@Test
public void testEvictValueOnRegionWithEvictionAttributes() throws Exception {
  final String name = this.getUniqueName() + "testRegionEvictValue";
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] {d});
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  int size = 200;
  for (int i = 0; i < size; i++) {
    region.put("Key-" + i, new Integer(i));
  }
  // evictValue() is not supported on a region configured with eviction
  // attributes, so every call below is expected to throw.
  for (int i = 0; i < size / 4; i++) {
    try {
      ((LocalRegion) region).evictValue("Key-" + i);
      fail("Should have thrown an exception: evictValue is not supported on a region "
          + "with eviction attributes.");
    } catch (Exception ex) {
      // Expected exception; continue.
    }
  }
}
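The catch block above accepts any Exception. If you want the expected failure pinned down, the same loop body can assert on what was thrown before swallowing it; a variation sketch, assuming the rejection surfaces as an IllegalStateException (the test itself does not pin the type, so that is an assumption):

try {
  ((LocalRegion) region).evictValue("Key-" + i);
  fail("evictValue should be rejected on a region with eviction attributes");
} catch (IllegalStateException expected) {
  // Assumed exception type; the test above only requires that *some*
  // exception is thrown when eviction attributes are configured.
}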
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class DiskRegionDUnitTest method testLRUCapacityController.
/**
 * Tests disk overflow with an entry-based {@link LRUCapacityController}.
 */
@Test
public void testLRUCapacityController() throws CacheException {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(1000, EvictionAction.OVERFLOW_TO_DISK));
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  factory.setDiskSynchronous(true);
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  dsf.setDiskDirs(new File[] {d});
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);
  flush(region);
  assertEquals(0, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(0, lruStats.getEvictions());
  // Fill the region up to the LRU entry limit; nothing should be evicted or
  // written to disk yet.
  for (int i = 1; i <= 1000; i++) {
    Object key = new Integer(i);
    Object value = String.valueOf(i);
    region.put(key, value);
    assertEquals(i, lruStats.getCounter());
    assertEquals(0, lruStats.getEvictions());
    assertEquals("On iteration " + i, 0, diskStats.getWrites());
    assertEquals(0, diskStats.getReads());
    assertEquals(0, diskStats.getNumOverflowOnDisk());
  }
  assertEquals(0, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(0, diskStats.getNumOverflowOnDisk());
  // Add a new value; the LRU entry is evicted and overflows to disk
  region.put(new Integer(1000 + 1), String.valueOf(1000 + 1));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(1, lruStats.getEvictions());
  assertEquals(1, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(1, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());
  // Add another new value
  region.put(new Integer(1000 + 2), String.valueOf(1000 + 2));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(2, lruStats.getEvictions());
  assertEquals(2, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(2, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());
  // Replace an existing in-memory value; no additional eviction or disk write
  region.put(new Integer(1000), String.valueOf(1000));
  assertEquals(1000, lruStats.getCounter());
  assertEquals(2, lruStats.getEvictions());
  assertEquals(2, diskStats.getWrites());
  assertEquals(0, diskStats.getReads());
  assertEquals(2, diskStats.getNumOverflowOnDisk());
  assertEquals(1000, diskStats.getNumEntriesInVM());
}
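The test stops after checking writes; a natural continuation would fault one of the two overflowed entries back in and watch the read side of the stats. A sketch of that extra step, assuming the usual overflow behavior that get() on an evicted key reads the value back from disk and increments diskStats.getReads() (that stat behavior is an assumption, not asserted by the test above):

// Keys 1 and 2 were the least recently used entries, so they are on disk now.
Object faultedIn = region.get(new Integer(1)); // assumed: faults the value in from disk
assertEquals("1", faultedIn);
assertTrue(diskStats.getReads() >= 1); // assumed: the fault-in counts as a disk read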