Use of org.apache.geode.internal.cache.LocalRegion in project geode by apache: class MultiVMRegionTestCase, method testRemoteCacheWriter.
/**
 * Tests that a remote {@link CacheWriter} is invoked and that <code>CacheWriter</code> arguments
 * and {@link CacheWriterException}s are propagated appropriately.
 *
 * <p>The writer is installed in vm1 while the triggering operations (create, update, entry
 * destroy, region destroy) are performed in vm0, so every callback the writer sees is
 * origin-remote. Each phase first fires the operation with the EXCEPTION callback argument
 * (which the writer vetoes) and then with ARG (which the writer accepts). For non-partitioned
 * off-heap regions, off-heap object/ref-count checks are interleaved throughout.
 */
@Test
public void testRemoteCacheWriter() throws Exception {
// Remote writer invocation only makes sense for distributed scopes.
assertTrue(getRegionAttributes().getScope().isDistributed());
final String name = this.getUniqueName();
final Object key = "KEY";
// NOTE: the "create" phase below puts oldValue, so beforeCreate2 observes it as the NEW value.
final Object oldValue = "OLD_VALUE";
final Object newValue = "NEW_VALUE";
// Callback argument the writer accepts.
final Object arg = "ARG";
// Callback argument that makes the writer throw, vetoing the operation.
final Object exception = "EXCEPTION";
final Object key2 = "KEY2";
final Object value2 = "VALUE2";
SerializableRunnable create = new CacheSerializableRunnable("Create Region") {
@Override
public void run2() throws CacheException {
Region region = createRegion(name);
// Put key2 in the region before any callbacks are
// registered, so it can be destroyed later
region.put(key2, value2);
assertEquals(1, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
// Off-heap sanity check: exactly one stored object exists and the region
// entry holds its single reference.
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
LocalRegion reRegion;
reRegion = (LocalRegion) region;
RegionEntry re = reRegion.getRegionEntry(key2);
StoredObject so = (StoredObject) re._getValue();
assertEquals(1, so.getRefCount());
assertEquals(1, ma.getStats().getObjects());
}
}
};
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
vm0.invoke(create);
vm1.invoke(create);
//////// Create
// Install a writer in vm1 that validates the event for an accepted remote create
// and vetoes creates carrying the EXCEPTION callback argument.
vm1.invoke(new CacheSerializableRunnable("Set Writer") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(name);
writer = new TestCacheWriter() {
@Override
public void beforeCreate2(EntryEvent event) throws CacheWriterException {
if (exception.equals(event.getCallbackArgument())) {
String s = "Test Exception";
throw new CacheWriterException(s);
}
assertEquals(region, event.getRegion());
assertTrue(event.getOperation().isCreate());
assertTrue(event.getOperation().isDistributed());
assertFalse(event.getOperation().isExpiration());
assertTrue(event.isOriginRemote());
assertEquals(key, event.getKey());
assertEquals(null, event.getOldValue());
// The create in vm0 puts oldValue, so it is this event's NEW value.
assertEquals(oldValue, event.getNewValue());
assertFalse(event.getOperation().isLoad());
assertFalse(event.getOperation().isLocalLoad());
assertFalse(event.getOperation().isNetLoad());
assertFalse(event.getOperation().isNetSearch());
}
};
region.getAttributesMutator().setCacheWriter(writer);
flushIfNecessary(region);
}
});
// A vetoed create must leave no entry behind in the originating VM.
vm0.invoke(new CacheSerializableRunnable("Create with Exception") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
try {
region.put(key, oldValue, exception);
fail("Should have thrown a CacheWriterException");
} catch (CacheWriterException ex) {
assertNull(region.getEntry(key));
assertEquals(1, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
// Only key2's value should remain off-heap after the vetoed create.
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(1, ma.getStats().getObjects());
}
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
vm0.invoke(new CacheSerializableRunnable("Create with Argument") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
region.put(key, oldValue, arg);
assertEquals(2, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
// The accepted create adds a second off-heap object with a single reference.
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(2, ma.getStats().getObjects());
LocalRegion reRegion;
reRegion = (LocalRegion) region;
StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
assertEquals(1, so.getRefCount());
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
//////// Update
// Replace the writer with one that validates remote updates; it sees both the old
// and new values on the event.
vm1.invoke(new CacheSerializableRunnable("Set Writer") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(name);
writer = new TestCacheWriter() {
@Override
public void beforeUpdate2(EntryEvent event) throws CacheWriterException {
Object argument = event.getCallbackArgument();
if (exception.equals(argument)) {
String s = "Test Exception";
throw new CacheWriterException(s);
}
assertEquals(arg, argument);
assertEquals(region, event.getRegion());
assertTrue(event.getOperation().isUpdate());
assertTrue(event.getOperation().isDistributed());
assertFalse(event.getOperation().isExpiration());
assertTrue(event.isOriginRemote());
assertEquals(key, event.getKey());
assertEquals(oldValue, event.getOldValue());
assertEquals(newValue, event.getNewValue());
assertFalse(event.getOperation().isLoad());
assertFalse(event.getOperation().isLocalLoad());
assertFalse(event.getOperation().isNetLoad());
assertFalse(event.getOperation().isNetSearch());
}
};
region.getAttributesMutator().setCacheWriter(writer);
}
});
// A vetoed update must leave the previous value in place.
vm0.invoke(new CacheSerializableRunnable("Update with Exception") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
try {
region.put(key, newValue, exception);
fail("Should have thrown a CacheWriterException");
} catch (CacheWriterException ex) {
Region.Entry entry = region.getEntry(key);
assertEquals(oldValue, entry.getValue());
assertEquals(2, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
// The vetoed update must not have replaced or leaked the off-heap value.
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(2, ma.getStats().getObjects());
LocalRegion reRegion;
reRegion = (LocalRegion) region;
StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
assertEquals(1, so.getRefCount());
}
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
vm0.invoke(new CacheSerializableRunnable("Update with Argument") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
region.put(key, newValue, arg);
assertEquals(2, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
// Update replaces the old off-heap value, so the object count stays at 2.
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(2, ma.getStats().getObjects());
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
//////// Destroy
// Replace the writer with one that validates remote entry destroys.
vm1.invoke(new CacheSerializableRunnable("Set Writer") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(name);
writer = new TestCacheWriter() {
@Override
public void beforeDestroy2(EntryEvent event) throws CacheWriterException {
Object argument = event.getCallbackArgument();
if (exception.equals(argument)) {
String s = "Test Exception";
throw new CacheWriterException(s);
}
assertEquals(arg, argument);
assertEquals(region, event.getRegion());
assertTrue(event.getOperation().isDestroy());
assertTrue(event.getOperation().isDistributed());
assertFalse(event.getOperation().isExpiration());
assertTrue(event.isOriginRemote());
assertEquals(key, event.getKey());
assertEquals(newValue, event.getOldValue());
assertNull(event.getNewValue());
assertFalse(event.getOperation().isLoad());
assertFalse(event.getOperation().isLocalLoad());
assertFalse(event.getOperation().isNetLoad());
assertFalse(event.getOperation().isNetSearch());
}
};
region.getAttributesMutator().setCacheWriter(writer);
}
});
// A vetoed destroy must leave the entry intact.
vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
try {
region.destroy(key, exception);
fail("Should have thrown a CacheWriterException");
} catch (CacheWriterException ex) {
assertNotNull(region.getEntry(key));
assertEquals(2, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(2, ma.getStats().getObjects());
}
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
region.destroy(key, arg);
assertEquals(1, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
// The accepted destroy frees key's off-heap value; only key2's remains.
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(1, ma.getStats().getObjects());
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
//////// Region Destroy
// Replace the writer with one that validates the remote region destroy.
vm1.invoke(new CacheSerializableRunnable("Set Writer") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(name);
writer = new TestCacheWriter() {
@Override
public void beforeRegionDestroy2(RegionEvent event) throws CacheWriterException {
Object argument = event.getCallbackArgument();
if (exception.equals(argument)) {
String s = "Test Exception";
throw new CacheWriterException(s);
}
assertEquals(arg, argument);
assertEquals(region, event.getRegion());
assertTrue(event.getOperation().isRegionDestroy());
assertTrue(event.getOperation().isDistributed());
assertFalse(event.getOperation().isExpiration());
assertTrue(event.isOriginRemote());
}
};
region.getAttributesMutator().setCacheWriter(writer);
}
});
// A vetoed region destroy must leave the region (and its off-heap data) intact.
vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
try {
region.destroyRegion(exception);
fail("Should have thrown a CacheWriterException");
} catch (CacheWriterException ex) {
if (region.isDestroyed()) {
fail("should not have an exception if region is destroyed", ex);
}
assertEquals(1, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(1, ma.getStats().getObjects());
}
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
assertEquals(1, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(1, ma.getStats().getObjects());
}
region.destroyRegion(arg);
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
// Off-heap cleanup after a region destroy is asynchronous, so poll for it.
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
final MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
WaitCriterion waitForStatChange = new WaitCriterion() {
@Override
public boolean done() {
return ma.getStats().getObjects() == 0;
}
@Override
public String description() {
return "never saw off-heap object count go to zero. Last value was " + ma.getStats().getObjects();
}
};
Wait.waitForCriterion(waitForStatChange, 3000, 10, true);
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
}
Use of org.apache.geode.internal.cache.LocalRegion in project geode by apache: class MultiVMRegionTestCase, method testUpdateResetsIdleTime.
/**
 * Verifies that an update arriving from a remote member resets the entry's
 * idle-expiration timer on the member that originally created the entry.
 */
@Test
public void testUpdateResetsIdleTime() throws Exception {
final String regionName = this.getUniqueName();
// The test compares expiry timestamps directly; it never actually waits for this
// (90 second) idle timeout to fire.
final int idleSeconds = 90;
final Object entryKey = "KEY";
final Object entryValue = "VALUE";
Host host = Host.getHost(0);
VM creatorVm = host.getVM(0);
VM updaterVm = host.getVM(1);
creatorVm.invoke(new CacheSerializableRunnable("Create with Idle") {
@Override
public void run2() throws CacheException {
AttributesFactory attrs = new AttributesFactory(getRegionAttributes());
attrs.setStatisticsEnabled(true);
attrs.setEntryIdleTimeout(new ExpirationAttributes(idleSeconds, ExpirationAction.DESTROY));
LocalRegion region = (LocalRegion) createRegion(regionName, attrs.create());
if (region.getDataPolicy().withPartitioning()) {
// Force all buckets to be created locally so the
// test will know that the create happens in this vm
// and the update (in vm1) is remote.
PartitionRegionHelper.assignBucketsToPartitions(region);
}
region.create(entryKey, null);
// Stash the post-create expiry time in the region itself so the later
// verification step (also in this VM) can read it back.
EntryExpiryTask expiryTask = region.getEntryExpiryTask(entryKey);
region.create("createExpiryTime", expiryTask.getExpirationTime());
Wait.waitForExpiryClockToChange(region);
}
});
updaterVm.invoke(new CacheSerializableRunnable("Create Region " + regionName) {
@Override
public void run2() throws CacheException {
AttributesFactory attrs = new AttributesFactory(getRegionAttributes());
attrs.setStatisticsEnabled(true);
attrs.setEntryIdleTimeout(new ExpirationAttributes(idleSeconds, ExpirationAction.DESTROY));
if (getRegionAttributes().getPartitionAttributes() != null) {
createRegion(regionName, attrs.create());
} else {
createRegion(regionName);
}
}
});
updaterVm.invoke(new CacheSerializableRunnable("Update entry") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(regionName);
assertNotNull(region);
region.put(entryKey, entryValue);
}
});
creatorVm.invoke(new CacheSerializableRunnable("Verify reset") {
@Override
public void run2() throws CacheException {
final LocalRegion region = (LocalRegion) getRootRegion().getSubregion(regionName);
// wait for update to reach us from vm1 (needed if no-ack)
WaitCriterion updateArrived = new WaitCriterion() {
@Override
public boolean done() {
return entryValue.equals(region.get(entryKey));
}
@Override
public String description() {
return "never saw update of " + entryKey;
}
};
Wait.waitForCriterion(updateArrived, 3000, 10, true);
EntryExpiryTask expiryTask = region.getEntryExpiryTask(entryKey);
long createExpiryTime = (Long) region.get("createExpiryTime");
long updateExpiryTime = expiryTask.getExpirationTime();
if (updateExpiryTime - createExpiryTime <= 0L) {
fail("update did not reset the expiration time. createExpiryTime=" + createExpiryTime + " updateExpiryTime=" + updateExpiryTime);
}
}
});
}
Use of org.apache.geode.internal.cache.LocalRegion in project geode by apache: class LRUEvictionControllerDUnitTest, method testReplicationAndTransactions.
/**
 * Create two regions, one a "feed" that performs transactions which are replicated to a region
 * with an Entry LRU set to one. Asserts that the LRU rules are observed: no matter how many
 * entries are created, each region's eviction controller keeps exactly {@code maxEntries}
 * entries in memory (the rest overflow to disk) in both the feeder and the replicate VM.
 */
@Test
public void testReplicationAndTransactions() throws Exception {
final String r1 = this.getUniqueName() + "-1";
final String r2 = this.getUniqueName() + "-2";
final String r3 = this.getUniqueName() + "-3";
VM feeder = Host.getHost(0).getVM(3);
VM repl = Host.getHost(0).getVM(2);
final int maxEntries = 1;
final int numEntries = 10000;
final int txBatchSize = 10;
// need at least one batch
assertTrue(numEntries > txBatchSize);
CacheSerializableRunnable createRegion = new CacheSerializableRunnable("Create Replicate Region") {
@Override
public void run2() throws CacheException {
AttributesFactory factory = new AttributesFactory();
factory.setOffHeap(isOffHeapEnabled());
// LRU entry limit of maxEntries; evicted entries overflow to disk.
factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maxEntries, EvictionAction.OVERFLOW_TO_DISK));
factory.setDataPolicy(DataPolicy.REPLICATE);
File[] diskDirs = new File[1];
diskDirs[0] = new File("overflowDir/" + OSProcess.getId());
diskDirs[0].mkdirs();
factory.setDiskStoreName(getCache().createDiskStoreFactory().setDiskDirs(diskDirs).create("LRUEvictionControllerDUnitTest").getName());
factory.setDiskSynchronous(true);
factory.setScope(Scope.DISTRIBUTED_ACK);
RegionAttributes a = factory.create();
createRegion(r1, a);
createRegion(r2, a);
createRegion(r3, a);
}
};
feeder.invoke(createRegion);
repl.invoke(createRegion);
feeder.invoke(new CacheSerializableRunnable("put " + numEntries + " entries and assert " + maxEntries + " max entries") {
@Override
public void run2() throws CacheException {
Cache c = getCache();
CacheTransactionManager txm = c.getCacheTransactionManager();
Region reg1 = getRootRegion().getSubregion(r1);
assertNotNull(reg1);
Region reg2 = getRootRegion().getSubregion(r2);
assertNotNull(reg2);
Region reg3 = getRootRegion().getSubregion(r3);
assertNotNull(reg3);
// Tracks whether a transaction is open when the loop ends mid-batch.
boolean startTx = false;
final Region[] r = { reg1, reg2, reg3 };
for (int i = 0; i < numEntries; i++) {
if (i % txBatchSize == 0) {
txm.begin();
startTx = true;
}
reg1.create("r1-key-" + i, "r1-value-" + i);
reg2.create("r2-key-" + i, "r2-value-" + i);
reg3.create("r3-key-" + i, "r3-value-" + i);
if (i % txBatchSize == (txBatchSize - 1)) {
txm.commit();
try {
// allow stats to get a sample in
Thread.sleep(20);
} catch (InterruptedException ie) {
// Restore the interrupt status before failing so callers can still observe it.
Thread.currentThread().interrupt();
fail("interrupted");
}
startTx = false;
}
}
// Commit any trailing partial batch.
if (startTx) {
txm.commit();
}
for (Region reg : r) {
assertEquals(numEntries, reg.size());
// All entries exist, but only maxEntries are held in memory by the LRU.
LocalRegion lr = (LocalRegion) reg;
assertEquals(maxEntries, lr.getEvictionController().getLRUHelper().getStats().getLimit());
assertEquals(maxEntries, lr.getEvictionController().getLRUHelper().getStats().getCounter());
}
}
});
repl.invoke(new CacheSerializableRunnable("Replicate asserts " + maxEntries + " max entries") {
@Override
public void run2() throws CacheException {
getCache();
Region reg1 = getRootRegion().getSubregion(r1);
Region reg2 = getRootRegion().getSubregion(r2);
Region reg3 = getRootRegion().getSubregion(r3);
final Region[] r = { reg1, reg2, reg3 };
for (Region reg : r) {
assertNotNull(reg);
assertEquals(numEntries, reg.size());
// Replicated data obeys the same LRU limit in this VM.
LocalRegion lr = (LocalRegion) reg;
assertEquals(maxEntries, lr.getEvictionController().getLRUHelper().getStats().getLimit());
assertEquals(maxEntries, lr.getEvictionController().getLRUHelper().getStats().getCounter());
}
}
});
}
Use of org.apache.geode.internal.cache.LocalRegion in project geode by apache: class LRUEvictionControllerDUnitTest, method testMultipleUsesOfEvictionAttributes.
/**
 * Verifies that one set of LRU eviction attributes can safely be shared across several
 * regions: the regions report equal eviction attributes, and while each region gets its
 * own eviction controller instance, those controllers compare equal.
 */
@Test
public void testMultipleUsesOfEvictionAttributes() throws CacheException, CloneNotSupportedException {
int entryLimit = 42;
final String baseName = this.getUniqueName();
AttributesFactory attrFactory = new AttributesFactory();
attrFactory.setScope(Scope.LOCAL);
attrFactory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(entryLimit));
Region firstRegion = createRegion(baseName, attrFactory.create());
RegionAttributes sharedAttrs = firstRegion.getAttributes();
// Reuse the attributes object directly for the second region...
Region secondRegion = createRegion(baseName + 2, sharedAttrs);
// ...and via a fresh factory seeded with the same attributes for the third.
attrFactory = new AttributesFactory(sharedAttrs);
Region thirdRegion = createRegion(baseName + 3, attrFactory.create());
assertEquals(firstRegion.getAttributes().getEvictionAttributes(), secondRegion.getAttributes().getEvictionAttributes());
assertEquals(secondRegion.getAttributes().getEvictionAttributes(), thirdRegion.getAttributes().getEvictionAttributes());
LocalRegion localFirst = (LocalRegion) firstRegion;
LocalRegion localSecond = (LocalRegion) secondRegion;
LocalRegion localThird = (LocalRegion) thirdRegion;
// Distinct controller instances per region, but equal by value.
assertNotSame(localFirst.getEvictionController(), localSecond.getEvictionController());
assertEquals(localFirst.getEvictionController(), localSecond.getEvictionController());
assertNotSame(localSecond.getEvictionController(), localThird.getEvictionController());
assertEquals(localSecond.getEvictionController(), localThird.getEvictionController());
}
Use of org.apache.geode.internal.cache.LocalRegion in project geode by apache: class DiskRegionPersistOnlySyncJUnitTest, method testPopulate5kbwrites.
/**
 * Micro-benchmark: writes {@code OP_COUNT} 5&nbsp;KB values into the persist-only sync disk
 * region, flushes, and logs throughput (writes/sec and bytes/sec). With the
 * {@code DRP.UNIQUE_KEYS} system property set, each put uses a distinct Integer key;
 * otherwise String keys are reused across runs.
 */
@Test
public void testPopulate5kbwrites() {
ENTRY_SIZE = 1024 * 5;
/*
 * OP_COUNT can be increased/decreased as per the requirement. If required to be set as higher
 * value such as 1000000 one needs to set the VM heap size accordingly. (For example:Default
 * setting in build.xml is <jvmarg value="-Xmx256M"/>
 *
 */
OP_COUNT = 1000;
UNIQUE_KEYS = Boolean.getBoolean("DRP.UNIQUE_KEYS");
RegionAttributes ra = region.getAttributes();
final byte[] value = new byte[ENTRY_SIZE];
Arrays.fill(value, (byte) 77);
String config = "ENTRY_SIZE=" + ENTRY_SIZE + " OP_COUNT=" + OP_COUNT + " UNIQUE_KEYS=" + UNIQUE_KEYS + " opLogEnabled=" + !Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "disableOpLog") + " syncWrites=" + Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "syncWrites");
if (ra.getDiskStoreName() != null) {
config += " diskStoreName=" + ra.getDiskStoreName();
} else {
config += " [" + ra.getDiskWriteAttributes() + "]";
}
log.info(config);
long startTime = System.currentTimeMillis();
if (UNIQUE_KEYS) {
for (int i = 0; i < OP_COUNT; i++) {
// Integer.valueOf reuses cached boxes; the deprecated new Integer(i) always allocated.
region.put(Integer.valueOf(i), value);
}
} else {
for (int i = 0; i < OP_COUNT; i++) {
region.put("" + (i + 10000), value);
}
}
long endTime = System.currentTimeMillis();
// region.close(); // closes disk file which will flush all buffers
((LocalRegion) region).forceFlush();
long et = endTime - startTime;
long etSecs = et / 1000;
// Guard against divide-by-zero for sub-second runs. Use long arithmetic for the byte
// total so OP_COUNT * ENTRY_SIZE cannot overflow int when the counts are raised
// (e.g. OP_COUNT=1000000 at 5 KB per entry is ~5 GB, well past Integer.MAX_VALUE).
long opPerSec = etSecs == 0 ? 0 : (OP_COUNT / etSecs);
long bytesPerSec = etSecs == 0 ? 0 : (((long) OP_COUNT * ENTRY_SIZE) / etSecs);
String stats = "et=" + et + "ms writes/sec=" + opPerSec + " bytes/sec=" + bytesPerSec;
log.info(stats);
System.out.println("Stats for 5kb writes :" + stats);
}
Aggregations