Use of org.apache.geode.internal.cache.RegionEntry in the Apache Geode project: class MultiVMRegionTestCase, method testRemoteCacheWriter.
/**
 * Tests that a remote {@link CacheWriter} is invoked and that <code>CacheWriter</code> arguments
 * and {@link CacheWriterException}s are propagated appropriately.
 *
 * <p>The writer is registered in vm1 while all operations originate in vm0, so every
 * callback the writer receives is origin-remote. Each phase (create, update, entry
 * destroy, region destroy) is run twice: once with a callback argument that makes the
 * writer throw (verifying the operation is vetoed) and once with a normal argument
 * (verifying the operation succeeds and the event contents are correct).
 */
@Test
public void testRemoteCacheWriter() throws Exception {
  // Remote writer invocation only makes sense for distributed scopes.
  assertTrue(getRegionAttributes().getScope().isDistributed());
  final String name = this.getUniqueName();
  // Keys, values, and callback arguments shared by all phases below.
  final Object key = "KEY";
  final Object oldValue = "OLD_VALUE";
  final Object newValue = "NEW_VALUE";
  final Object arg = "ARG";
  // Passing this as the callback argument makes the writer throw a CacheWriterException.
  final Object exception = "EXCEPTION";
  final Object key2 = "KEY2";
  final Object value2 = "VALUE2";
  SerializableRunnable create = new CacheSerializableRunnable("Create Region") {
    @Override
    public void run2() throws CacheException {
      Region region = createRegion(name);
      // Put key2 in the region before any callbacks are
      // registered, so it can be destroyed later
      region.put(key2, value2);
      assertEquals(1, region.size());
      // For off-heap, non-partitioned regions also check the stored value's off-heap
      // reference count and the allocator's total object count.
      if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
        GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
        MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
        LocalRegion reRegion;
        reRegion = (LocalRegion) region;
        RegionEntry re = reRegion.getRegionEntry(key2);
        StoredObject so = (StoredObject) re._getValue();
        assertEquals(1, so.getRefCount());
        assertEquals(1, ma.getStats().getObjects());
      }
    }
  };
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  vm0.invoke(create);
  vm1.invoke(create);
  //////// Create: vm1 registers a writer; vm0 performs the (remote) creates.
  vm1.invoke(new CacheSerializableRunnable("Set Writer") {
    @Override
    public void run2() throws CacheException {
      final Region region = getRootRegion().getSubregion(name);
      // NOTE: writer is presumably a field of the enclosing test class; it is assigned
      // here so the later "Verify callback" runnables in this same VM can read it.
      writer = new TestCacheWriter() {
        @Override
        public void beforeCreate2(EntryEvent event) throws CacheWriterException {
          if (exception.equals(event.getCallbackArgument())) {
            String s = "Test Exception";
            throw new CacheWriterException(s);
          }
          assertEquals(region, event.getRegion());
          assertTrue(event.getOperation().isCreate());
          assertTrue(event.getOperation().isDistributed());
          assertFalse(event.getOperation().isExpiration());
          assertTrue(event.isOriginRemote());
          assertEquals(key, event.getKey());
          assertEquals(null, event.getOldValue());
          // The "new" value of this create is oldValue: that is what vm0 puts first.
          assertEquals(oldValue, event.getNewValue());
          assertFalse(event.getOperation().isLoad());
          assertFalse(event.getOperation().isLocalLoad());
          assertFalse(event.getOperation().isNetLoad());
          assertFalse(event.getOperation().isNetSearch());
        }
      };
      region.getAttributesMutator().setCacheWriter(writer);
      flushIfNecessary(region);
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Create with Exception") {
    @Override
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      try {
        region.put(key, oldValue, exception);
        fail("Should have thrown a CacheWriterException");
      } catch (CacheWriterException ex) {
        // The writer vetoed the create: the entry must not exist, and only key2
        // (created before the writer was registered) remains in the region.
        assertNull(region.getEntry(key));
        assertEquals(1, region.size());
        if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
          GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
          MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
          assertEquals(1, ma.getStats().getObjects());
        }
      }
    }
  });
  vm1.invoke(new SerializableRunnable("Verify callback") {
    @Override
    public void run() {
      assertTrue(writer.wasInvoked());
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Create with Argument") {
    @Override
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      region.put(key, oldValue, arg);
      // Both key2 and key are now present.
      assertEquals(2, region.size());
      if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
        GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
        MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
        assertEquals(2, ma.getStats().getObjects());
        LocalRegion reRegion;
        reRegion = (LocalRegion) region;
        StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
        assertEquals(1, so.getRefCount());
      }
    }
  });
  vm1.invoke(new SerializableRunnable("Verify callback") {
    @Override
    public void run() {
      assertTrue(writer.wasInvoked());
    }
  });
  //////// Update: replace the writer with one that validates update events.
  vm1.invoke(new CacheSerializableRunnable("Set Writer") {
    @Override
    public void run2() throws CacheException {
      final Region region = getRootRegion().getSubregion(name);
      writer = new TestCacheWriter() {
        @Override
        public void beforeUpdate2(EntryEvent event) throws CacheWriterException {
          Object argument = event.getCallbackArgument();
          if (exception.equals(argument)) {
            String s = "Test Exception";
            throw new CacheWriterException(s);
          }
          assertEquals(arg, argument);
          assertEquals(region, event.getRegion());
          assertTrue(event.getOperation().isUpdate());
          assertTrue(event.getOperation().isDistributed());
          assertFalse(event.getOperation().isExpiration());
          assertTrue(event.isOriginRemote());
          assertEquals(key, event.getKey());
          assertEquals(oldValue, event.getOldValue());
          assertEquals(newValue, event.getNewValue());
          assertFalse(event.getOperation().isLoad());
          assertFalse(event.getOperation().isLocalLoad());
          assertFalse(event.getOperation().isNetLoad());
          assertFalse(event.getOperation().isNetSearch());
        }
      };
      region.getAttributesMutator().setCacheWriter(writer);
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Update with Exception") {
    @Override
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      try {
        region.put(key, newValue, exception);
        fail("Should have thrown a CacheWriterException");
      } catch (CacheWriterException ex) {
        // Vetoed update: the entry keeps its previous value.
        Region.Entry entry = region.getEntry(key);
        assertEquals(oldValue, entry.getValue());
        assertEquals(2, region.size());
        if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
          GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
          MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
          assertEquals(2, ma.getStats().getObjects());
          LocalRegion reRegion;
          reRegion = (LocalRegion) region;
          StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
          assertEquals(1, so.getRefCount());
        }
      }
    }
  });
  vm1.invoke(new SerializableRunnable("Verify callback") {
    @Override
    public void run() {
      assertTrue(writer.wasInvoked());
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Update with Argument") {
    @Override
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      region.put(key, newValue, arg);
      assertEquals(2, region.size());
      if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
        GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
        MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
        assertEquals(2, ma.getStats().getObjects());
      }
    }
  });
  vm1.invoke(new SerializableRunnable("Verify callback") {
    @Override
    public void run() {
      assertTrue(writer.wasInvoked());
    }
  });
  //////// Destroy: replace the writer with one that validates entry-destroy events.
  vm1.invoke(new CacheSerializableRunnable("Set Writer") {
    @Override
    public void run2() throws CacheException {
      final Region region = getRootRegion().getSubregion(name);
      writer = new TestCacheWriter() {
        @Override
        public void beforeDestroy2(EntryEvent event) throws CacheWriterException {
          Object argument = event.getCallbackArgument();
          if (exception.equals(argument)) {
            String s = "Test Exception";
            throw new CacheWriterException(s);
          }
          assertEquals(arg, argument);
          assertEquals(region, event.getRegion());
          assertTrue(event.getOperation().isDestroy());
          assertTrue(event.getOperation().isDistributed());
          assertFalse(event.getOperation().isExpiration());
          assertTrue(event.isOriginRemote());
          assertEquals(key, event.getKey());
          assertEquals(newValue, event.getOldValue());
          assertNull(event.getNewValue());
          assertFalse(event.getOperation().isLoad());
          assertFalse(event.getOperation().isLocalLoad());
          assertFalse(event.getOperation().isNetLoad());
          assertFalse(event.getOperation().isNetSearch());
        }
      };
      region.getAttributesMutator().setCacheWriter(writer);
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {
    @Override
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      try {
        region.destroy(key, exception);
        fail("Should have thrown a CacheWriterException");
      } catch (CacheWriterException ex) {
        // Vetoed destroy: the entry must still exist.
        assertNotNull(region.getEntry(key));
        assertEquals(2, region.size());
        if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
          GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
          MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
          assertEquals(2, ma.getStats().getObjects());
        }
      }
    }
  });
  vm1.invoke(new SerializableRunnable("Verify callback") {
    @Override
    public void run() {
      assertTrue(writer.wasInvoked());
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {
    @Override
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      region.destroy(key, arg);
      // Only key2 remains after the successful destroy.
      assertEquals(1, region.size());
      if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
        GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
        MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
        assertEquals(1, ma.getStats().getObjects());
      }
    }
  });
  vm1.invoke(new SerializableRunnable("Verify callback") {
    @Override
    public void run() {
      assertTrue(writer.wasInvoked());
    }
  });
  //////// Region Destroy: replace the writer with one that validates region-destroy events.
  vm1.invoke(new CacheSerializableRunnable("Set Writer") {
    @Override
    public void run2() throws CacheException {
      final Region region = getRootRegion().getSubregion(name);
      writer = new TestCacheWriter() {
        @Override
        public void beforeRegionDestroy2(RegionEvent event) throws CacheWriterException {
          Object argument = event.getCallbackArgument();
          if (exception.equals(argument)) {
            String s = "Test Exception";
            throw new CacheWriterException(s);
          }
          assertEquals(arg, argument);
          assertEquals(region, event.getRegion());
          assertTrue(event.getOperation().isRegionDestroy());
          assertTrue(event.getOperation().isDistributed());
          assertFalse(event.getOperation().isExpiration());
          assertTrue(event.isOriginRemote());
        }
      };
      region.getAttributesMutator().setCacheWriter(writer);
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {
    @Override
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      try {
        region.destroyRegion(exception);
        fail("Should have thrown a CacheWriterException");
      } catch (CacheWriterException ex) {
        // A veto must leave the region intact; a destroyed region here means the
        // exception and the destroy both happened, which is a test failure.
        if (region.isDestroyed()) {
          fail("should not have an exception if region is destroyed", ex);
        }
        assertEquals(1, region.size());
        if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
          GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
          MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
          assertEquals(1, ma.getStats().getObjects());
        }
      }
    }
  });
  vm1.invoke(new SerializableRunnable("Verify callback") {
    @Override
    public void run() {
      assertTrue(writer.wasInvoked());
    }
  });
  vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {
    @Override
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      assertEquals(1, region.size());
      if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
        GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
        MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
        assertEquals(1, ma.getStats().getObjects());
      }
      region.destroyRegion(arg);
      // Off-heap values are released asynchronously after a region destroy, so wait
      // (rather than assert immediately) for the allocator's object count to drain.
      if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
        GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
        final MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
        WaitCriterion waitForStatChange = new WaitCriterion() {
          @Override
          public boolean done() {
            return ma.getStats().getObjects() == 0;
          }
          @Override
          public String description() {
            return "never saw off-heap object count go to zero. Last value was " + ma.getStats().getObjects();
          }
        };
        Wait.waitForCriterion(waitForStatChange, 3000, 10, true);
      }
    }
  });
  vm1.invoke(new SerializableRunnable("Verify callback") {
    @Override
    public void run() {
      assertTrue(writer.wasInvoked());
    }
  });
}
Use of org.apache.geode.internal.cache.RegionEntry in the Apache Geode project: class PRBucketSynchronizationDUnitTest, method verifySynchronized.
/**
 * Invokes a check in the given VM that bucket 0 of {@code TestRegion} was synchronized
 * after {@code crashedMember} crashed: the crashed member has left the membership view,
 * "Object3" is present, and "Object5" exists in the bucket's map as a tombstone.
 * Waits up to 30 seconds, dumping the bucket's backing map once on the first miss.
 */
private void verifySynchronized(VM vm, final InternalDistributedMember crashedMember) {
  vm.invoke(new SerializableCallable("check that synchronization happened") {
    public Object call() throws Exception {
      final BucketRegion bucket =
          ((PartitionedRegion) TestRegion).getDataStore().getLocalBucketById(0);
      Wait.waitForCriterion(new WaitCriterion() {
        String waitingFor = "primary is still in membership view: " + crashedMember;
        boolean dumped = false;

        public boolean done() {
          // The crashed member must have left the membership view first.
          if (TestRegion.getCache().getDistributionManager().isCurrentMember(crashedMember)) {
            LogWriterUtils.getLogWriter().info(waitingFor);
            return false;
          }
          // Synchronization must have delivered the entry for Object3.
          if (!TestRegion.containsKey("Object3")) {
            waitingFor = "entry for Object3 not found";
            LogWriterUtils.getLogWriter().info(waitingFor);
            return false;
          }
          // Object5 must be present in the bucket's map, and only as a tombstone.
          RegionEntry entry = bucket.getRegionMap().getEntry("Object5");
          if (entry == null || !entry.isTombstone()) {
            // Dump the backing map once, on the first failed Object5 check.
            if (!dumped) {
              dumped = true;
              bucket.dumpBackingMap();
            }
            waitingFor = (entry == null) ? "entry for Object5 not found"
                : "Object5 is not a tombstone but should be: " + entry;
            LogWriterUtils.getLogWriter().info(waitingFor);
            return false;
          }
          return true;
        }

        public String description() {
          return waitingFor;
        }
      }, 30000, 5000, true);
      return null;
    }
  });
}
Use of org.apache.geode.internal.cache.RegionEntry in the Apache Geode project: class PersistentRVVRecoveryDUnitTest, method testSkipConflictChecksForConcurrentOps.
/**
 * Test that we skip conflict checks with entries that are on disk compared to entries that come
 * in as part of a concurrent operation
 */
@Test
public void testSkipConflictChecksForConcurrentOps() throws Throwable {
  Host host = Host.getHost(0);
  final VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  // Create the region in few members to test recovery
  createPersistentRegion(vm0);
  createPersistentRegion(vm1);
  // Create an update some entries in vm0 and vm1.
  // NOTE(review): "value2" is written twice; presumably intentional to advance the
  // entry version an extra step before vm1 goes down — confirm against git history.
  createData(vm0, 0, 1, "value1");
  createData(vm0, 0, 1, "value2");
  createData(vm0, 0, 1, "value2");
  closeCache(vm1);
  // Update the keys in vm0 until the entry version rolls over.
  // This means that if we did a conflict check, vm0's key will have
  // a lower entry version than vm1, which would cause us to prefer vm1's
  // value
  SerializableRunnable createData = new SerializableRunnable("rollEntryVersion") {
    public void run() {
      Cache cache = getCache();
      LocalRegion region = (LocalRegion) cache.getRegion(REGION_NAME);
      region.put(0, "value3");
      RegionEntry entry = region.getRegionEntry(0);
      // NOTE(review): this second lookup is redundant — it fetches the same entry
      // assigned on the previous line.
      entry = region.getRegionEntry(0);
      // Sneak in and change the version number for an entry to generate
      // a conflict.
      VersionTag tag = entry.getVersionStamp().asVersionTag();
      tag.setEntryVersion(tag.getEntryVersion() - 2);
      entry.getVersionStamp().setVersions(tag);
    }
  };
  vm0.invoke(createData);
  // Add an observer to vm0 which will perform a concurrent operation during
  // the GII. If we do a conflict check, this operation will be rejected
  // because it will have a lower entry version that what vm1 recovered from
  // disk
  vm0.invoke(new SerializableRunnable() {
    @Override
    public void run() {
      DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
        @Override
        public void beforeProcessMessage(DistributionManager dm, DistributionMessage msg) {
          // Fire exactly once, when vm1's GII request for this region arrives;
          // then uninstall the observer so later messages pass through untouched.
          if (msg instanceof InitialImageOperation.RequestImageMessage) {
            if (((InitialImageOperation.RequestImageMessage) msg).regionPath.contains(REGION_NAME)) {
              createData(vm0, 0, 1, "value4");
              DistributionMessageObserver.setInstance(null);
            }
          }
        }
      });
    }
  });
  // Create vm1, doing a GII
  createPersistentRegion(vm1);
  // If we did a conflict check, this would be value2
  checkData(vm0, 0, 1, "value4");
  checkData(vm1, 0, 1, "value4");
}
Use of org.apache.geode.internal.cache.RegionEntry in the Apache Geode project: class PersistentRVVRecoveryDUnitTest, method testConflictChecksDuringConcurrentDeltaGIIAndOtherOp.
/**
 * This test creates 2 VMs in a distributed system with a persistent PartitionedRegion and one VM
 * (VM1) puts an entry in region. Second VM (VM2) starts later and does a delta GII. During Delta
 * GII in VM2 a DESTROY operation happens in VM1 and gets propagated to VM2 concurrently with GII.
 * At this point if entry version is greater than the one received from GII then it must not get
 * applied. Which is Bug #45921.
 */
@Test
public void testConflictChecksDuringConcurrentDeltaGIIAndOtherOp() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  // vm0: create a single-bucket PR with one redundant copy and seed one entry (version 1).
  vm0.invoke(new CacheSerializableRunnable("Create PR and put an entry") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      PartitionAttributes attrs = new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(1).create();
      AttributesFactory factory = new AttributesFactory();
      factory.setPartitionAttributes(attrs);
      RegionAttributes rAttrs = factory.create();
      Region region = cache.createRegionFactory(rAttrs).create("prRegion");
      region.put("testKey", "testValue");
      assertEquals(1, region.size());
    }
  });
  // Create a cache and region, do an update to change the version no. and
  // restart the cache and region. The restart forces vm1 to perform a delta GII.
  vm1.invoke(new CacheSerializableRunnable("Create PR and put an entry") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      PartitionAttributes attrs = new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(1).create();
      AttributesFactory factory = new AttributesFactory();
      factory.setPartitionAttributes(attrs);
      RegionAttributes rAttrs = factory.create();
      Region region = cache.createRegionFactory(rAttrs).create("prRegion");
      // Second put bumps the entry version to 2.
      region.put("testKey", "testValue2");
      cache.close();
      // Restart
      cache = getCache();
      region = cache.createRegionFactory(rAttrs).create("prRegion");
    }
  });
  // Do a DESTROY in vm0 when delta GII is in progress in vm1 (Hopefully, Not
  // guaranteed).
  AsyncInvocation async = vm0.invokeAsync(new CacheSerializableRunnable("Detroy entry in region") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region region = cache.getRegion("prRegion");
      // Wait until vm1's update has been applied locally before destroying, so the
      // destroy is the third (highest-version) operation on the key.
      while (!region.get("testKey").equals("testValue2")) {
        Wait.pause(100);
      }
      region.destroy("testKey");
    }
  });
  try {
    async.join(3000);
  } catch (InterruptedException e) {
    // BUGFIX: the original code constructed this AssertionError but never threw it,
    // silently swallowing the interrupt and letting the test continue. Restore the
    // interrupt status and fail the test with the original cause attached.
    Thread.currentThread().interrupt();
    throw new AssertionError("VM1 entry destroy did not finish in 3000 ms", e);
  }
  // vm1: the destroyed entry must survive only as a tombstone carrying entry version 3
  // (two puts plus the destroy) — i.e. the concurrent GII did not roll it back.
  vm1.invoke(new CacheSerializableRunnable("Verifying entry version in new node VM1") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region region = cache.getRegion("prRegion");
      Region.Entry entry = ((PartitionedRegion) region).getEntry("testKey", true);
      RegionEntry re = ((EntrySnapshot) entry).getRegionEntry();
      LogWriterUtils.getLogWriter().fine("RegionEntry for testKey: " + re.getKey() + " " + re.getValueInVM((LocalRegion) region));
      assertTrue(re.getValueInVM((LocalRegion) region) instanceof Tombstone);
      VersionTag tag = re.getVersionStamp().asVersionTag();
      assertEquals(3, /* Two puts and a Destroy */
          tag.getEntryVersion());
    }
  });
  closeCache(vm0);
  closeCache(vm1);
}
Use of org.apache.geode.internal.cache.RegionEntry in the Apache Geode project: class RegionJUnitTest, method testRegionEntryAccess.
/**
 * Verifies that every {@code Region.Entry} obtained from the test region — both via the
 * entry set and via per-key lookup — exposes a non-null underlying {@link RegionEntry},
 * whether the entry is a {@code LocalRegion.NonTXEntry} or an {@code EntrySnapshot}.
 */
@Test
public void testRegionEntryAccess() throws Exception {
  // Walk the entry set and unwrap each Region.Entry to its RegionEntry.
  for (Object obj : region.entrySet(false)) {
    Region.Entry entry = (Region.Entry) obj;
    RegionEntry underlying = (entry instanceof LocalRegion.NonTXEntry)
        ? ((LocalRegion.NonTXEntry) entry).getRegionEntry()
        : ((EntrySnapshot) entry).getRegionEntry();
    assertNotNull(underlying);
  }
  // Repeat the same unwrap check going through per-key entry lookup.
  LocalRegion localRegion = (LocalRegion) region;
  for (Object key : localRegion.keys()) {
    Region.Entry entry = localRegion.getEntry(key);
    RegionEntry underlying = (entry instanceof LocalRegion.NonTXEntry)
        ? ((LocalRegion.NonTXEntry) entry).getRegionEntry()
        : ((EntrySnapshot) entry).getRegionEntry();
    assertNotNull(underlying);
  }
}
Aggregations