Use of org.apache.geode.cache.EntryEvent in project geode by apache.
The class MultiVMRegionTestCase, method testEntryTtlDestroyEvent.
/**
* Tests that an entry in a distributed region that expires with a distributed destroy causes an
* event in the other VM with the isExpiration flag set.
*/
// GEODE-583: time sensitive, expiration, waitForCriterion, short timeouts
@Category(FlakyTest.class)
@Test
public void testEntryTtlDestroyEvent() throws Exception {
assumeTrue(getRegionAttributes().getPartitionAttributes() == null);
final String name = this.getUniqueName();
final int timeout = 22; // ms
final Object key = "KEY";
final Object value = "VALUE";
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
class DestroyListener extends TestCacheListener {
boolean eventIsExpiration = false;
@Override
public void afterDestroyBeforeAddEvent(EntryEvent event) {
eventIsExpiration = event.getOperation().isExpiration();
}
@Override
public void afterDestroy2(EntryEvent event) {
if (event.isOriginRemote()) {
assertTrue(!event.getDistributedMember().equals(getSystem().getDistributedMember()));
} else {
assertEquals(getSystem().getDistributedMember(), event.getDistributedMember());
}
assertEquals(Operation.EXPIRE_DESTROY, event.getOperation());
assertEquals(value, event.getOldValue());
eventIsExpiration = event.getOperation().isExpiration();
}
@Override
public void afterCreate2(EntryEvent event) {
// ignore
}
@Override
public void afterUpdate2(EntryEvent event) {
// ignore
}
}
SerializableRunnable createRegion = new CacheSerializableRunnable("Create with Listener") {
@Override
public void run2() throws CacheException {
AttributesFactory fac = new AttributesFactory(getRegionAttributes());
fac.addCacheListener(destroyListener = new DestroyListener());
createRegion(name, fac.create());
}
};
vm1.invoke(createRegion);
vm0.invoke(new CacheSerializableRunnable("Create with TTL") {
@Override
public void run2() throws CacheException {
AttributesFactory factory = new AttributesFactory(getRegionAttributes());
factory.setStatisticsEnabled(true);
ExpirationAttributes expire = new ExpirationAttributes(timeout, ExpirationAction.DESTROY);
factory.setEntryTimeToLive(expire);
if (!getRegionAttributes().getDataPolicy().withReplication()) {
factory.setDataPolicy(DataPolicy.NORMAL);
factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
}
System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
try {
createRegion(name, factory.create());
ExpiryTask.suspendExpiration();
// suspend to make sure we can see that the put is distributed to this member
} finally {
System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
}
}
});
try {
// let region create finish before doing put
// pause(10);
vm1.invoke(new SerializableCallable() {
@Override
public Object call() throws Exception {
Region region = getRootRegion().getSubregion(name);
DestroyListener dl = (DestroyListener) region.getAttributes().getCacheListeners()[0];
dl.enableEventHistory();
region.put(key, value);
// reset listener after create event
assertTrue(dl.wasInvoked());
List<CacheEvent> history = dl.getEventHistory();
CacheEvent ce = history.get(0);
dl.disableEventHistory();
assertEquals(Operation.CREATE, ce.getOperation());
return null;
}
});
vm0.invoke(new CacheSerializableRunnable("Check create received from vm1") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(name);
WaitCriterion waitForCreate = new WaitCriterion() {
@Override
public boolean done() {
return region.getEntry(key) != null;
}
@Override
public String description() {
return "never saw create of " + key;
}
};
Wait.waitForCriterion(waitForCreate, 3000, 10, true);
}
});
} finally {
vm0.invoke(new CacheSerializableRunnable("resume expiration") {
@Override
public void run2() throws CacheException {
ExpiryTask.permitExpiration();
}
});
}
// now wait for it to expire
vm0.invoke(new CacheSerializableRunnable("Check local destroy") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(name);
WaitCriterion waitForExpire = new WaitCriterion() {
@Override
public boolean done() {
return region.getEntry(key) == null;
}
@Override
public String description() {
return "never saw expire of " + key + " entry=" + region.getEntry(key);
}
};
Wait.waitForCriterion(waitForExpire, 4000, 10, true);
}
});
vm1.invoke(new CacheSerializableRunnable("Verify destroyed and event") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(name);
WaitCriterion waitForExpire = new WaitCriterion() {
@Override
public boolean done() {
return region.getEntry(key) == null;
}
@Override
public String description() {
return "never saw expire of " + key + " entry=" + region.getEntry(key);
}
};
Wait.waitForCriterion(waitForExpire, 4000, 10, true);
assertTrue(destroyListener.waitForInvocation(555));
assertTrue(((DestroyListener) destroyListener).eventIsExpiration);
}
});
}
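Outside the dunit harness, the pattern this test exercises can be shown with a much smaller standalone sketch: a replicated region with entry time-to-live set to a DESTROY action, plus a listener that checks Operation.isExpiration(). This is only a sketch; the region name, the 1-second timeout, and the single-member setup are illustrative assumptions, not part of the test above.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.ExpirationAction;
import org.apache.geode.cache.ExpirationAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.util.CacheListenerAdapter;

public class EntryTtlDestroySketch {
  public static void main(String[] args) throws InterruptedException {
    // single-member cache; "ttlExample" and the 1-second timeout are illustrative
    Cache cache = new CacheFactory().set("mcast-port", "0").create();
    Region<String, String> region = cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE)
        .setStatisticsEnabled(true) // expiration only runs when statistics are enabled
        .setEntryTimeToLive(new ExpirationAttributes(1, ExpirationAction.DESTROY))
        .addCacheListener(new CacheListenerAdapter<String, String>() {
          @Override
          public void afterDestroy(EntryEvent<String, String> event) {
            // an expired entry arrives as Operation.EXPIRE_DESTROY, so isExpiration() is true
            System.out.println(event.getKey() + " destroyed, isExpiration="
                + event.getOperation().isExpiration());
          }
        })
        .create("ttlExample");
    region.put("KEY", "VALUE");
    Thread.sleep(3000); // give the expiry task time to fire
    cache.close();
  }
}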
Use of org.apache.geode.cache.EntryEvent in project geode by apache.
The class DeltaPropagationStatsDUnitTest, method createServerCache.
public static Integer createServerCache(Boolean flag, DataPolicy policy, Scope scope, Boolean listener) throws Exception {
ConnectionTable.threadWantsSharedResources();
DeltaPropagationStatsDUnitTest test = new DeltaPropagationStatsDUnitTest();
Properties props = new Properties();
if (!flag) {
props.setProperty(DELTA_PROPAGATION, "false");
}
cache = test.createCache(props);
AttributesFactory factory = new AttributesFactory();
factory.setScope(scope);
factory.setDataPolicy(policy);
if (listener) {
factory.addCacheListener(new CacheListenerAdapter() {
public void afterCreate(EntryEvent event) {
if (event.getKey().equals(LAST_KEY)) {
lastKeyReceived = true;
}
}
});
}
Region region = cache.createRegion(REGION_NAME, factory.create());
if (!policy.isReplicate()) {
region.create("KEY", "KEY");
}
region.getAttributesMutator().setCloningEnabled(false);
CacheServer server = cache.addCacheServer();
int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
server.setPort(port);
server.setNotifyBySubscription(true);
server.start();
return server.getPort();
}
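For readers outside the dunit framework, here is a hedged sketch of the same server-side setup using only the public API: a region with an afterCreate listener that watches for a marker key, and a CacheServer started on an ephemeral port. The region name, the "LAST_KEY" marker, and the use of port 0 are illustrative assumptions.

import java.io.IOException;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.server.CacheServer;
import org.apache.geode.cache.util.CacheListenerAdapter;

public class ServerCacheSketch {
  static volatile boolean lastKeyReceived;

  public static void main(String[] args) throws IOException {
    Cache cache = new CacheFactory().set("mcast-port", "0").create();
    Region<String, String> region = cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE)
        .addCacheListener(new CacheListenerAdapter<String, String>() {
          @Override
          public void afterCreate(EntryEvent<String, String> event) {
            // flag arrival of the (illustrative) marker key
            if ("LAST_KEY".equals(event.getKey())) {
              lastKeyReceived = true;
            }
          }
        })
        .create("exampleRegion");

    CacheServer server = cache.addCacheServer();
    server.setPort(0); // port 0 asks the server to bind an ephemeral port
    server.start();
    System.out.println("cache server listening on port " + server.getPort());
    region.put("KEY", "VALUE");
  }
}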
Use of org.apache.geode.cache.EntryEvent in project geode by apache.
The class DiskRegCbkChkJUnitTest, method testAfterCallbacks.
@Test
public void testAfterCallbacks() {
region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, getDiskRegionProperties(), Scope.LOCAL);
// testing create callbacks
region.getAttributesMutator().setCacheListener(new CacheListenerAdapter() {
public void afterCreate(EntryEvent event) {
intoCreateAfterCbk = true;
}
});
region.getAttributesMutator().setCacheWriter(new CacheWriterAdapter() {
public void beforeCreate(EntryEvent event) {
region.clear();
}
});
region.create("key1", "createValue");
assertTrue("Create callback not called", intoCreateAfterCbk);
// testing update callbacks
region.getAttributesMutator().setCacheListener(new CacheListenerAdapter() {
public void afterUpdate(EntryEvent event) {
intoUpdateAfterCbk = true;
}
});
region.getAttributesMutator().setCacheWriter(new CacheWriterAdapter() {
public void beforeUpdate(EntryEvent event) {
region.clear();
}
});
region.create("key2", "createValue");
region.put("key2", "updateValue");
assertTrue("Update callback not called", intoUpdateAfterCbk);
// testing destroy callbacks
region.getAttributesMutator().setCacheListener(new CacheListenerAdapter() {
public void afterDestroy(EntryEvent event) {
intoDestroyAfterCbk = true;
}
});
region.getAttributesMutator().setCacheWriter(new CacheWriterAdapter() {
public void beforeDestroy(EntryEvent event) {
region.clear();
}
});
region.create("key3", "createValue");
region.destroy("key3");
assertTrue("Destroy callback not called", intoDestroyAfterCbk);
}
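The callback ordering this test depends on (the CacheWriter's before* hook runs synchronously before the operation, the CacheListener's after* hook runs after the entry has changed) can be sketched on a plain local region, without the disk-region helper. Names and values below are illustrative.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.util.CacheListenerAdapter;
import org.apache.geode.cache.util.CacheWriterAdapter;

public class CallbackOrderSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().set("mcast-port", "0").create();
    Region<String, String> region = cache.<String, String>createRegionFactory(RegionShortcut.LOCAL)
        .setCacheWriter(new CacheWriterAdapter<String, String>() {
          @Override
          public void beforeCreate(EntryEvent<String, String> event) {
            // runs before the entry exists; throwing CacheWriterException here would veto the create
            System.out.println("beforeCreate " + event.getKey());
          }
        })
        .addCacheListener(new CacheListenerAdapter<String, String>() {
          @Override
          public void afterCreate(EntryEvent<String, String> event) {
            // runs after the entry has been created in the region
            System.out.println("afterCreate " + event.getKey());
          }
        })
        .create("callbackOrder");
    region.create("key1", "createValue");
    cache.close();
  }
}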
Use of org.apache.geode.cache.EntryEvent in project geode by apache.
The class HARegionQueueJUnitTest, method testConcurrentEventExpiryAndTake.
/**
* The basis of HARegionQueue's add and remove operations is that an event being added first goes
* into the DACE, the underlying Region, and so on, and is finally published by adding its ID to
* the available IDs set. Similarly, an event being removed should first be removed from the
* available IDs set and only then from the underlying structures; the thread that successfully
* removes an ID from the available IDs set is responsible for removing the entry from all other
* places. If the expiry task nulls out the underlying event before it has been removed from the
* available IDs set, there is a potential violation. This test validates that behaviour.
*/
@Test
public void testConcurrentEventExpiryAndTake() throws Exception {
AtomicBoolean complete = new AtomicBoolean(false);
AtomicBoolean expiryCalled = new AtomicBoolean(false);
AtomicBoolean allowExpiryToProceed = new AtomicBoolean(false);
HARegionQueueAttributes haa = new HARegionQueueAttributes();
haa.setExpiryTime(3);
RegionQueue regionqueue = new HARegionQueue.TestOnlyHARegionQueue(this.testName.getMethodName(), this.cache, haa) {
@Override
CacheListener createCacheListenerForHARegion() {
return new CacheListenerAdapter() {
@Override
public void afterInvalidate(EntryEvent event) {
if (event.getKey() instanceof Long) {
synchronized (HARegionQueueJUnitTest.this) {
expiryCalled.set(true);
HARegionQueueJUnitTest.this.notifyAll();
}
Thread.yield();
synchronized (HARegionQueueJUnitTest.this) {
while (!allowExpiryToProceed.get()) {
try {
HARegionQueueJUnitTest.this.wait();
} catch (InterruptedException e) {
errorCollector.addError(e);
break;
}
}
}
try {
expireTheEventOrThreadIdentifier(event);
} catch (CacheException e) {
errorCollector.addError(e);
} finally {
synchronized (HARegionQueueJUnitTest.this) {
complete.set(true);
HARegionQueueJUnitTest.this.notifyAll();
}
}
}
}
};
}
};
EventID ev1 = new EventID(new byte[] { 1 }, 1, 1);
Conflatable cf1 = new ConflatableObject("key", "value", ev1, true, this.testName.getMethodName());
regionqueue.put(cf1);
synchronized (this) {
while (!expiryCalled.get()) {
wait();
}
}
try {
Object o = regionqueue.take();
assertThat(o, nullValue());
} catch (Exception e) {
throw new AssertionError("Test failed due to exception ", e);
} finally {
synchronized (this) {
allowExpiryToProceed.set(true);
notifyAll();
}
}
synchronized (this) {
while (!complete.get()) {
wait();
}
}
}
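The heart of the test above is the handshake between the expiry callback and the thread calling take(). Stripped of the HARegionQueue internals, the same wait/notify pattern looks roughly like this (all names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class ExpiryTakeHandshakeSketch {
  private final Object monitor = new Object();
  private final AtomicBoolean expiryCalled = new AtomicBoolean(false);
  private final AtomicBoolean allowExpiryToProceed = new AtomicBoolean(false);

  // "Expiry" side: announce that the race window is open, then block until the
  // "take" side has finished its check.
  void expiry() throws InterruptedException {
    synchronized (monitor) {
      expiryCalled.set(true);
      monitor.notifyAll();
      while (!allowExpiryToProceed.get()) {
        monitor.wait();
      }
    }
    System.out.println("expiry completes only after take has run");
  }

  // "Take" side: wait for the window to open, do the work that must observe the
  // half-expired state, then release the expiry side.
  void take() throws InterruptedException {
    synchronized (monitor) {
      while (!expiryCalled.get()) {
        monitor.wait();
      }
    }
    System.out.println("take runs while expiry is paused");
    synchronized (monitor) {
      allowExpiryToProceed.set(true);
      monitor.notifyAll();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ExpiryTakeHandshakeSketch sketch = new ExpiryTakeHandshakeSketch();
    Thread expiryThread = new Thread(() -> {
      try {
        sketch.expiry();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
    expiryThread.start();
    sketch.take();
    expiryThread.join();
  }
}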
Use of org.apache.geode.cache.EntryEvent in project geode by apache.
The class HARQAddOperationJUnitTest, method testAddWithQRMAndExpiry.
/**
* Add with QRM and expiry: add 10 conflatable objects (0-9). Send a QRM with LastDispatched set
* to 4 and validate that the sequence ID field of the LastDispatchedWrapper object is updated to
* 4. Perform take operations to remove the remaining events (sequence IDs 5 to 9); the sequenceId
* field should then be 9. Allow the ThreadIdentifier to expire. The expiration should fail because
* the original sequenceId (-1) does not match 9. Validate the reset with value 9. The next expiry
* should remove the LastDispatchedWrapper.
*/
@Test
public void testAddWithQRMAndExpiry() throws Exception {
try {
HARegionQueueAttributes attrs = new HARegionQueueAttributes();
attrs.setExpiryTime(10);
final HARegionQueue regionqueue = new HARegionQueue.TestOnlyHARegionQueue("testing", cache, attrs) {
CacheListener createCacheListenerForHARegion() {
return new CacheListenerAdapter() {
public void afterInvalidate(EntryEvent event) {
try {
expireTheEventOrThreadIdentifier(event);
} catch (CacheException ce) {
logger.error("HAREgionQueue::createCacheListener::Exception in the expiry thread", ce);
}
if (event.getKey() instanceof ThreadIdentifier) {
synchronized (HARQAddOperationJUnitTest.this) {
expiryCount++;
HARQAddOperationJUnitTest.this.notify();
}
}
}
};
}
};
Conflatable[] cf = new Conflatable[10];
// put 10 conflatable objects
for (int i = 0; i < 10; i++) {
cf[i] = new ConflatableObject("key" + i, "value", new EventID(new byte[] { 1 }, 1, i), true, "testing");
regionqueue.put(cf[i]);
}
ThreadIdentifier tID = new ThreadIdentifier(new byte[] { 1 }, 1);
// verify that the sequence-id for Thread-identifier is -1 (default value).
assertEquals(new Long(-1), regionqueue.getRegion().get(tID));
// remove the first 5 - (0-4 sequence IDs)
regionqueue.removeDispatchedEvents(new EventID(new byte[] { 1 }, 1, 4));
// verify that the last dispatched event was of sequence id 4
assertEquals(4, regionqueue.getLastDispatchedSequenceId(new EventID(new byte[] { 1 }, 1, 1)));
// verify 1-5 not in region
for (long i = 1; i < 6; i++) {
assertTrue(!regionqueue.getRegion().containsKey(new Long(i)));
}
// verify 6-10 still in region queue
for (long i = 6; i < 11; i++) {
assertTrue(regionqueue.getRegion().containsKey(new Long(i)));
}
// Perform 5 take operations to remove next 5-9 sequence ids
for (long i = 6; i < 11; i++) {
regionqueue.take();
}
// verify that the last dispatched event was of sequence id 9
assertEquals(9, regionqueue.getLastDispatchedSequenceId(new EventID(new byte[] { 1 }, 1, 1)));
// verify that sequence ids 1-10 all are removed from the RQ
for (long i = 1; i < 11; i++) {
assertTrue(!regionqueue.getRegion().containsKey(new Long(i)));
}
// wait until expiry thread has run once
synchronized (HARQAddOperationJUnitTest.this) {
if (0 == expiryCount) {
HARQAddOperationJUnitTest.this.wait();
}
if (1 == expiryCount) {
// verify that the Thread-identifier has not yet expired
assertEquals(1, regionqueue.getEventsMapForTesting().size());
// verify that the sequence-id for Thread-identifier is updated to 9
assertEquals(new Long(9), regionqueue.getRegion().get(tID));
// wait until expiry thread has run again
HARQAddOperationJUnitTest.this.wait();
}
}
// verify that the Thread-identifier has expired
assertEquals(0, regionqueue.getEventsMapForTesting().size());
// verify that the sequence-id for Thread-identifier is null
assertNull(regionqueue.getRegion().get(tID));
} catch (Exception e) {
throw new AssertionError("Exception occurred in test due to", e);
}
}
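The conditional expiry described in the comment ("the expiration should fail because the original sequenceId (-1) does not match 9") is essentially a compare-then-remove. A hedged sketch of that idea with a plain ConcurrentMap, using illustrative names rather than the HARegionQueue internals:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConditionalExpirySketch {
  public static void main(String[] args) {
    // maps a thread identifier to the last dispatched sequence id; names are illustrative
    ConcurrentMap<String, Long> lastDispatched = new ConcurrentHashMap<>();
    lastDispatched.put("tid-1", -1L);

    // events 0..9 are dispatched, advancing the sequence id to 9
    lastDispatched.put("tid-1", 9L);

    // an expiry scheduled when the value was still -1 must not remove the entry:
    // the two-argument remove only succeeds if the current value matches
    boolean expiredStale = lastDispatched.remove("tid-1", -1L);
    System.out.println("stale expiry removed entry: " + expiredStale); // false

    // the next expiry, scheduled against the current value 9, does remove it
    boolean expiredCurrent = lastDispatched.remove("tid-1", 9L);
    System.out.println("current expiry removed entry: " + expiredCurrent); // true
  }
}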