use of org.apache.geode.internal.cache.EventID in project geode by apache.
the class HARQAddOperationJUnitTest method testMultipleQRMArrival.
/**
 * Multiple arrivals of QRM for the same thread id of the client. Initially the queue contains
 * objects 1-10. A QRM with sequence ID 5 arrives and should remove only objects 1-5. Then a QRM
 * with sequence ID 10 arrives, which should remove objects 6-10.
 */
@Test
public void testMultipleQRMArrival() throws Exception {
  HARegionQueue regionqueue = createHARegionQueue("testNoExpiryOnThreadIdentifier");
  EventID[] ids = new EventID[10];
  for (int i = 0; i < 10; i++) {
    ids[i] = new EventID(new byte[] { 1 }, 1, i + 1);
  }
  for (int i = 0; i < 10; i++) {
    regionqueue.put(new ConflatableObject("KEY " + i, "VALUE" + i, ids[i], true, "region1"));
  }
  // Available id size should be == 10 after putting ten entries
  assertEquals(10, regionqueue.getAvalaibleIds().size());
  // QRM message for thread id 1 and last sequence id 5
  regionqueue.removeDispatchedEvents(ids[4]);
  assertEquals(5, regionqueue.getAvalaibleIds().size());
  assertEquals(5, regionqueue.getCurrentCounterSet(ids[0]).size());
  Iterator iter = regionqueue.getCurrentCounterSet(ids[0]).iterator();
  while (iter.hasNext()) {
    Long cntr = (Long) iter.next();
    ConflatableObject co = (ConflatableObject) regionqueue.getRegion().get(cntr);
    assertTrue(co.getEventId().getSequenceID() > 5);
  }
  regionqueue.removeDispatchedEvents(ids[9]);
  assertEquals(0, regionqueue.getAvalaibleIds().size());
}
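The test above relies on removeDispatchedEvents() treating the passed EventID as a high-water mark: every queued event from the same thread with a sequence ID less than or equal to the given one is discarded in a single call. A minimal sketch of that pattern outside the test harness, using only the APIs exercised above (the queue name, keys and region name are illustrative placeholders):

// Sketch: QRM-style removal is cumulative per thread. Assumes the same
// HARegionQueue APIs used in testMultipleQRMArrival above.
HARegionQueue queue = createHARegionQueue("sketchQueue");

// Same client thread: membership id {1}, thread id 1, sequence ids 1..3.
EventID e1 = new EventID(new byte[] { 1 }, 1, 1);
EventID e2 = new EventID(new byte[] { 1 }, 1, 2);
EventID e3 = new EventID(new byte[] { 1 }, 1, 3);
queue.put(new ConflatableObject("k1", "v1", e1, true, "region1"));
queue.put(new ConflatableObject("k2", "v2", e2, true, "region1"));
queue.put(new ConflatableObject("k3", "v3", e3, true, "region1"));

// Acknowledging sequence id 2 removes ids 1 and 2 in one shot; id 3 survives.
queue.removeDispatchedEvents(e2);
assertEquals(1, queue.getAvalaibleIds().size());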
use of org.apache.geode.internal.cache.EventID in project geode by apache.
the class HARegionQueueDUnitTest method verifyMapsAndData.
/**
 * Verifies that the data has been populated correctly after GII.
 */
private static void verifyMapsAndData() {
  try {
    HARegion r1 = (HARegion) hrq.getRegion();
    // region should not be null
    assertNotNull(" Did not expect the HARegion to be null but it is", r1);
    // it should have ten non-null entries
    for (int i = 1; i < 11; i++) {
      assertNotNull(" Did not expect the entry to be null but it is", r1.get(new Long(i)));
    }
    // HARegionQueue should not be null
    assertNotNull(" Did not expect the HARegionQueue to be null but it is", hrq);
    Map conflationMap = hrq.getConflationMapForTesting();
    // conflationMap size should be greater than 0
    assertTrue(" Did not expect the conflationMap size to be 0 but it is", conflationMap.size() > 0);
    Map internalMap = (Map) conflationMap.get("HARegionQueueDUnitTest_region");
    // internal map should not be null; it should be present
    assertNotNull(" Did not expect the internalMap to be null but it is", internalMap);
    // get and verify the entries in the conflation map
    for (int i = 1; i < 11; i++) {
      assertTrue(" Expected the entry to be equal but it is not", internalMap.get("key" + i).equals(new Long(i)));
    }
    Map eventMap = hrq.getEventsMapForTesting();
    // DACE should not be null
    assertNotNull(" Did not expect the result (DACE object) to be null but it is", eventMap.get(new ThreadIdentifier(new byte[] { 1 }, 1)));
    Set counterSet = hrq.getCurrentCounterSet(new EventID(new byte[] { 1 }, 1, 1));
    assertTrue(" Expected the counter set size to be 10 but it is not", counterSet.size() == 10);
    long i = 1;
    Iterator iterator = counterSet.iterator();
    // the set is a LinkedHashSet, so iteration order matches insertion order
    while (iterator.hasNext()) {
      assertTrue(((Long) iterator.next()).longValue() == i);
      i++;
    }
    // The last dispatched sequence id should be -1 since no dispatch has been made
    assertTrue(hrq.getLastDispatchedSequenceId(new EventID(new byte[] { 1 }, 1, 1)) == -1);
    // Sleep for 7.5 seconds. Everything should expire, leaving all maps null or empty.
    Thread.sleep(7500);
    for (int j = 1; j < 11; j++) {
      assertNull("Expected the entry to be null since its expiry time was exceeded, but it is not", r1.get(new Long(j)));
    }
    internalMap = (Map) hrq.getConflationMapForTesting().get("HARegionQueueDUnitTest_region");
    assertNotNull(" Did not expect the internalMap to be null but it is", internalMap);
    assertTrue("internalMap (conflation) should have been empty since the expiry of all entries was exceeded, but it is not", internalMap.isEmpty());
    assertTrue("eventMap should have been empty since the expiry of all entries was exceeded, but it is not", eventMap.isEmpty());
    assertTrue("counter set should have been empty since the expiry of all entries was exceeded, but it is not", counterSet.isEmpty());
  } catch (Exception ex) {
    fail("failed in verifyMapsAndData()", ex);
  }
}
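The expiry that the second half of this method waits on is configured when the queue is created, via HARegionQueueAttributes. A minimal sketch of that setup, assuming the same attribute and factory APIs used later on this page (the queue name and the 5-second expiry are illustrative values):

// Sketch: configure entry expiry on an HARegionQueue so that the
// post-sleep assertions above can observe an emptied queue.
HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
hrqa.setExpiryTime(5); // entries expire after 5 seconds (illustrative value)
HARegionQueue expiringQueue = HARegionQueue.getHARegionQueueInstance(
    "sketchExpiringQueue", cache, hrqa, HARegionQueue.NON_BLOCKING_HA_QUEUE, false);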
use of org.apache.geode.internal.cache.EventID in project geode by apache.
the class HARegionQueueDUnitTest method createRegionQueue.
private static void createRegionQueue() throws Exception {
  HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
  cache = test.createCache();
  /*
   * AttributesFactory factory = new AttributesFactory(); factory.setScope(Scope.DISTRIBUTED_ACK);
   * factory.setDataPolicy(DataPolicy.REPLICATE);
   */
  hrq = HARegionQueue.getHARegionQueueInstance("HARegionQueueDUnitTest_region", cache, HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
  EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
  EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
  ConflatableObject c1 = new ConflatableObject("1", "1", id1, false, "HARegionQueueDUnitTest_region");
  ConflatableObject c2 = new ConflatableObject("2", "2", id2, false, "HARegionQueueDUnitTest_region");
  hrq.put(c1);
  hrq.put(c2);
}
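Once the queue holds the two conflatables, a consumer can drain it with take(). A short sketch mirroring the take() and size() usage that appears in testBugNo35988 below:

// Sketch: drain the two entries queued by createRegionQueue(), using the
// same take() call that testBugNo35988 relies on.
Conflatable first = (Conflatable) hrq.take();
Conflatable second = (Conflatable) hrq.take();
assertNotNull(first);
assertNotNull(second);
// After both takes the queue should be empty.
assertEquals(0, hrq.size());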
use of org.apache.geode.internal.cache.EventID in project geode by apache.
the class HARegionQueueDUnitTest method testBugNo35988.
/**
 * Behaviour of take() has been changed for the reliable messaging feature. The region queue take()
 * operation will no longer add to the Dispatch Message Map. Hence disabling the test - SUYOG
 *
 * Test for #35988: HARegionQueue.take() is not functioning as expected
 */
@Ignore("TODO: this test was disabled")
@Test
public void testBugNo35988() throws Exception {
  CacheSerializableRunnable createQueue = new CacheSerializableRunnable("CreateCache, HARegionQueue and start thread") {
    @Override
    public void run2() throws CacheException {
      HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
      // TODO:ASIF: Because the QRM thread cannot take a frequency below
      // 1 second, we need to carefully evaluate what to do. Though in
      // this case 1 second instead of 500 ms will work.
      // System.getProperties().put("QueueRemovalThreadWaitTime", new Long(500));
      cache = test.createCache();
      cache.setMessageSyncInterval(1);
      HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
      hrqa.setExpiryTime(300);
      try {
        hrq = HARegionQueue.getHARegionQueueInstance("testregion1", cache, hrqa, HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
        // Do 1000 puts and 100 takes in a separate thread
        hrq.put(new ConflatableObject(new Long(1), new Long(1), new EventID(new byte[] { 0 }, 1, 1), false, "dummy"));
      } catch (Exception e) {
        throw new AssertionError(e);
      }
    }
  };
  vm0.invoke(createQueue);
  vm1.invoke(createQueue);
  vm0.invoke(new CacheSerializableRunnable("takeFromVm0") {
    @Override
    public void run2() throws CacheException {
      try {
        Conflatable obj = (Conflatable) hrq.take();
        assertNotNull(obj);
      } catch (Exception e) {
        throw new AssertionError(e);
      }
    }
  });
  vm1.invoke(new CacheSerializableRunnable("checkInVm1") {
    @Override
    public void run2() throws CacheException {
      WaitCriterion ev = new WaitCriterion() {
        @Override
        public boolean done() {
          // TODO is this necessary?
          Thread.yield();
          return hrq.size() == 0;
        }

        @Override
        public String description() {
          return "waiting for the HARegionQueue in vm1 to drain to size 0";
        }
      };
      Wait.waitForCriterion(ev, 60 * 1000, 200, true);
    }
  });
}
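The polling pattern at the end of this test, spinning on done() until a deadline, recurs throughout these DUnit tests. A sketch of a small reusable helper built on the same Wait.waitForCriterion() call (the helper name is illustrative and not part of the Geode test framework):

// Sketch: a reusable wrapper over Wait.waitForCriterion() for the common
// "wait until the queue drains" check.
private static void awaitQueueEmpty(final HARegionQueue queue, long timeoutMs) {
  WaitCriterion criterion = new WaitCriterion() {
    @Override
    public boolean done() {
      return queue.size() == 0;
    }

    @Override
    public String description() {
      return "waiting for HARegionQueue to drain; current size = " + queue.size();
    }
  };
  // Poll every 200 ms; fail the test if the deadline passes.
  Wait.waitForCriterion(criterion, timeoutMs, 200, true);
}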
use of org.apache.geode.internal.cache.EventID in project geode by apache.
the class HARegionQueueDUnitTest method concurrentOperationsDunitTest.
private void concurrentOperationsDunitTest(final boolean createBlockingQueue, final Scope rscope) {
  // Create Cache and HARegionQueue in all the 4 VMs.
  CacheSerializableRunnable createRgnsAndQueues = new CacheSerializableRunnable("CreateCache, mirrored Region & HARegionQueue with a CacheListener") {
    @Override
    public void run2() throws CacheException {
      HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
      System.getProperties().put("QueueRemovalThreadWaitTime", "2000");
      cache = test.createCache();
      AttributesFactory factory = new AttributesFactory();
      factory.setScope(rscope);
      factory.setDataPolicy(DataPolicy.REPLICATE);
      HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
      hrqa.setExpiryTime(5);
      try {
        if (createBlockingQueue) {
          hrq = HARegionQueue.getHARegionQueueInstance("testregion1", cache, hrqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
        } else {
          hrq = HARegionQueue.getHARegionQueueInstance("testregion1", cache, hrqa, HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
        }
      } catch (Exception e) {
        throw new AssertionError(e);
      }
      factory.addCacheListener(new CacheListenerAdapter() {
        @Override
        public void afterCreate(final EntryEvent event) {
          Conflatable conflatable = new ConflatableObject(event.getKey(), event.getNewValue(), ((EntryEventImpl) event).getEventId(), false, event.getRegion().getFullPath());
          try {
            hrq.put(conflatable);
          } catch (Exception e) {
            fail("The put operation on the queue did not succeed due to exception =", e);
          }
        }

        @Override
        public void afterUpdate(final EntryEvent event) {
          Conflatable conflatable = new ConflatableObject(event.getKey(), event.getNewValue(), ((EntryEventImpl) event).getEventId(), true, event.getRegion().getFullPath());
          try {
            hrq.put(conflatable);
          } catch (Exception e) {
            fail("The put operation on the queue did not succeed due to exception =", e);
          }
        }
      });
      cache.createRegion("test_region", factory.create());
    }
  };
  vm0.invoke(createRgnsAndQueues);
  vm1.invoke(createRgnsAndQueues);
  vm2.invoke(createRgnsAndQueues);
  vm3.invoke(createRgnsAndQueues);
  CacheSerializableRunnable spawnThreadsAndperformOps = new CacheSerializableRunnable("Spawn multiple threads which do various operations") {
    @Override
    public void run2() throws CacheException {
      opThreads = new Thread[4 + 2 + 2 + 2];
      for (int i = 0; i < 4; ++i) {
        opThreads[i] = new Thread(new RunOp(RunOp.PUT, i), "ID=" + i + ",Op=" + RunOp.PUT);
      }
      for (int i = 4; i < 6; ++i) {
        opThreads[i] = new Thread(new RunOp(RunOp.PEEK, i), "ID=" + i + ",Op=" + RunOp.PEEK);
      }
      for (int i = 6; i < 8; ++i) {
        opThreads[i] = new Thread(new RunOp(RunOp.TAKE, i), "ID=" + i + ",Op=" + RunOp.TAKE);
      }
      for (int i = 8; i < 10; ++i) {
        opThreads[i] = new Thread(new RunOp(RunOp.BATCH_PEEK, i), "ID=" + i + ",Op=" + RunOp.BATCH_PEEK);
      }
      for (int i = 0; i < opThreads.length; ++i) {
        opThreads[i].start();
      }
    }
  };
  vm0.invokeAsync(spawnThreadsAndperformOps);
  vm1.invokeAsync(spawnThreadsAndperformOps);
  vm2.invokeAsync(spawnThreadsAndperformOps);
  vm3.invokeAsync(spawnThreadsAndperformOps);
  try {
    Thread.sleep(2000);
  } catch (InterruptedException e1) {
    fail("Test failed as the test thread encountered an exception in sleep", e1);
  }
  // In case of a blocking HARegionQueue, do some extra puts so that the
  // blocked threads exit
  CacheSerializableRunnable toggleFlag = new CacheSerializableRunnable("Toggle the flag to signal end of threads") {
    @Override
    public void run2() throws CacheException {
      toCnt = false;
      if (createBlockingQueue) {
        try {
          for (int i = 0; i < 100; ++i) {
            hrq.put(new ConflatableObject("1", "1", new EventID(new byte[] { 1 }, 100, i), false, "/x"));
          }
        } catch (Exception e) {
          throw new AssertionError(e);
        }
      }
    }
  };
  vm0.invokeAsync(toggleFlag);
  vm1.invokeAsync(toggleFlag);
  vm2.invokeAsync(toggleFlag);
  vm3.invokeAsync(toggleFlag);
  CacheSerializableRunnable joinWithThreads = new CacheSerializableRunnable("Join with the threads") {
    @Override
    public void run2() throws CacheException {
      for (int i = 0; i < opThreads.length; ++i) {
        if (opThreads[i].isInterrupted()) {
          fail("Test failed because thread encountered exception");
        }
        ThreadUtils.join(opThreads[i], 30 * 1000);
      }
    }
  };
  vm0.invoke(joinWithThreads);
  vm1.invoke(joinWithThreads);
  vm2.invoke(joinWithThreads);
  vm3.invoke(joinWithThreads);
  System.getProperties().remove("QueueRemovalThreadWaitTime");
}
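The RunOp runnable that the spawning code constructs is not shown on this page. A hedged sketch of what such a worker might look like, assuming the op constants, the shared hrq queue, and the volatile toCnt flag seen in the surrounding code; this is an illustration, not the test's actual inner class:

// Hypothetical sketch of the RunOp worker referenced above. The real inner
// class is not shown on this page; constant values and the batch size are
// illustrative.
static class RunOp implements Runnable {
  static final int PUT = 1;
  static final int PEEK = 2;
  static final int TAKE = 3;
  static final int BATCH_PEEK = 4;

  private final int opType;
  private final int threadId;

  RunOp(int opType, int threadId) {
    this.opType = opType;
    this.threadId = threadId;
  }

  @Override
  public void run() {
    int sequenceId = 0;
    try {
      // Keep operating until the test flips the shared toCnt flag.
      while (toCnt) {
        switch (opType) {
          case PUT:
            hrq.put(new ConflatableObject("key" + threadId, "val" + sequenceId,
                new EventID(new byte[] { 1 }, threadId, ++sequenceId), false, "test_region"));
            break;
          case PEEK:
            hrq.peek();
            break;
          case TAKE:
            hrq.take();
            break;
          case BATCH_PEEK:
            hrq.peek(5); // peek a small batch (batch size illustrative)
            break;
        }
      }
    } catch (Exception e) {
      // Interrupt the thread so joinWithThreads can detect the failure.
      Thread.currentThread().interrupt();
    }
  }
}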