Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class HARegionQueueDUnitTest, method testNPEDueToHARegionQueueEscapeInConstructor.
/**
 * This is to test the bug which is caused when the HARegionQueue object has not been fully
 * constructed but, as the backing HARegion has already been constructed, the partially built
 * queue becomes visible to the QRM (Queue Removal Message) thread.
 *
 * TODO: this test runs too long! Shorten run time. 1m 40s on new Mac.
 */
// GEODE-690: async queuing, time sensitive, expiration,
@Category(FlakyTest.class)
// waitForCriterion, joins
@Test
public void testNPEDueToHARegionQueueEscapeInConstructor() {
// changing EXPIRY_TIME to 5 doesn't change how long the test runs!
// test will run for this many seconds
final int EXPIRY_TIME = 30;
// Create two HARegionQueues in the two VMs. The frequency of the QRM thread
// should be high in vm0; then check for a NullPointerException in the other VM.
CacheSerializableRunnable createQueuesAndThread = new CacheSerializableRunnable("CreateCache, HARegionQueue and start thread") {
@Override
public void run2() throws CacheException {
HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
// TODO:ASIF: Because the QRM thread cannot take a frequency below 1 second,
// we need to carefully evaluate what to do. For this bug to appear (without
// the bugfix), the QRM needs to run very fast.
// System.getProperties().put("QueueRemovalThreadWaitTime", new Long(10));
cache = test.createCache();
// 1-second sync interval makes the QRM thread run as frequently as allowed.
cache.setMessageSyncInterval(1);
HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
hrqa.setExpiryTime(EXPIRY_TIME);
try {
hrq = HARegionQueue.getHARegionQueueInstance("testNPEDueToHARegionQueueEscapeInConstructor", cache, hrqa, HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
// changing OP_COUNT to 20 makes no difference in test time
final int OP_COUNT = 200;
// Do OP_COUNT puts here, then OP_COUNT takes in a separate thread.
for (int i = 0; i < OP_COUNT; ++i) {
hrq.put(new ConflatableObject(new Long(i), new Long(i), new EventID(new byte[] { 0 }, 1, i), false, "dummy"));
}
opThreads = new Thread[1];
opThreads[0] = new Thread(new Runnable() {
@Override
public void run() {
for (int i = 0; i < OP_COUNT; ++i) {
try {
Object o = hrq.take();
if (o == null) {
// Queue momentarily empty; back off briefly before the next take.
Thread.sleep(50);
}
} catch (InterruptedException e) {
throw new AssertionError(e);
}
}
}
});
opThreads[0].start();
} catch (Exception e) {
throw new AssertionError(e);
}
}
};
// Creates the cache and an identically named HARegionQueue in the second VM,
// recording the executing thread so it can be joined later.
CacheSerializableRunnable createQueues = new CacheSerializableRunnable("CreateCache, HARegionQueue ") {
@Override
public void run2() throws CacheException {
createQueuesThread = Thread.currentThread();
HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
// System.getProperties().put("QueueRemovalThreadWaitTime",
// new Long(120000));
cache = test.createCache();
cache.setMessageSyncInterval(EXPIRY_TIME);
HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
hrqa.setExpiryTime(EXPIRY_TIME);
try {
hrq = HARegionQueue.getHARegionQueueInstance("testNPEDueToHARegionQueueEscapeInConstructor", cache, hrqa, HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
} catch (Exception e) {
throw new AssertionError(e);
}
}
};
// Waits until createQueues has started (createQueuesThread becomes non-null),
// then joins that thread.
CacheSerializableRunnable waitForCreateQueuesThread = new CacheSerializableRunnable("joinCreateCache") {
@Override
public void run2() {
WaitCriterion ev = new WaitCriterion() {
@Override
public boolean done() {
return createQueuesThread != null;
}
@Override
public String description() {
return null;
}
};
Wait.waitForCriterion(ev, 30 * 1000, 200, true);
ThreadUtils.join(createQueuesThread, 300 * 1000);
}
};
vm0.invoke(createQueuesAndThread);
// Asynchronous so that vm1's queue creation overlaps with vm0's running taker thread.
vm1.invokeAsync(createQueues);
// Joins vm0's taker thread; an interrupt indicates a failure inside puts/takes.
CacheSerializableRunnable joinWithThread = new CacheSerializableRunnable("CreateCache, HARegionQueue join with thread") {
@Override
public void run2() throws CacheException {
if (opThreads[0].isInterrupted()) {
fail("The test has failed as it encountered interrupts in puts & takes");
}
ThreadUtils.join(opThreads[0], 30 * 1000);
}
};
vm0.invoke(joinWithThread);
vm1.invoke(waitForCreateQueuesThread);
}
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class HARegionQueueJUnitTest, method testSafeConflationRemoval.
/**
 * Verifies safe removal from the conflation map: removing dispatched events must only
 * drop stale entries and must never delete the latest value for a key.
 */
@Test
public void testSafeConflationRemoval() throws Exception {
  hrqForTestSafeConflationRemoval = new HARQTestClass("testSafeConflationRemoval", this.cache);
  // Put a single conflatable entry for "key1".
  EventID eventId = new EventID(new byte[] { 1 }, 1, 1);
  Conflatable conflatable = new ConflatableObject("key1", "value", eventId, true, "testSafeConflationRemoval");
  hrqForTestSafeConflationRemoval.put(conflatable);
  // Remove dispatched events for an equal event id; the latest entry must survive.
  hrqForTestSafeConflationRemoval.removeDispatchedEvents(new EventID(new byte[] { 1 }, 1, 1));
  Map conflationMap = (Map) hrqForTestSafeConflationRemoval.getConflationMapForTesting().get("testSafeConflationRemoval");
  assertThat("Expected the counter to be 2 since it should not have been deleted but it is not so ", conflationMap.get("key1"), is(2L));
}
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class HAGIIDUnitTest, method tombstonegc.
/** Queues a tombstone GC message for the client. See bug #46832. */
public static void tombstonegc() throws Exception {
  LocalRegion region = (LocalRegion) cache.getRegion("/" + REGION_NAME);
  assertNotNull(region);
  DistributedMember member = region.getCache().getDistributedSystem().getDistributedMember();
  // Create a region-destroy event; it carries the client filter routing used for notification.
  RegionEventImpl event = new RegionEventImpl(region, Operation.REGION_DESTROY, null, true, member);
  FilterInfo routing = region.getFilterProfile().getLocalFilterRouting(event);
  // There must be at least one interested client, otherwise nothing would be notified.
  assertTrue(routing.getInterestedClients().size() > 0);
  event.setLocalFilterInfo(routing);
  // An empty region-version map is sufficient for this GC message.
  Map<VersionSource, Long> gcVersions = Collections.emptyMap();
  ClientTombstoneMessage gcMessage = ClientTombstoneMessage.gc(region, gcVersions, new EventID(region.getCache().getDistributedSystem()));
  CacheClientNotifier.notifyClients(event, gcMessage);
}
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class HARQAddOperationJUnitTest, method testPeekAndRemoveWithConflation.
/**
 * Test where 5 separate threads do 4 puts each, all on a per-thread key with conflation
 * enabled, so each thread's puts collapse to its single latest entry. A thread then peeks
 * the queue and invokes remove. The remove should ensure that the entries are deleted from
 * the available IDs and from the Counters set contained in each DACE, and that the
 * conflation map for the region is emptied.
 */
@Test
public void testPeekAndRemoveWithConflation() throws Exception {
  testFailed = false;
  // Initialize to an empty buffer rather than null: the worker threads call
  // message.append(...) on put failure, and a null buffer would throw a
  // NullPointerException that masks the real failure.
  message = new StringBuffer();
  final int numOfThreads = 5;
  final int numOfPuts = 4;
  final int batchSize = numOfThreads * numOfPuts;
  final HARegionQueue regionqueue = createHARegionQueue("testPeekAndRemoveWithConflation");
  Thread[] threads = new Thread[numOfThreads];
  for (int i = 0; i < numOfThreads; i++) {
    final long ids = i;
    threads[i] = new Thread() {
      @Override
      public void run() {
        // Each thread puts numOfPuts conflatables under its own key (KEY1 + ids);
        // conflation keeps only the latest of these.
        for (int j = 0; j < numOfPuts; j++) {
          EventID id = new EventID(new byte[] { (byte) ids }, ids, j);
          try {
            regionqueue.put(new ConflatableObject(KEY1 + ids, id.getThreadID() + "VALUE" + j, id, true, "region1"));
          } catch (Exception ex) {
            testFailed = true;
            message.append("put failed for the threadId " + id.getThreadID());
          }
        }
      }
    };
  }
  for (int k = 0; k < numOfThreads; k++) {
    threads[k].start();
  }
  for (int k = 0; k < numOfThreads; k++) {
    ThreadUtils.join(threads[k], 180 * 1000);
  }
  if (testFailed) {
    fail("Test failed due to " + message);
  }
  // Conflation leaves exactly one entry per thread even though batchSize were put.
  List pickObject = regionqueue.peek(batchSize);
  assertEquals(numOfThreads, pickObject.size());
  regionqueue.remove();
  for (int i = 0; i < numOfThreads; i++) {
    // After remove, no counters may remain for any thread's events.
    assertEquals(0, regionqueue.getCurrentCounterSet(new EventID(new byte[] { (byte) i }, i, 1)).size());
  }
  assertEquals("size of availableIds map should be 0 ", 0, regionqueue.getAvalaibleIds().size());
  assertEquals("size of conflation map should be 0 ", 0, ((Map) regionqueue.getConflationMapForTesting().get("region1")).size());
  this.logWriter.info("testPeekAndRemoveWithConflation() completed successfully");
}
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class HARQAddOperationJUnitTest, method testExpiryOnThreadIdentifier.
/**
 * (Add operation followed by Take, without conflation): an Add operation which creates the
 * LastDispatchedAndCurrentEvents object should also add it to the Region with the
 * ThreadIdentifier as key and the sequence as the value, for Expiry. Perform a take
 * operation, then validate that expiry on the ThreadIdentifier removes it both from the
 * region and from the events map.
 *
 * <p>Declares {@code throws Exception} instead of catching and calling
 * {@code fail(" test failed due to " + e)}, which discarded the stack trace of the
 * underlying failure.
 */
@Test
public void testExpiryOnThreadIdentifier() throws Exception {
  HARegionQueueAttributes attrs = new HARegionQueueAttributes();
  attrs.setExpiryTime(2); // seconds; expiry should fire well before the sleep below ends
  HARegionQueue regionqueue = createHARegionQueue("testing", attrs);
  // create the conflatable object
  EventID id = new EventID(new byte[] { 1 }, 1, 1);
  ConflatableObject obj = new ConflatableObject(KEY1, VALUE1, id, true, "region1");
  ThreadIdentifier threadId = new ThreadIdentifier(obj.getEventId().getMembershipID(), obj.getEventId().getThreadID());
  regionqueue.put(obj);
  regionqueue.take();
  // Sleep long past the 2-second expiry so the ThreadIdentifier entry has time to expire.
  Thread.sleep(25000);
  assertFalse("ThreadIdentifier did not remove itself through expiry.The reqgion queue is of type=" + regionqueue.getClass(), regionqueue.getRegion().containsKey(threadId));
  Map eventsMap = regionqueue.getEventsMapForTesting();
  assertNull("expiry action on ThreadIdentifier did not remove itself from eventsMap", eventsMap.get(threadId));
}
Aggregations