Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class CqQueryDUnitTest, the method performGC:
private void performGC(VM server, final String regionName) {
  SerializableRunnable task = new CacheSerializableRunnable("perform GC") {
    public void run2() throws CacheException {
      Region subregion = getCache().getRegion("root/" + regionName);
      DistributedTombstoneOperation gc = DistributedTombstoneOperation.gc((DistributedRegion) subregion,
          new EventID(getCache().getDistributedSystem()));
      gc.distribute();
    }
  };
  server.invoke(task);
}
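The examples in this listing use two different EventID constructors: the one above derives the identity from the running DistributedSystem, while the tests below hand-build an identity from a raw membership id, thread id and sequence id. A minimal contrast of the two, assuming a cache is available as in the DUnit test above (the local variable names are illustrative only):

// Identity derived from the running distributed system, as in performGC above.
EventID fromSystem = new EventID(getCache().getDistributedSystem());

// Hand-built identity, as in the HARegionQueue and client/server tests below:
// a fake membership id plus explicit thread and sequence ids.
EventID handBuilt = new EventID(new byte[] { 1 }, 1 /* threadId */, 42 /* sequenceId */);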
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class VerifyUpdatesFromNonInterestEndPointDUnitTest, the method acquireConnectionsAndPut:
public static void acquireConnectionsAndPut(Integer port) {
  try {
    Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
    String poolName = r1.getAttributes().getPoolName();
    assertNotNull(poolName);
    PoolImpl pool = (PoolImpl) PoolManager.find(poolName);
    assertNotNull(pool);
    Connection conn1 = pool.acquireConnection();
    Connection conn2 = pool.acquireConnection();
    ServerRegionProxy srp = new ServerRegionProxy(Region.SEPARATOR + REGION_NAME, pool);
    // put on the connection whose server is not the interest-list endpoint
    if (conn1.getServer().getPort() == port.intValue()) {
      srp.putOnForTestsOnly(conn1, "key-1", "server-value1", new EventID(new byte[] { 1 }, 1, 1), null);
      srp.putOnForTestsOnly(conn1, "key-2", "server-value2", new EventID(new byte[] { 1 }, 1, 2), null);
    } else if (conn2.getServer().getPort() == port.intValue()) {
      srp.putOnForTestsOnly(conn2, "key-1", "server-value1", new EventID(new byte[] { 1 }, 1, 1), null);
      srp.putOnForTestsOnly(conn2, "key-2", "server-value2", new EventID(new byte[] { 1 }, 1, 2), null);
    }
  } catch (Exception ex) {
    fail("Exception in acquireConnectionsAndPut: " + ex);
  }
}
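In the DUnit framework a static helper like this runs inside a test VM, and the controller typically invokes it remotely. A hedged sketch of that call, where the VM variable client and the port variable port1 are hypothetical stand-ins rather than names taken from the original test:

// Hypothetical controller-side invocation; `client` and `port1` are illustrative only.
client.invoke(() -> VerifyUpdatesFromNonInterestEndPointDUnitTest.acquireConnectionsAndPut(port1));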
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class HARegionQueueStatsJUnitTest, the method testExpiryStats:
/**
* This test does the following:<br>
* 1)Create HARegionQueue with expiry time as 1 sec<br>
* 2)Add objects with unique eventids and conflation false and sleep for some time.<br>
* 3)Verify that statistics object is not null<br>
* 4)Verify that total events added matches the eventsEnqued stats<br>
* 5)Verify that the eventsExpired stat equals the total number of events added, since all events
* should have expired within the 1 sec expiry time.
*
* @throws Exception - thrown if any problem occurs in test execution
*/
@Test
public void testExpiryStats() throws Exception {
  HARegionQueueAttributes haa = new HARegionQueueAttributes();
  haa.setExpiryTime(1);
  HARegionQueue rq = createHARegionQueue("testExpiryStats", haa);
  Conflatable cf = null;
  int totalEvents = 100;
  for (int i = 0; i < totalEvents; i++) {
    cf = new ConflatableObject("key" + i, "value" + i, new EventID(new byte[] { 1 }, 1, i), false, "testing");
    rq.put(cf);
  }
  Thread.sleep(3000);
  HARegionQueueStats stats = rq.stats;
  assertNotNull("stats for HARegionQueue found null", stats);
  assertEquals("eventsEnqued by stats not equal to the actual number of events added to the queue",
      totalEvents, stats.getEventsEnqued());
  assertEquals("expiredEvents not updated", totalEvents, stats.getEventsExpired());
}
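The fixed 3-second sleep only works because the 1-second expiry has had time to fire, which can be fragile on a loaded machine. A common alternative is to poll the stat until it reaches the expected value; the sketch below assumes the Awaitility library is on the test classpath and is an illustration, not how this Geode test is actually written:

// Poll instead of sleeping a fixed 3 seconds.
// Assumes org.awaitility.Awaitility and java.util.concurrent.TimeUnit are imported.
Awaitility.await()
    .atMost(30, TimeUnit.SECONDS)
    .until(() -> rq.getStatistics().getEventsExpired() == totalEvents);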
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class HARegionQueueStatsJUnitTest, the method testRemoveByQrmStats:
/**
* This test does the following:<br>
* 1)Create HARegionQueue.<br>
* 2)Add objects with unique eventids and conflation false<br>
* 3)Remove the events through QRM api ( <code>removeDispatchedEvents(EventID id)</code>) with a
* certain lastDispatchedSeqId<br>
* 4)Verify that statistics object is not null<br>
* 5)Verify that total events added matches the eventsEnqued stats<br>
* 6)Verify that the eventsRemovedByQrm stat equals the number of events removed by QRM (up to and
* including the event having lastDispatchedSeqId, step 3).
*
* @throws Exception - thrown if any problem occurs in test execution
*/
@Test
public void testRemoveByQrmStats() throws Exception {
  HARegionQueue rq = createHARegionQueue("testRemoveByQrmStats");
  Conflatable cf = null;
  int totalEvents = 100;
  for (int i = 0; i < totalEvents; i++) {
    cf = new ConflatableObject("key" + i, "value" + i, new EventID(new byte[] { 1 }, 1, i), false, "testing");
    rq.put(cf);
  }
  // call for removal through the QRM api
  int lastDispatchedSqId = 20;
  EventID id = new EventID(new byte[] { 1 }, 1, lastDispatchedSqId);
  rq.removeDispatchedEvents(id);
  HARegionQueueStats stats = rq.getStatistics();
  assertNotNull("stats for HARegionQueue found null", stats);
  assertEquals("eventsEnqued by stats not equal to the actual number of events added to the queue",
      totalEvents, stats.getEventsEnqued());
  assertEquals("eventsRemovedByQrm stats not updated properly", (lastDispatchedSqId + 1),
      stats.getEventsRemovedByQrm());
}
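The expected value in the final assertion follows directly from the sequence IDs used in the put loop; a small worked check (reusing the variable from the test above):

// The puts used sequence IDs 0..99. Dispatching up to ID 20 covers IDs 0, 1, ..., 20,
// i.e. 21 events, which is why the assertion expects lastDispatchedSqId + 1.
int expectedRemovedByQrm = lastDispatchedSqId + 1; // 21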
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class HARegionQueueStatsJUnitTest, the method testRemoveStats:
/**
* This test does the following:<br>
* 1)Create HARegionQueue.<br>
* 2)Add objects with unique eventids and conflation false<br>
* 3)Do some random peek and peek-batch operations and then call remove()<br>
* 4)Verify that statistics object is not null<br>
* 5)Verify that total events added matches the eventsEnqued stats<br>
* 6)Verify that the eventsRemoved stat equals the maximum batch size peeked in the above peek
* operations (step 3).
*
* @throws Exception - thrown if any problem occurs in test execution
*/
@Test
public void testRemoveStats() throws Exception {
  HARegionQueue rq = createHARegionQueue("testRemoveStats");
  Conflatable cf = null;
  int totalEvents = 100;
  for (int i = 0; i < totalEvents; i++) {
    cf = new ConflatableObject("key" + i, "value" + i, new EventID(new byte[] { 1 }, 1, i), false, "testing");
    rq.put(cf);
  }
  // do some random peek operations.
  int maxPeekBatchSize = 50;
  rq.peek();
  rq.peek(8);
  rq.peek(maxPeekBatchSize);
  rq.peek(35);
  rq.peek();
  rq.remove();
  HARegionQueueStats stats = rq.getStatistics();
  assertNotNull("stats for HARegionQueue found null", stats);
  assertEquals("eventsEnqued by stats not equal to the actual number of events added to the queue",
      totalEvents, stats.getEventsEnqued());
  assertEquals("All the events peeked were not removed", maxPeekBatchSize, stats.getEventsRemoved());
}