Use of org.apache.geode.internal.cache.EventID in project geode by apache: class HARegionQueueStatsJUnitTest, method testVoidRemovalStats.
/**
 * This test does the following:<br>
 * 1) Create a HARegionQueue.<br>
 * 2) Add objects with unique event ids and conflation disabled.<br>
 * 3) Peek a batch containing all the events added, then take() all the events.<br>
 * 4) Call remove().<br>
 * 5) Verify that the statistics object is not null.<br>
 * 6) Verify that the total number of events added matches the eventsEnqued stat.<br>
 * 7) Verify that the numVoidRemovals stat equals the total number of events added, since all the
 * peeked events were removed by the take() call and remove() was therefore a void operation.
 *
 * @throws Exception if any problem occurs during test execution
 */
@Test
public void testVoidRemovalStats() throws Exception {
  HARegionQueue rq = createHARegionQueue("testVoidRemovalStats");
  Conflatable cf = null;
  int totalEvents = 100;
  for (int i = 0; i < totalEvents; i++) {
    cf = new ConflatableObject("key" + i, "value" + i, new EventID(new byte[] { 1 }, 1, i), false, "testing");
    rq.put(cf);
  }
  rq.peek(totalEvents);
  rq.take(totalEvents);
  rq.remove();
  HARegionQueueStats stats = rq.getStatistics();
  assertNotNull("stats for HARegionQueue found null", stats);
  assertEquals("eventsEnqued by stats not equal to the actual number of events added to the queue", totalEvents, stats.getEventsEnqued());
  assertEquals("Number of void removals should be equal to total peeked since all the events were removed by take() before remove()", totalEvents, stats.getNumVoidRemovals());
}
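As the Javadoc above notes, remove() discards the batch returned by the last peek(); when take() has already drained those events there is nothing left to remove, and each such no-op is counted as a void removal. A minimal sketch of that bookkeeping follows; it is illustrative only, not the HARegionQueue implementation, and every name in it is invented.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Illustrative peek/take/remove bookkeeping; counts a "void removal" for every
// peeked event that is already gone when remove() runs.
class PeekRemoveQueueSketch<E> {
  private final Deque<E> events = new ArrayDeque<>();
  private final List<E> lastPeekedBatch = new ArrayList<>();
  private long numVoidRemovals;

  void put(E event) {
    events.addLast(event);
  }

  List<E> peek(int batchSize) {
    lastPeekedBatch.clear();
    for (E e : events) {
      if (lastPeekedBatch.size() == batchSize) {
        break;
      }
      lastPeekedBatch.add(e);
    }
    return new ArrayList<>(lastPeekedBatch);
  }

  List<E> take(int count) {
    List<E> taken = new ArrayList<>();
    while (taken.size() < count && !events.isEmpty()) {
      taken.add(events.removeFirst());
    }
    return taken;
  }

  void remove() {
    for (E peeked : lastPeekedBatch) {
      if (!events.remove(peeked)) {
        numVoidRemovals++;   // the event was already taken, so this removal is void
      }
    }
    lastPeekedBatch.clear();
  }

  long getNumVoidRemovals() {
    return numVoidRemovals;
  }
}

With 100 events put, peeked, and then taken as in the test, remove() would find none of the peeked events still queued, so getNumVoidRemovals() would report 100.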
Use of org.apache.geode.internal.cache.EventID in project geode by apache: class HARegionQueueStatsJUnitTest, method testPutStatsNoConflation.
/**
 * This test does the following:<br>
 * 1) Create a HARegionQueue.<br>
 * 2) Add objects with unique event ids and conflation disabled.<br>
 * 3) Verify that the statistics object is not null.<br>
 * 4) Verify that the total number of events added matches the eventsEnqued stat.<br>
 * 5) Verify that the eventsConflated stat is zero.
 *
 * @throws Exception if any problem occurs during test execution
 */
@Test
public void testPutStatsNoConflation() throws Exception {
  HARegionQueue rq = createHARegionQueue("testPutStatsNoConflation");
  Conflatable cf = null;
  int totalEvents = 100;
  for (int i = 0; i < totalEvents; i++) {
    cf = new ConflatableObject("key" + i, "value" + i, new EventID(new byte[] { 1 }, 1, i), false, "testing");
    rq.put(cf);
  }
  HARegionQueueStats stats = rq.getStatistics();
  assertNotNull("stats for HARegionQueue found null", stats);
  assertEquals("eventsEnqued by stats not equal to the actual number of events added to the queue", totalEvents, stats.getEventsEnqued());
  assertEquals("eventsConflated by stats not equal zero", 0, stats.getEventsConflated());
}
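For contrast with the conflation-disabled case above: when conflation is enabled, a newer event for the same conflation key is expected to supersede the queued one and increment the conflated counter, which is why this test asserts that eventsConflated stays at zero. The sketch below shows that rule in isolation; it is an illustration, not the HARegionQueue code, and every name in it is invented.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative conflation rule: with conflation on, keep only the latest value
// per key and count the superseded entries; with conflation off, keep everything.
class ConflationSketch {
  private final Map<String, Object> queued = new LinkedHashMap<>();
  private long eventsEnqued;      // spelling mirrors the HARegionQueueStats stat name
  private long eventsConflated;

  void put(String key, Object value, boolean conflate) {
    eventsEnqued++;
    if (conflate) {
      if (queued.containsKey(key)) {
        eventsConflated++;        // the newer value supersedes the queued one
      }
      queued.put(key, value);     // at most one queued value per key
    } else {
      queued.put(key + "#" + eventsEnqued, value); // unique synthetic key keeps every event
    }
  }

  long getEventsEnqued() {
    return eventsEnqued;
  }

  long getEventsConflated() {
    return eventsConflated;
  }
}

Putting 100 events under distinct keys with conflate set to false, as the test does, leaves getEventsConflated() at zero while getEventsEnqued() reaches 100.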
Use of org.apache.geode.internal.cache.EventID in project geode by apache: class ClientHealthMonitorJUnitTest, method testDeadClientRemovalByServer.
/**
 * This test performs the following:<br>
 * 1) Create a server.<br>
 * 2) Initialize the proxy object and create a region for the client.<br>
 * 3) Perform a put from the client on a Connection acquired through the proxy.<br>
 * 4) Stop the server-monitor threads in the client so that the server treats it as a dead
 * client.<br>
 * 5) Wait long enough for the server to clean up the dead client's artifacts.<br>
 * 6) Perform another put from the client through the same Connection and verify that it fails,
 * since the server has timed out and removed the dead client's connection.
 */
@Test
public void testDeadClientRemovalByServer() throws Exception {
  PORT = createServer();
  createProxyAndRegionForClient();
  // String connection2String = null;
  StatisticsType st = this.system.findType("CacheServerStats");
  final Statistics s = this.system.findStatisticsByType(st)[0];
  assertEquals(0, s.getInt("currentClients"));
  assertEquals(0, s.getInt("currentClientConnections"));
  this.system.getLogWriter().info("beforeAcquireConnection clients=" + s.getInt("currentClients")
      + " cnxs=" + s.getInt("currentClientConnections"));
  Connection connection1 = proxy.acquireConnection();
  this.system.getLogWriter().info("afterAcquireConnection clients=" + s.getInt("currentClients")
      + " cnxs=" + s.getInt("currentClientConnections"));
  this.system.getLogWriter().info("acquired connection " + connection1);

  WaitCriterion ev = new WaitCriterion() {
    public boolean done() {
      return s.getInt("currentClients") != 0;
    }

    public String description() {
      return null;
    }
  };
  Wait.waitForCriterion(ev, 20 * 1000, 200, true);
  assertEquals(1, s.getInt("currentClients"));
  assertEquals(1, s.getInt("currentClientConnections"));
  // String connection1String = connection1.toString();
  ServerRegionProxy srp = new ServerRegionProxy("region1", proxy);
  srp.putOnForTestsOnly(connection1, "key-1", "value-1", new EventID(new byte[] { 1 }, 1, 1), null);
  this.system.getLogWriter().info("did put 1");
  // proxy.testfinalizeServerConnectionMonitor();

  ev = new WaitCriterion() {
    public boolean done() {
      return s.getInt("currentClients") == 0;
    }

    public String description() {
      return null;
    }
  };
  Wait.waitForCriterion(ev, TIME_BETWEEN_PINGS * 5, 200, true);
  {
    this.system.getLogWriter().info("currentClients=" + s.getInt("currentClients")
        + " currentClientConnections=" + s.getInt("currentClientConnections"));
    assertEquals(0, s.getInt("currentClients"));
    assertEquals(0, s.getInt("currentClientConnections"));
  }
  addExceptions();

  // the connection should now fail since the server timed it out
  try {
    srp.putOnForTestsOnly(connection1, "key-1", "fail", new EventID(new byte[] { 1 }, 1, 2), null);
    fail("expected EOF");
  } catch (ServerConnectivityException expected) {
  }

  // The rest of this test no longer works.
  // connection1.finalizeConnection();
  // proxy.release();
  // connection1 = proxy.acquireConnection();
  // connection2String = connection1.toString();
  // this.system.getLogWriter().info("connection is now " + connection2String);
  // if (connection1String.equals(connection2String)) {
  //   fail("New connection object was not obtained");
  // }
  // connection1.putObject("region1", "key-1", "value-2", new EventID(new byte[] {1},1,3), null);
  // this.system.getLogWriter().info("did put 2");
  // assertIndexDetailsEquals(1, s.getInt("currentClients"));
  // assertIndexDetailsEquals(1, s.getInt("currentClientConnections"));
  // // now lets see what happens when we close our connection
  // // note we use a nasty close which just closes the socket instead
  // // of sending a nice message to the server telling him we are going away
  // ((ConnectionImpl)connection1).finalizeConnection();
  // {
  //   int retry = (TIME_BETWEEN_PINGS*5) / 100;
  //   while (s.getInt("currentClients") > 0 && retry-- > 0) {
  //     Thread.sleep(100);
  //   }
  //   this.system.getLogWriter().info("currentClients="
  //       + s.getInt("currentClients")
  //       + " currentClientConnections="
  //       + s.getInt("currentClientConnections"));
  //   assertIndexDetailsEquals(0, s.getInt("currentClients"));
  //   assertIndexDetailsEquals(0, s.getInt("currentClientConnections"));
  // }
}
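The WaitCriterion blocks above follow a simple poll-until-true pattern: Wait.waitForCriterion re-evaluates done() every interval milliseconds until it returns true or the timeout elapses, and the final boolean asks it to fail the test on timeout. A generic stand-alone equivalent, written here as a sketch rather than the dunit utility itself, looks like this:

import java.util.function.BooleanSupplier;

// Generic poll-until-true helper mirroring the shape of
// Wait.waitForCriterion(criterion, timeoutMs, intervalMs, throwOnTimeout).
final class PollUntil {
  static void await(BooleanSupplier condition, long timeoutMs, long intervalMs,
      boolean throwOnTimeout) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (condition.getAsBoolean()) {
        return;                    // condition met within the timeout
      }
      Thread.sleep(intervalMs);    // back off before re-checking
    }
    if (condition.getAsBoolean()) {
      return;                      // final check at the deadline
    }
    if (throwOnTimeout) {
      throw new AssertionError("condition not met within " + timeoutMs + " ms");
    }
  }
}

In the test, the second wait corresponds to PollUntil.await(() -> s.getInt("currentClients") == 0, TIME_BETWEEN_PINGS * 5, 200, true): polling until currentClients drops back to zero after the server times out the silent client.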
Use of org.apache.geode.internal.cache.EventID in project geode by apache: class ClearPropagationDUnitTest, method acquireConnectionsAndDestroyRegion.
public static void acquireConnectionsAndDestroyRegion(String host) {
  try {
    Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
    assertNotNull(r1);
    String poolName = r1.getAttributes().getPoolName();
    assertNotNull(poolName);
    PoolImpl pool = (PoolImpl) PoolManager.find(poolName);
    assertNotNull(pool);
    Connection conn1 = pool.acquireConnection(new ServerLocation(host, PORT2));
    assertNotNull(conn1);
    assertEquals(PORT2, conn1.getServer().getPort());
    ServerRegionProxy srp = new ServerRegionProxy(Region.SEPARATOR + REGION_NAME, pool);
    srp.destroyRegionOnForTestsOnly(conn1, new EventID(new byte[] { 1 }, 1, 1), null);
  } catch (Exception ex) {
    ex.printStackTrace();
    fail("while setting acquireConnections " + ex);
  }
}
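This helper is meant to run inside a client VM of the distributed (DUnit) test. A hypothetical invocation is sketched below; only acquireConnectionsAndDestroyRegion comes from the snippet above, while the VM index, host lookup, and surrounding method are assumptions about how such a test is typically driven.

import org.apache.geode.test.dunit.Host;
import org.apache.geode.test.dunit.NetworkUtils;
import org.apache.geode.test.dunit.VM;

// Hypothetical driver code; VM selection and host lookup are assumptions.
private void destroyRegionFromClientVm() {
  Host host = Host.getHost(0);
  VM client = host.getVM(2);                                // assumed client VM
  String serverHost = NetworkUtils.getServerHostName(host); // host where the PORT2 server runs
  // Run the helper in the client VM; it acquires a connection to PORT2 and
  // sends destroyRegion with an explicit EventID.
  client.invoke(() -> ClearPropagationDUnitTest.acquireConnectionsAndDestroyRegion(serverHost));
}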
Use of org.apache.geode.internal.cache.EventID in project geode by apache: class ConnectionProxyJUnitTest, method testDuplicateSeqIdLesserThanCurrentSeqIdBeingIgnored.
@Test
public void testDuplicateSeqIdLesserThanCurrentSeqIdBeingIgnored() {
  int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
  CacheServer server = null;
  try {
    try {
      server = this.cache.addCacheServer();
      server.setMaximumTimeBetweenPings(10000);
      server.setPort(port3);
      server.start();
    } catch (Exception e) {
      e.printStackTrace();
      fail("Failed to create server");
    }
    try {
      PoolFactory pf = PoolManager.createFactory();
      pf.addServer("localhost", port3);
      pf.setSubscriptionEnabled(true);
      pf.setSubscriptionRedundancy(-1);
      pf.setSubscriptionMessageTrackingTimeout(100000);
      proxy = (PoolImpl) pf.create("clientPool");

      EventID eid1 = new EventID(new byte[0], 1, 5);
      if (proxy.verifyIfDuplicate(eid1)) {
        fail(" eid1 can never be duplicate, it is being created for the first time! ");
      }

      EventID eid2 = new EventID(new byte[0], 1, 2);
      if (!proxy.verifyIfDuplicate(eid2)) {
        fail(" eid2 should be duplicate, seqId is less than highest (5)");
      }

      EventID eid3 = new EventID(new byte[0], 1, 3);
      if (!proxy.verifyIfDuplicate(eid3)) {
        fail(" eid3 should be duplicate, seqId is less than highest (5)");
      }

      assertTrue(!proxy.getThreadIdToSequenceIdMap().isEmpty());
      proxy.destroy();
    } catch (Exception ex) {
      ex.printStackTrace();
      fail("Failed to initialize client");
    }
  } finally {
    if (server != null) {
      server.stop();
    }
  }
}
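The assertions above encode the duplicate rule the pool's tracking map enforces: for each event source, the highest sequence id seen so far is recorded, and any later event whose sequence id does not exceed that mark is reported as a duplicate. The sketch below condenses that rule; it is illustrative, not the PoolImpl code, and its class and key names are invented.

import java.util.HashMap;
import java.util.Map;

// Illustrative duplicate detection: track the highest sequence id seen per
// event source (e.g. membership id plus thread id) and treat anything at or
// below that high-water mark as a duplicate.
final class DuplicateEventDetectorSketch {
  private final Map<String, Long> highestSeqIdBySource = new HashMap<>();

  boolean verifyIfDuplicate(String sourceId, long sequenceId) {
    Long highest = highestSeqIdBySource.get(sourceId);
    if (highest != null && sequenceId <= highest) {
      return true;                                  // already seen or superseded
    }
    highestSeqIdBySource.put(sourceId, sequenceId); // new high-water mark for this source
    return false;
  }
}

Applied to the test: sequence id 5 is accepted first, so the later ids 2 and 3 from the same source are flagged as duplicates, and the tracking map is non-empty afterwards.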