Use of org.apache.geode.internal.cache.PoolStats in project geode by apache.
The class QueueManagerJUnitTest, method setUp.
@Before
public void setUp() {
  this.logger = new LocalLogWriter(InternalLogWriter.FINEST_LEVEL, System.out);
  Properties properties = new Properties();
  properties.put(MCAST_PORT, "0");
  properties.put(LOCATORS, "");
  ds = DistributedSystem.connect(properties);
  stats = new PoolStats(ds, "QueueManagerJUnitTest");
  pool = new DummyPool();
  endpoints = new EndpointManagerImpl("pool", ds, ds.getCancelCriterion(), pool.getStats());
  source = new DummySource();
  factory = new DummyFactory();
  background = Executors.newSingleThreadScheduledExecutor();
  final String addExpectedPEM =
      "<ExpectedException action=add>" + expectedPrimaryErrorMsg + "</ExpectedException>";
  final String addExpectedREM =
      "<ExpectedException action=add>" + expectedRedundantErrorMsg + "</ExpectedException>";
  ds.getLogWriter().info(addExpectedPEM);
  ds.getLogWriter().info(addExpectedREM);
}
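The setUp above shows the recurring pattern in these tests: a loner DistributedSystem (mcast-port 0, no locators) is connected and then handed to the PoolStats constructor. A minimal standalone sketch of that pattern follows; the class and field names and the tearDown are illustrative assumptions, not part of the snippet above.

// Sketch only: a loner DistributedSystem backing a PoolStats instance, plus the
// cleanup such a setUp usually implies. Names here are illustrative, not Geode code.
import java.util.Properties;
import org.apache.geode.distributed.DistributedSystem;
import org.apache.geode.internal.cache.PoolStats;

public class PoolStatsSetupSketch {
  private DistributedSystem ds;
  private PoolStats stats;

  public void setUp() {
    Properties props = new Properties();
    props.put("mcast-port", "0"); // loner system: no multicast discovery
    props.put("locators", "");    // loner system: no locators
    ds = DistributedSystem.connect(props);
    // the connected system is the factory the pool statistics are registered against
    stats = new PoolStats(ds, "PoolStatsSetupSketch");
  }

  public void tearDown() {
    if (ds != null) {
      ds.disconnect(); // tears down the loner system created in setUp
    }
  }
}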
Use of org.apache.geode.internal.cache.PoolStats in project geode by apache.
The class ConnectionManagerJUnitTest, method setUp.
@Before
public void setUp() {
  this.logger = new LocalLogWriter(InternalLogWriter.FINEST_LEVEL, System.out);
  factory = new DummyFactory();
  Properties properties = new Properties();
  properties.put(MCAST_PORT, "0");
  properties.put(LOCATORS, "");
  ds = DistributedSystem.connect(properties);
  background = Executors.newSingleThreadScheduledExecutor();
  poolStats = new PoolStats(ds, "connectionManagerJUnitTest");
  endpointManager = new EndpointManagerImpl("pool", ds, ds.getCancelCriterion(), poolStats);
  cancelCriterion = new CancelCriterion() {

    public String cancelInProgress() {
      return null;
    }

    public RuntimeException generateCancelledException(Throwable e) {
      return null;
    }
  };
}
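The anonymous CancelCriterion here returns null from both methods, which in Geode means cancellation is never in progress, so the connection manager under test keeps running. A standalone version of that stub might look like the following sketch; the class name is made up for illustration.

import org.apache.geode.CancelCriterion;

// A permissive CancelCriterion: returning null from cancelInProgress() signals that
// no shutdown is under way, so guarded components continue to operate. Hypothetical
// class name; the test above builds the equivalent inline.
public class NeverCancelCriterion extends CancelCriterion {

  @Override
  public String cancelInProgress() {
    return null; // null means "not cancelled"
  }

  @Override
  public RuntimeException generateCancelledException(Throwable e) {
    return null; // never consulted while cancelInProgress() stays null
  }
}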
Use of org.apache.geode.internal.cache.PoolStats in project geode by apache.
The class ConnectionPoolDUnitTest, method test004ForCacheLoaderException.
/**
 * Test for client connection loss with CacheLoaderException on the server.
 */
@Test
public void test004ForCacheLoaderException() throws CacheException, InterruptedException {
  final String name = this.getName();
  final Host host = Host.getHost(0);
  VM server = host.getVM(0);
  VM client = host.getVM(1);
  // Create the cache servers with distributed, mirrored region
  SerializableRunnable createServer = new CacheSerializableRunnable("Create Cache Server") {
    public void run2() throws CacheException {
      CacheLoader cl = new CacheLoader() {
        public Object load(LoaderHelper helper) {
          System.out.println("### CALLING CACHE LOADER....");
          throw new CacheLoaderException(
              "Test for CacheLoaderException causing client connection to disconnect.");
        }

        public void close() {}
      };
      AttributesFactory factory = getBridgeServerMirroredAckRegionAttributes(cl, null);
      createRegion(name, factory.create());
      // pause(1000);
      try {
        startBridgeServer(0);
      } catch (Exception ex) {
        org.apache.geode.test.dunit.Assert.fail("While starting CacheServer", ex);
      }
    }
  };
  getSystem().getLogWriter().info("before create server");
  server.invoke(createServer);
  // Create cache server clients
  final int numberOfKeys = 10;
  final String host0 = NetworkUtils.getServerHostName(host);
  final int[] port =
      new int[] {server.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort())};
  final String poolName = "myPool";
  SerializableRunnable createClient = new CacheSerializableRunnable("Create Cache Server Client") {
    public void run2() throws CacheException {
      getLonerSystem();
      AttributesFactory factory = new AttributesFactory();
      factory.setScope(Scope.LOCAL);
      factory.setConcurrencyChecksEnabled(false);
      // create bridge writer
      ClientServerTestCase.configureConnectionPoolWithName(factory, host0, port, true, -1, -1,
          null, poolName);
      createRegion(name, factory.create());
    }
  };
  getSystem().getLogWriter().info("before create client");
  client.invoke(createClient);
  // Initialize each client with entries (so that afterInvalidate is called)
  SerializableRunnable invokeServerCacheLoader =
      new CacheSerializableRunnable("Initialize Client") {
        public void run2() throws CacheException {
          LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
          PoolStats stats = ((PoolImpl) PoolManager.find(poolName)).getStats();
          int oldConnects = stats.getConnects();
          int oldDisConnects = stats.getDisConnects();
          try {
            for (int i = 0; i < numberOfKeys; i++) {
              String actual = (String) region.get("key-" + i);
            }
          } catch (Exception ex) {
            if (!(ex.getCause() instanceof CacheLoaderException)) {
              fail("Unexpected exception; expected CacheLoaderException from the server, "
                  + "instead found: " + ex.getCause().getClass());
            }
          }
          int newConnects = stats.getConnects();
          int newDisConnects = stats.getDisConnects();
          if (newConnects != oldConnects && newDisConnects != oldDisConnects) {
            fail("A new connection was created for the server-side CacheLoaderException.");
          }
        }
      };
  getSystem().getLogWriter().info("before initialize client");
  AsyncInvocation inv2 = client.invokeAsync(invokeServerCacheLoader);
  ThreadUtils.join(inv2, 30 * 1000);
  SerializableRunnable stopServer = new SerializableRunnable("stop CacheServer") {
    public void run() {
      stopBridgeServer(getCache());
    }
  };
  server.invoke(stopServer);
}
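The heart of this test is the PoolStats bookkeeping: it records getConnects() and getDisConnects() before driving gets that fail in the server-side CacheLoader, then checks that the client did not drop and re-create its connection. A condensed sketch of just that check follows; the region, key, and pool name are placeholders, and the strict equality assertions are an interpretation of the intent rather than the test's exact condition.

// Assumes org.apache.geode.cache.Region, org.apache.geode.cache.client.PoolManager,
// org.apache.geode.cache.client.internal.PoolImpl, org.apache.geode.internal.cache.PoolStats,
// and a JUnit assertEquals on the classpath. Sketch only, not the DUnit test's own code.
private static void assertNoReconnectOnLoaderFailure(Region<String, String> region) {
  PoolStats stats = ((PoolImpl) PoolManager.find("myPool")).getStats();
  int oldConnects = stats.getConnects();
  int oldDisConnects = stats.getDisConnects();
  try {
    region.get("key-0"); // the server-side CacheLoader throws CacheLoaderException
  } catch (Exception expected) {
    // the loader failure reaches the client, but the connection should survive it
  }
  assertEquals(oldConnects, stats.getConnects());
  assertEquals(oldDisConnects, stats.getDisConnects());
}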
Use of org.apache.geode.internal.cache.PoolStats in project geode by apache.
The class ConnectionPoolDUnitTest, method basicTestLifetimeExpire.
private void basicTestLifetimeExpire(final boolean threadLocal) throws CacheException {
  final String name = this.getName();
  final Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  AsyncInvocation putAI = null;
  AsyncInvocation putAI2 = null;
  try {
    // Create two bridge servers
    SerializableRunnable createCacheServer = new CacheSerializableRunnable("Create Cache Server") {
      public void run2() throws CacheException {
        AttributesFactory factory = getBridgeServerRegionAttributes(null, null);
        factory.setCacheListener(new DelayListener(25));
        createRegion(name, factory.create());
        try {
          startBridgeServer(0);
        } catch (Exception ex) {
          org.apache.geode.test.dunit.Assert.fail("While starting CacheServer", ex);
        }
      }
    };
    vm0.invoke(createCacheServer);
    final int port0 = vm0.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
    vm1.invoke(createCacheServer);
    final int port1 = vm1.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
    SerializableRunnable stopCacheServer = new SerializableRunnable("Stop CacheServer") {
      public void run() {
        stopBridgeServer(getCache());
      }
    };
    // we only had to stop it to reserve a port
    vm1.invoke(stopCacheServer);
    // Create one bridge client in this VM
    SerializableRunnable create = new CacheSerializableRunnable("Create region") {
      public void run2() throws CacheException {
        getLonerSystem();
        getCache();
        AttributesFactory factory = new AttributesFactory();
        factory.setScope(Scope.LOCAL);
        factory.setConcurrencyChecksEnabled(false);
        ClientServerTestCase.configureConnectionPool(factory, host0, port0, port1,
            false /* queue */, -1, 0, null, 100, 500, threadLocal, 500);
        Region region = createRegion(name, factory.create());
        // force connections to form
        region.put("keyInit", new Integer(0));
        region.put("keyInit2", new Integer(0));
      }
    };
    vm2.invoke(create);
    // Launch async threads that put objects into the cache. These threads will execute until
    // the test has ended.
    SerializableRunnable putter1 = new CacheSerializableRunnable("Put objects") {
      public void run2() throws CacheException {
        Region region = getRootRegion().getSubregion(name);
        PoolImpl pool = getPool(region);
        PoolStats stats = pool.getStats();
        baselineLifetimeCheck = stats.getLoadConditioningCheck();
        baselineLifetimeExtensions = stats.getLoadConditioningExtensions();
        baselineLifetimeConnect = stats.getLoadConditioningConnect();
        baselineLifetimeDisconnect = stats.getLoadConditioningDisconnect();
        try {
          int count = 0;
          while (!stopTestLifetimeExpire) {
            count++;
            region.put("keyAI1", new Integer(count));
          }
        } catch (NoAvailableServersException ex) {
          if (stopTestLifetimeExpire) {
            return;
          } else {
            throw ex;
          }
          // } catch (RegionDestroyedException e) { // will be thrown when the test ends
          //   /* ignore */
          // } catch (CancelException e) { // will be thrown when the test ends
          //   /* ignore */
        }
      }
    };
    SerializableRunnable putter2 = new CacheSerializableRunnable("Put objects") {
      public void run2() throws CacheException {
        Region region = getRootRegion().getSubregion(name);
        try {
          int count = 0;
          while (!stopTestLifetimeExpire) {
            count++;
            region.put("keyAI2", new Integer(count));
          }
        } catch (NoAvailableServersException ex) {
          if (stopTestLifetimeExpire) {
            return;
          } else {
            throw ex;
          }
          // } catch (RegionDestroyedException e) { // will be thrown when the test ends
          //   /* ignore */
          // } catch (CancelException e) { // will be thrown when the test ends
          //   /* ignore */
        }
      }
    };
    putAI = vm2.invokeAsync(putter1);
    putAI2 = vm2.invokeAsync(putter2);
    SerializableRunnable verify1Server = new CacheSerializableRunnable("verify1Server") {
      public void run2() throws CacheException {
        Region region = getRootRegion().getSubregion(name);
        PoolImpl pool = getPool(region);
        final PoolStats stats = pool.getStats();
        verifyServerCount(pool, 1);
        WaitCriterion ev = new WaitCriterion() {
          public boolean done() {
            return stats.getLoadConditioningCheck() >= (10 + baselineLifetimeCheck);
          }

          public String description() {
            return null;
          }
        };
        Wait.waitForCriterion(ev, 30 * 1000, 200, true);
        // make sure no replacements are happening.
        // since we have 2 threads and 2 cnxs and 1 server
        // when lifetimes are up we should only want to connect back to the
        // server we are already connected to and thus just extend our lifetime
        assertTrue("baselineLifetimeCheck=" + baselineLifetimeCheck
            + " but stats.getLoadConditioningCheck()=" + stats.getLoadConditioningCheck(),
            stats.getLoadConditioningCheck() >= (10 + baselineLifetimeCheck));
        baselineLifetimeCheck = stats.getLoadConditioningCheck();
        assertTrue(stats.getLoadConditioningExtensions() > baselineLifetimeExtensions);
        assertTrue(stats.getLoadConditioningConnect() == baselineLifetimeConnect);
        assertTrue(stats.getLoadConditioningDisconnect() == baselineLifetimeDisconnect);
      }
    };
    SerializableRunnable verify2Servers = new CacheSerializableRunnable("verify2Servers") {
      public void run2() throws CacheException {
        Region region = getRootRegion().getSubregion(name);
        PoolImpl pool = getPool(region);
        final PoolStats stats = pool.getStats();
        verifyServerCount(pool, 2);
        // make sure some replacements are happening.
        // since we have 2 threads and 2 cnxs and 2 servers
        // when lifetimes are up we should connect to the other server sometimes.
        // int retry = 300;
        // while ((retry-- > 0)
        //     && (stats.getLoadConditioningCheck() < (10 + baselineLifetimeCheck))) {
        //   pause(100);
        // }
        // assertTrue("Bug 39209 expected " + stats.getLoadConditioningCheck() + " to be >= "
        //     + (10 + baselineLifetimeCheck),
        //     stats.getLoadConditioningCheck() >= (10 + baselineLifetimeCheck));
        // TODO: does this WaitCriterion actually help?
        WaitCriterion wc = new WaitCriterion() {
          String excuse;

          public boolean done() {
            int actual = stats.getLoadConditioningCheck();
            int expected = 10 + baselineLifetimeCheck;
            if (actual >= expected) {
              return true;
            }
            excuse = "Bug 39209 expected " + actual + " to be >= " + expected;
            return false;
          }

          public String description() {
            return excuse;
          }
        };
        try {
          Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
        } catch (AssertionError e) {
          // dumpStack();
          throw e;
        }
        assertTrue(stats.getLoadConditioningConnect() > baselineLifetimeConnect);
        assertTrue(stats.getLoadConditioningDisconnect() > baselineLifetimeDisconnect);
      }
    };
    vm2.invoke(verify1Server);
    assertEquals(true, putAI.isAlive());
    assertEquals(true, putAI2.isAlive());
  } finally {
    vm2.invoke(new SerializableRunnable("Stop Putters") {
      public void run() {
        stopTestLifetimeExpire = true;
      }
    });
    try {
      if (putAI != null) {
        // Verify that no exception has occurred in the putter thread
        ThreadUtils.join(putAI, 30 * 1000);
        if (putAI.exceptionOccurred()) {
          org.apache.geode.test.dunit.Assert.fail("While putting entries: ", putAI.getException());
        }
      }
      if (putAI2 != null) {
        // Verify that no exception has occurred in the putter thread
        ThreadUtils.join(putAI, 30 * 1000);
        // FIXME this thread does not terminate
        // if (putAI2.exceptionOccurred()) {
        //   fail("While putting entries: ", putAI.getException());
        // }
      }
    } finally {
      vm2.invoke(new SerializableRunnable("Stop Putters") {
        public void run() {
          stopTestLifetimeExpire = false;
        }
      });
      // Close Pool
      vm2.invoke(new CacheSerializableRunnable("Close Pool") {
        public void run2() throws CacheException {
          Region region = getRootRegion().getSubregion(name);
          String poolName = region.getAttributes().getPoolName();
          region.localDestroyRegion();
          PoolManager.find(poolName).destroy();
        }
      });
      SerializableRunnable stopCacheServer = new SerializableRunnable("Stop CacheServer") {
        public void run() {
          stopBridgeServer(getCache());
        }
      };
      vm1.invoke(stopCacheServer);
      vm0.invoke(stopCacheServer);
    }
  }
}
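Both verification runnables above follow the same shape: capture baseline load-conditioning counters from PoolStats, let the putter threads run, then wait until getLoadConditioningCheck() has advanced past the baseline before asserting on the extension, connect, and disconnect counters. The sketch below is a plain-Java stand-in for that polling step; the helper name, threshold, and timeout are illustrative, not the DUnit Wait.waitForCriterion helper the test itself uses.

// Polls PoolStats until at least minChecks additional connection-lifetime checks have
// been recorded past the baseline, or fails after 30 seconds. Sketch only.
private static void awaitLifetimeChecks(PoolStats stats, int baselineCheck, int minChecks)
    throws InterruptedException {
  long deadline = System.currentTimeMillis() + 30_000;
  while (stats.getLoadConditioningCheck() < baselineCheck + minChecks) {
    if (System.currentTimeMillis() > deadline) {
      throw new AssertionError("expected at least " + minChecks + " lifetime checks, saw only "
          + (stats.getLoadConditioningCheck() - baselineCheck));
    }
    Thread.sleep(200); // roughly the same polling interval as the WaitCriterion above
  }
}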
Use of org.apache.geode.internal.cache.PoolStats in project geode by apache.
The class AutoConnectionSourceImplJUnitTest, method setUp.
@Before
public void setUp() throws Exception {
  Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "");
  DistributedSystem ds = DistributedSystem.connect(props);
  cache = CacheFactory.create(ds);
  poolStats = new PoolStats(ds, "pool");
  port = AvailablePortHelper.getRandomAvailableTCPPort();
  handler = new FakeHandler();
  ArrayList responseLocators = new ArrayList();
  responseLocators.add(new ServerLocation(InetAddress.getLocalHost().getHostName(), port));
  handler.nextLocatorListResponse = new LocatorListResponse(responseLocators, false);
  // very irritating, the SystemTimer requires having a distributed system
  Properties properties = new Properties();
  properties.put(MCAST_PORT, "0");
  properties.put(LOCATORS, "");
  background = Executors.newSingleThreadScheduledExecutor();
  List /* <InetSocketAddress> */ locators = new ArrayList();
  locators.add(new InetSocketAddress(InetAddress.getLocalHost(), port));
  source = new AutoConnectionSourceImpl(locators, "", 60 * 1000);
  source.start(pool);
}
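A tearDown for a setup like this would normally stop the connection source, shut down the executor, and release the cache and loner system. The sketch below is an assumption about that cleanup, not the test's own code; in particular, relying on source.stop() and cache.close() here is assumed.

// Cleanup sketch for the setUp above. The ordering and the choice of calls are
// assumptions made for illustration.
@After
public void tearDown() {
  try {
    if (source != null) {
      source.stop(); // assumed: stop background locator discovery
    }
  } finally {
    background.shutdownNow(); // stop the single-thread scheduled executor
    if (cache != null && !cache.isClosed()) {
      DistributedSystem ds = cache.getDistributedSystem();
      cache.close();
      ds.disconnect(); // release the loner distributed system created in setUp
    }
  }
}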