use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class ConnectionPoolDUnitTest method test028DynamicRegionCreation.
/**
* Tests dynamic region creation initiated from a bridge client, causing regions to be created on
* two different bridge servers.
*
* Also tests the reverse situation: a dynamic region created on a bridge server is expected to
* appear on the client.
*
* Note: This test re-creates distributed systems for its own purposes and uses a loner
* distributed system to isolate the bridge client.
*
* @throws Exception
*/
@Test
public void test028DynamicRegionCreation() throws Exception {
final String name = this.getName();
final Host host = Host.getHost(0);
final VM client1 = host.getVM(0);
// VM client2 = host.getVM(1);
final VM srv1 = host.getVM(2);
final VM srv2 = host.getVM(3);
final String k1 = name + "-key1";
final String v1 = name + "-val1";
final String k2 = name + "-key2";
final String v2 = name + "-val2";
final String k3 = name + "-key3";
final String v3 = name + "-val3";
client1.invoke(() -> disconnectFromDS());
srv1.invoke(() -> disconnectFromDS());
srv2.invoke(() -> disconnectFromDS());
try {
// setup servers
CacheSerializableRunnable ccs = new CacheSerializableRunnable("Create Cache Server") {
public void run2() throws CacheException {
// Creates a new DS and Cache
createDynamicRegionCache(name, (String) null);
assertTrue(DynamicRegionFactory.get().isOpen());
try {
startBridgeServer(0);
} catch (IOException ugh) {
fail("Bridge Server startup failed");
}
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.REPLICATE);
factory.setConcurrencyChecksEnabled(false);
Region region = createRootRegion(name, factory.create());
region.put(k1, v1);
Assert.assertTrue(region.get(k1).equals(v1));
}
};
srv1.invoke(ccs);
srv2.invoke(ccs);
final String srv1Host = NetworkUtils.getServerHostName(srv1.getHost());
final int srv1Port = srv1.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
final int srv2Port = srv2.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
// final String srv2Host = getServerHostName(srv2.getHost());
// setup clients; do basic tests to make sure the pool with notifier works as advertised
client1.invoke(new CacheSerializableRunnable("Create Cache Client") {
public void run2() throws CacheException {
createLonerDS();
AttributesFactory factory = new AttributesFactory();
factory.setConcurrencyChecksEnabled(false);
Pool cp = ClientServerTestCase.configureConnectionPool(factory, srv1Host, srv1Port, srv2Port, true, -1, -1, null);
{
final PoolImpl pool = (PoolImpl) cp;
WaitCriterion ev = new WaitCriterion() {
public boolean done() {
if (pool.getPrimary() == null) {
return false;
}
if (pool.getRedundants().size() < 1) {
return false;
}
return true;
}
public String description() {
return null;
}
};
Wait.waitForCriterion(ev, 30 * 1000, 200, true);
assertNotNull(pool.getPrimary());
assertTrue("backups=" + pool.getRedundants() + " expected=" + 1, pool.getRedundants().size() >= 1);
}
createDynamicRegionCache(name, "testPool");
assertTrue(DynamicRegionFactory.get().isOpen());
factory.setScope(Scope.LOCAL);
factory.setConcurrencyChecksEnabled(false);
factory.setCacheListener(new CertifiableTestCacheListener(org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()));
Region region = createRootRegion(name, factory.create());
assertNull(region.getEntry(k1));
// the regex should match the key
region.registerInterestRegex(".*", InterestResultPolicy.KEYS_VALUES);
// Update arrives via registered interest
assertEquals(v1, region.getEntry(k1).getValue());
assertNull(region.getEntry(k2));
// use the Pool
region.put(k2, v2);
// Ensure that the notifier didn't un-do the put (bug 35355)
assertEquals(v2, region.getEntry(k2).getValue());
// setup a key for invalidation from a notifier
region.put(k3, v3);
}
});
srv1.invoke(new CacheSerializableRunnable("Validate Server1 update") {
public void run2() throws CacheException {
CacheClientNotifier ccn = CacheClientNotifier.getInstance();
final CacheClientNotifierStats ccnStats = ccn.getStats();
final int eventCount = ccnStats.getEvents();
Region r = getRootRegion(name);
assertNotNull(r);
// Validate the Pool worked; getEntry works because of the mirror
assertEquals(v2, r.getEntry(k2).getValue());
// Make sure we have the other entry to use for notification
assertEquals(v3, r.getEntry(k3).getValue());
// Change k3, sending some data to the client notifier
r.put(k3, v1);
// Wait for the update to propagate to the clients
final int maxTime = 20000;
// long start = System.currentTimeMillis();
WaitCriterion ev = new WaitCriterion() {
public boolean done() {
return ccnStats.getEvents() > eventCount;
}
public String description() {
return "waiting for ccnStat";
}
};
Wait.waitForCriterion(ev, maxTime, 200, true);
// Set prox = ccn.getClientProxies();
// assertIndexDetailsEquals(1, prox.size());
// for (Iterator cpi = prox.iterator(); cpi.hasNext(); ) {
// CacheClientProxy ccp = (CacheClientProxy) cpi.next();
// start = System.currentTimeMillis();
// while (ccp.getMessagesProcessed() < 1) {
// assertTrue("Waited more than " + maxTime + "ms for client notification",
// (System.currentTimeMillis() - start) < maxTime);
// try {
// Thread.sleep(100);
// } catch (InterruptedException ine) { fail("Interrupted while waiting for client
// notifier to complete"); }
// }
// }
}
});
srv2.invoke(new CacheSerializableRunnable("Validate Server2 update") {
public void run2() throws CacheException {
Region r = getRootRegion(name);
assertNotNull(r);
// Validate the Pool worked; getEntry works because of the mirror
assertEquals(v2, r.getEntry(k2).getValue());
// From peer update
assertEquals(v1, r.getEntry(k3).getValue());
}
});
client1.invoke(new CacheSerializableRunnable("Validate Client notification") {
public void run2() throws CacheException {
Region r = getRootRegion(name);
assertNotNull(r);
CertifiableTestCacheListener ctl = (CertifiableTestCacheListener) r.getAttributes().getCacheListener();
ctl.waitForUpdated(k3);
// Ensure that the notifier updated the entry
assertEquals(v1, r.getEntry(k3).getValue());
}
});
// Ok, now we are ready to do some dynamic region action!
final String v1Dynamic = v1 + "dynamic";
final String dynFromClientName = name + "-dynamic-client";
final String dynFromServerName = name + "-dynamic-server";
client1.invoke(new CacheSerializableRunnable("Client dynamic region creation") {
public void run2() throws CacheException {
assertTrue(DynamicRegionFactory.get().isOpen());
Region r = getRootRegion(name);
assertNotNull(r);
Region dr = DynamicRegionFactory.get().createDynamicRegion(name, dynFromClientName);
// This should be enough to validate the creation on the server
assertNull(dr.get(k1));
dr.put(k1, v1Dynamic);
assertEquals(v1Dynamic, dr.getEntry(k1).getValue());
}
});
// Assert the servers have the dynamic region and the new value
CacheSerializableRunnable valDR = new CacheSerializableRunnable("Validate dynamic region creation on server") {
public void run2() throws CacheException {
Region r = getRootRegion(name);
assertNotNull(r);
long end = System.currentTimeMillis() + 10000;
Region dr = null;
for (; ; ) {
try {
dr = r.getSubregion(dynFromClientName);
assertNotNull(dr);
assertNotNull(getCache().getRegion(name + Region.SEPARATOR + dynFromClientName));
break;
} catch (AssertionError e) {
if (System.currentTimeMillis() > end) {
throw e;
}
}
}
assertEquals(v1Dynamic, dr.getEntry(k1).getValue());
}
};
srv1.invoke(valDR);
srv2.invoke(valDR);
// now delete the dynamic region and see if it goes away on servers
client1.invoke(new CacheSerializableRunnable("Client dynamic region destruction") {
public void run2() throws CacheException {
assertTrue(DynamicRegionFactory.get().isActive());
Region r = getRootRegion(name);
assertNotNull(r);
String drName = r.getFullPath() + Region.SEPARATOR + dynFromClientName;
assertNotNull(getCache().getRegion(drName));
DynamicRegionFactory.get().destroyDynamicRegion(drName);
assertNull(getCache().getRegion(drName));
}
});
// Assert the servers no longer have the dynamic region
CacheSerializableRunnable valNoDR = new CacheSerializableRunnable("Validate dynamic region destruction on server") {
public void run2() throws CacheException {
Region r = getRootRegion(name);
assertNotNull(r);
String drName = r.getFullPath() + Region.SEPARATOR + dynFromClientName;
assertNull(getCache().getRegion(drName));
try {
DynamicRegionFactory.get().destroyDynamicRegion(drName);
fail("expected RegionDestroyedException");
} catch (RegionDestroyedException expected) {
}
}
};
srv1.invoke(valNoDR);
srv2.invoke(valNoDR);
// Now try the reverse, create a dynamic region on the server and see if the client
// has it
srv2.invoke(new CacheSerializableRunnable("Server dynamic region creation") {
public void run2() throws CacheException {
Region r = getRootRegion(name);
assertNotNull(r);
Region dr = DynamicRegionFactory.get().createDynamicRegion(name, dynFromServerName);
assertNull(dr.get(k1));
dr.put(k1, v1Dynamic);
assertEquals(v1Dynamic, dr.getEntry(k1).getValue());
}
});
// Assert the servers have the dynamic region and the new value
srv1.invoke(new CacheSerializableRunnable("Validate dynamic region creation propagation to other server") {
public void run2() throws CacheException {
Region r = getRootRegion(name);
assertNotNull(r);
Region dr = waitForSubRegion(r, dynFromServerName);
assertNotNull(dr);
assertNotNull(getCache().getRegion(name + Region.SEPARATOR + dynFromServerName));
waitForEntry(dr, k1);
assertNotNull(dr.getEntry(k1));
assertEquals(v1Dynamic, dr.getEntry(k1).getValue());
}
});
// Assert the clients have the dynamic region and the new value
client1.invoke(new CacheSerializableRunnable("Validate dynamic region creation on client") {
public void run2() throws CacheException {
Region r = getRootRegion(name);
assertNotNull(r);
long end = System.currentTimeMillis() + 10000;
Region dr = null;
for (; ; ) {
try {
dr = r.getSubregion(dynFromServerName);
assertNotNull(dr);
assertNotNull(getCache().getRegion(name + Region.SEPARATOR + dynFromServerName));
break;
} catch (AssertionError e) {
if (System.currentTimeMillis() > end) {
throw e;
} else {
Wait.pause(1000);
}
}
}
waitForEntry(dr, k1);
assertNotNull(dr.getEntry(k1));
assertEquals(v1Dynamic, dr.getEntry(k1).getValue());
}
});
// now delete the dynamic region on a server and see if it goes away on client
srv2.invoke(new CacheSerializableRunnable("Server dynamic region destruction") {
public void run2() throws CacheException {
assertTrue(DynamicRegionFactory.get().isActive());
Region r = getRootRegion(name);
assertNotNull(r);
String drName = r.getFullPath() + Region.SEPARATOR + dynFromServerName;
assertNotNull(getCache().getRegion(drName));
DynamicRegionFactory.get().destroyDynamicRegion(drName);
assertNull(getCache().getRegion(drName));
}
});
srv1.invoke(new CacheSerializableRunnable("Validate dynamic region destruction on other server") {
public void run2() throws CacheException {
Region r = getRootRegion(name);
assertNotNull(r);
String drName = r.getFullPath() + Region.SEPARATOR + dynFromServerName;
{
int retry = 100;
while (retry-- > 0 && getCache().getRegion(drName) != null) {
try {
Thread.sleep(100);
} catch (InterruptedException ignore) {
fail("interrupted");
}
}
}
assertNull(getCache().getRegion(drName));
}
});
// Assert the clients no longer have the dynamic region
client1.invoke(new CacheSerializableRunnable("Validate dynamic region destruction on client") {
public void run2() throws CacheException {
Region r = getRootRegion(name);
assertNotNull(r);
String drName = r.getFullPath() + Region.SEPARATOR + dynFromServerName;
{
int retry = 100;
while (retry-- > 0 && getCache().getRegion(drName) != null) {
try {
Thread.sleep(100);
} catch (InterruptedException ignore) {
fail("interrupted");
}
}
}
assertNull(getCache().getRegion(drName));
// region,dynamicRegionList in DynamicRegionFactory // ?
try {
Thread.sleep(10000);
} catch (InterruptedException ignore) {
fail("interrupted");
}
try {
DynamicRegionFactory.get().destroyDynamicRegion(drName);
fail("expected RegionDestroyedException");
} catch (RegionDestroyedException expected) {
}
}
});
} finally {
// clean-up loner
client1.invoke(() -> disconnectFromDS());
srv1.invoke(() -> disconnectFromDS());
srv2.invoke(() -> disconnectFromDS());
}
}
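The PoolImpl usage at the heart of this test is the cast from the public Pool to the internal PoolImpl so the client can block until a subscription primary and at least one redundant server are known. Below is a minimal standalone sketch of that pattern, assuming two cache servers are already running; the host, ports, and pool name are placeholders, and PoolImpl remains an internal API.

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolManager;
import org.apache.geode.cache.client.internal.PoolImpl;

public class PoolPrimarySketch {
  public static void main(String[] args) throws InterruptedException {
    // Loner client cache/distributed system, much like the bridge client in the test
    ClientCache cache = new ClientCacheFactory().create();
    Pool pool = PoolManager.createFactory()
        .addServer("localhost", 40404) // assumed cache server 1
        .addServer("localhost", 40405) // assumed cache server 2
        .setSubscriptionEnabled(true)
        .setSubscriptionRedundancy(1)
        .create("sketchPool");

    // Internal API, as in the test: wait until the pool has a primary plus one redundant
    PoolImpl poolImpl = (PoolImpl) pool;
    long deadline = System.currentTimeMillis() + 30_000;
    while (poolImpl.getPrimary() == null || poolImpl.getRedundants().isEmpty()) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("no primary/redundant subscription endpoints within 30s");
      }
      Thread.sleep(200);
    }

    pool.destroy();
    cache.close();
  }
}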
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class CacheRegionClearStatsDUnitTest method createClientCache.
public static void createClientCache(String host, Integer port1) throws Exception {
new CacheRegionClearStatsDUnitTest();
Properties props = new Properties();
props.setProperty(MCAST_PORT, "0");
props.setProperty(LOCATORS, "");
new CacheRegionClearStatsDUnitTest().createCache(props);
PoolImpl p = (PoolImpl) PoolManager.createFactory()
    .addServer(host, port1.intValue())
    .setSubscriptionEnabled(false)
    .setThreadLocalConnections(true)
    .setMinConnections(1)
    .setReadTimeout(20000)
    .setPingInterval(10000)
    .setRetryAttempts(1)
    .create("CacheRegionClearStatsDUnitTest");
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setPoolName(p.getName());
RegionAttributes attrs = factory.create();
Region region = cache.createRegion(REGION_NAME, attrs);
// region.registerInterest("ALL_KEYS");
}
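A hedged follow-up to the helper above: once createClientCache has run in a VM, the pool it registered can be looked up by the name passed to create(...) and its configuration verified. This check is a sketch intended to live in the same test class; it is not part of the original source.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

import org.apache.geode.cache.client.PoolManager;
import org.apache.geode.cache.client.internal.PoolImpl;

// Sketch only: verifies the pool registered by createClientCache(host, port1)
public static void verifyClientPool() {
  PoolImpl pool = (PoolImpl) PoolManager.find("CacheRegionClearStatsDUnitTest");
  assertNotNull(pool);
  assertEquals(1, pool.getMinConnections());
  assertEquals(20000, pool.getReadTimeout());
  assertEquals(1, pool.getRetryAttempts());
}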
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class LocalRegion method detachPool.
/**
* Release the client connection pool if we have one
*
* @since GemFire 5.7
*/
private void detachPool() {
ServerRegionProxy serverRegionProxy = getServerProxy();
if (serverRegionProxy != null) {
InternalCache internalCache = getCache();
String poolName = this.getPoolName();
PoolImpl pool = (PoolImpl) PoolManager.find(poolName);
if (poolName != null && pool != null) {
serverRegionProxy.detach(internalCache.keepDurableSubscriptionsAlive() || pool.getKeepAlive());
} else {
serverRegionProxy.detach(internalCache.keepDurableSubscriptionsAlive());
}
}
}
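detachPool's only decision is whether the server-side subscription queue should be kept alive: either the cache keeps durable subscriptions alive or the pool itself is being kept alive. A minimal sketch of that decision, assuming the caller supplies the pool name and the cache-level flag; it uses only the calls shown above.

import org.apache.geode.cache.client.PoolManager;
import org.apache.geode.cache.client.internal.PoolImpl;

// Sketch only: mirrors the keep-alive decision made by detachPool()
static boolean shouldKeepSubscriptionsAlive(String poolName, boolean durableSubscriptionsAlive) {
  // May be null if the pool was already destroyed before the region detached
  PoolImpl pool = (PoolImpl) PoolManager.find(poolName);
  return durableSubscriptionsAlive || (pool != null && pool.getKeepAlive());
}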
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class AbstractGatewaySender method stompProxyDead.
protected void stompProxyDead() {
Runnable stomper = new Runnable() {
public void run() {
PoolImpl bpi = proxy;
if (bpi != null) {
try {
bpi.destroy();
} catch (Exception e) {
/* ignore */
}
}
}
};
ThreadGroup tg = LoggingThreadGroup.createThreadGroup("Proxy Stomper Group", logger);
Thread t = new Thread(tg, stomper, "GatewaySender Proxy Stomper");
t.setDaemon(true);
t.start();
try {
t.join(GATEWAY_SENDER_TIMEOUT * 1000);
return;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
logger.warn(LocalizedMessage.create(LocalizedStrings.GatewayImpl_GATEWAY_0_IS_NOT_CLOSING_CLEANLY_FORCING_CANCELLATION, this));
// OK, either we've timed out or been interrupted. Time for violence.
t.interrupt(); // give up
// VIOLENCE!
proxy.emergencyClose();
this.proxy = null;
}
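The pattern here: destroy the pool proxy on a daemon thread, wait a bounded time for the orderly shutdown, then escalate to emergencyClose() if it does not finish. A generic sketch of the same pattern follows, decoupled from AbstractGatewaySender; the timeout parameter stands in for GATEWAY_SENDER_TIMEOUT and logging is omitted.

import org.apache.geode.cache.client.internal.PoolImpl;

// Sketch of the stomp pattern: orderly destroy first, emergencyClose as a last resort
static void stompPool(final PoolImpl proxy, int timeoutSeconds) {
  Thread stomper = new Thread(() -> {
    try {
      proxy.destroy(); // orderly shutdown of the pool
    } catch (Exception ignored) {
      // best effort; the escalation below handles the stuck case
    }
  }, "Pool Stomper");
  stomper.setDaemon(true);
  stomper.start();
  try {
    stomper.join(timeoutSeconds * 1000L);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
  }
  if (stomper.isAlive()) {
    stomper.interrupt(); // give up on the orderly path
    proxy.emergencyClose(); // force the connections closed
  }
}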
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class UpdatePropagationDUnitTest method updatesAreProgegatedAfterFailover.
/**
* Tests whether updates are received by the other clients when an interest-list failover occurs.
*/
@Test
public void updatesAreProgegatedAfterFailover() {
// First create entries on both servers via the two client
client1.invoke(() -> createEntriesK1andK2());
client2.invoke(() -> createEntriesK1andK2());
client1.invoke(() -> registerKeysK1andK2());
client2.invoke(() -> registerKeysK1andK2());
// Induce failover of the interest-list endpoint to Server 2 by killing server1
server1.invoke(() -> killServer(new Integer(PORT1)));
// Wait for failover to complete; the interest list should then have failed over to Server2.
final CacheSerializableRunnable waitToDetectDeadServer = new CacheSerializableRunnable("Wait for server on port1 to be dead") {
public void run2() throws CacheException {
Region r = getCache().getRegion(REGION_NAME);
String poolName = r.getAttributes().getPoolName();
final PoolImpl pool = (PoolImpl) PoolManager.find(poolName);
Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> !hasEndPointWithPort(pool, PORT1));
}
};
client1.invoke(waitToDetectDeadServer);
client2.invoke(waitToDetectDeadServer);
// Start Server1 again so that both Client1 and Client2 establish connections to it too.
server1.invoke(() -> startServer(new Integer(PORT1)));
final CacheSerializableRunnable waitToDetectLiveServer = new CacheSerializableRunnable("Wait for servers to be alive") {
public void run2() throws CacheException {
Region r = getCache().getRegion(REGION_NAME);
String poolName = r.getAttributes().getPoolName();
final PoolImpl pool = (PoolImpl) PoolManager.find(poolName);
Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> hasEndPointWithPort(pool, PORT1));
}
};
client1.invoke(waitToDetectLiveServer);
client2.invoke(waitToDetectLiveServer);
// Do a put on Server1 via a Connection object from client1.
// Client1 should not receive the updated value, while client2 should.
client1.invoke(() -> acquireConnectionsAndPutonK1andK2(NetworkUtils.getServerHostName(client1.getHost())));
// Check that both puts (on key1 and key2) have reached the servers
server1.invoke(() -> verifyUpdates());
server2.invoke(() -> verifyUpdates());
// verify updates to other client
client2.invoke(() -> verifyUpdates());
// verify no updates for update originator
client1.invoke(() -> verifySenderUpdateCount());
}
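The two wait blocks above share one shape: look up the region's pool by name, cast to the internal PoolImpl, then poll with Awaitility until an endpoint condition holds. A generic sketch of that shape follows; the endpoint predicate is a placeholder for hasEndPointWithPort, which is defined elsewhere in the test class, and the Awaitility import path may differ by version.

import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.PoolManager;
import org.apache.geode.cache.client.internal.PoolImpl;
import org.awaitility.Awaitility;

// Sketch only: generic form of waitToDetectDeadServer / waitToDetectLiveServer
static void waitForPoolState(Region<?, ?> region, Predicate<PoolImpl> endpointCondition) {
  String poolName = region.getAttributes().getPoolName();
  final PoolImpl pool = (PoolImpl) PoolManager.find(poolName);
  Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> endpointCondition.test(pool));
}

A hypothetical call site would be waitForPoolState(r, p -> !hasEndPointWithPort(p, PORT1)) for the dead-server case and the un-negated predicate for the live-server case.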