use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class DeltaPropagationDUnitTest method createDurableCacheClient.
public static void createDurableCacheClient(Pool poolAttr, String regionName,
    Properties dsProperties, Integer listenerCode, Boolean close) throws Exception {
  new DeltaPropagationDUnitTest().createCache(dsProperties);
  PoolFactoryImpl pf = (PoolFactoryImpl) PoolManager.createFactory();
  pf.init(poolAttr);
  PoolImpl p = (PoolImpl) pf.create("DeltaPropagationDUnitTest");
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setConcurrencyChecksEnabled(false);
  factory.setPoolName(p.getName());
  if (listenerCode.intValue() != 0) {
    factory.addCacheListener(getCacheListener(listenerCode));
  }
  RegionAttributes attrs = factory.create();
  Region r = cache.createRegion(regionName, attrs);
  // Register interest in all keys so the durable client receives server-side updates.
  r.registerInterest("ALL_KEYS");
  pool = p;
  // Signal the server that this durable client is ready to receive its queued events.
  cache.readyForEvents();
  logger = cache.getLogger();
  closeCache = close.booleanValue();
}
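For context, this helper is normally invoked inside a client VM with durable-client properties and a subscription-enabled pool. The fragment below is only a sketch of such a call site: the port, durable client id, region name and listener code are placeholder values, and it assumes the PoolFactoryImpl.getPoolAttributes() accessor to build the Pool argument.
  Properties dsProperties = new Properties();
  dsProperties.setProperty("mcast-port", "0");
  dsProperties.setProperty("locators", "");
  dsProperties.setProperty("durable-client-id", "durable-client-1"); // placeholder id
  dsProperties.setProperty("durable-client-timeout", "60");

  int serverPort = 40404; // placeholder cache-server port
  PoolFactoryImpl pf = (PoolFactoryImpl) PoolManager.createFactory();
  pf.addServer("localhost", serverPort)
      .setSubscriptionEnabled(true); // a durable client needs a subscription queue
  Pool poolAttr = pf.getPoolAttributes(); // assumed accessor on PoolFactoryImpl

  createDurableCacheClient(poolAttr, "testRegion", dsProperties,
      1 /* listener code interpreted by getCacheListener() */, Boolean.FALSE);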
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class FireAndForgetFunctionOnAllServersDUnitTest method testFireAndForgetFunctionOnAllServers.
@Test
public void testFireAndForgetFunctionOnAllServers() {
  // Test case for executing a fire-and-forget function on all servers, as opposed to only
  // executing it on the servers the client is currently connected to.
  Host host = Host.getHost(0);
  VM locator = host.getVM(0);
  VM server1 = host.getVM(1);
  VM server2 = host.getVM(2);
  VM client = host.getVM(3);

  final int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
  final String locatorHost = NetworkUtils.getServerHostName(host);

  // Step 1. Start a locator.
  locator.invoke("Start Locator", () -> startLocator(locatorHost, locatorPort, ""));
  String locString = getLocatorString(host, locatorPort);

  // Step 2. Start a server and create a replicated region "R1".
  server1.invoke("Start BridgeServer",
      () -> startBridgeServer(new String[] {"R1"}, locString, new String[] {"R1"}));

  // Step 3. Create a client cache with a pool pointing at the locator.
  client.invoke("create client cache and pool mentioning locator", () -> {
    ClientCacheFactory ccf = new ClientCacheFactory();
    ccf.addPoolLocator(locatorHost, locatorPort);
    ClientCache cache = ccf.create();
    Pool pool1 = PoolManager.createFactory().addLocator(locatorHost, locatorPort)
        .setServerGroup("R1").create("R1");
    Region region1 = cache.createClientRegionFactory(ClientRegionShortcut.PROXY)
        .setPoolName("R1").create("R1");

    // Step 4. Execute the function to put each member's DistributedMemberID into the
    // replicated region created above.
    Function function = new FireAndForgetFunctionOnAllServers();
    FunctionService.registerFunction(function);
    PoolImpl pool = (PoolImpl) pool1;
    await().atMost(60, SECONDS)
        .until(() -> Assert.assertEquals(1, pool.getCurrentServers().size()));
    String regionName = "R1";
    Execution dataSet = FunctionService.onServers(pool1);
    dataSet.setArguments(regionName).execute(function);

    // With Awaitility, a ConditionTimeoutException is thrown if the condition is not met
    // within the timeout, which makes analyzing a failure much simpler.
    // Step 5. Assert that the region keyset size is 1.
    await().atMost(60, SECONDS)
        .until(() -> Assert.assertEquals(1, region1.keySetOnServer().size()));
    region1.clear();

    // Step 6. Start another server pointing at the locator and create the replicated
    // region "R1" on it as well.
    server2.invoke("Start BridgeServer",
        () -> startBridgeServer(new String[] {"R1"}, locString, new String[] {"R1"}));

    // Step 7. Once the pool sees both servers, execute the same function again.
    await().atMost(60, SECONDS)
        .until(() -> Assert.assertEquals(2, pool.getCurrentServers().size()));
    dataSet = FunctionService.onServers(pool1);
    dataSet.setArguments(regionName).execute(function);

    // Step 8. Assert that the region keyset size is 2, since the function was executed on
    // 2 servers.
    await().atMost(60, SECONDS).until(() -> {
      Assert.assertEquals(2, region1.keySetOnServer().size());
    });
    region1.clear();

    // Step 9. Stop one of the servers.
    server1.invoke("Stop BridgeServer", () -> stopBridgeMemberVM(server1));

    // Step 10. Once the pool has dropped back to a single server, execute the same
    // function again.
    await().atMost(60, SECONDS)
        .until(() -> Assert.assertEquals(1, pool.getCurrentServers().size()));
    dataSet = FunctionService.onServers(pool1);
    dataSet.setArguments(regionName).execute(function);

    // Step 11. Assert that the region keyset size is 1, since only one server was running.
    await().atMost(60, SECONDS).until(() -> {
      Assert.assertEquals(1, region1.keySetOnServer().size());
    });
    region1.clear();
    return null;
  });
}
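The FireAndForgetFunctionOnAllServers function itself is not shown in this snippet. A minimal sketch of a function with the shape the test expects might look like the following; it is an illustration rather than the project's actual implementation, and the region lookup and key choice are assumptions.
  public class FireAndForgetFunctionOnAllServers implements Function {

    @Override
    public String getId() {
      return FireAndForgetFunctionOnAllServers.class.getName();
    }

    @Override
    public boolean hasResult() {
      return false; // fire-and-forget: the caller does not wait for or collect results
    }

    @Override
    public boolean isHA() {
      return false; // no automatic re-execution on member failure
    }

    @Override
    public boolean optimizeForWrite() {
      return false;
    }

    @Override
    public void execute(FunctionContext context) {
      // The test passes the region name ("R1") as the argument.
      String regionName = (String) context.getArguments();
      Cache cache = CacheFactory.getAnyInstance();
      Region<String, String> region = cache.getRegion(regionName);
      // Record this member's id so the test can count how many servers executed the function.
      String memberId = cache.getDistributedSystem().getDistributedMember().getId();
      region.put(memberId, memberId);
    }
  }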
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class PutAllDUnitTest method createClientCache1.
/** Function to create a client cache with HAEventIdPropagationListenerForClient1 as the listener. **/
public static void createClientCache1(String host, Integer port1) throws Exception {
  int PORT1 = port1.intValue();
  Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "");
  new PutAllDUnitTest().createCache(props);
  // Legacy bridge-client style settings; the pool itself is configured via
  // configureConnectionPool() below.
  props.setProperty("retryAttempts", "2");
  props.setProperty("endpoints", "ep1=" + host + ":" + PORT1);
  props.setProperty("redundancyLevel", "-1");
  props.setProperty("establishCallbackConnection", "true");
  props.setProperty("LBPolicy", "Sticky");
  props.setProperty("readTimeout", "2000");
  props.setProperty("socketBufferSize", "1000");
  props.setProperty("retryInterval", "250");
  props.setProperty("connectionsPerServer", "2");
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.DISTRIBUTED_ACK);
  PoolImpl p = (PoolImpl) ClientServerTestCase.configureConnectionPool(factory, host, PORT1, -1,
      true, -1, 2, null);
  CacheListener clientListener = new HAEventIdPropagationListenerForClient1();
  factory.setCacheListener(clientListener);
  RegionAttributes attrs = factory.create();
  cache.createRegion(REGION_NAME, attrs);
  Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
  assertNotNull(region);
  region.registerInterest("ALL_KEYS", InterestResultPolicy.NONE);
  pool = p;
}
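For readers unfamiliar with the ClientServerTestCase.configureConnectionPool helper, the call above corresponds roughly to the public PoolManager API sketched below, reusing the factory, host and PORT1 variables from the method. The mapping of the -1/true/-1/2 arguments to subscription redundancy, subscription enablement and connection count is an assumption based on the legacy property names set earlier, and the pool name is hypothetical.
  PoolFactory pf = PoolManager.createFactory();
  pf.addServer(host, PORT1)
      .setSubscriptionEnabled(true)   // "establishCallbackConnection"
      .setSubscriptionRedundancy(-1)  // "redundancyLevel" of -1
      .setMinConnections(2);          // "connectionsPerServer" of 2
  Pool p = pf.create("PutAllDUnitTestPool"); // hypothetical pool name
  factory.setPoolName(p.getName());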
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class HASlowReceiverDUnitTest method createClientCache.
public static void createClientCache(String host, Integer port1, Integer port2, Integer port3,
    Integer rLevel, Boolean addListener) throws Exception {
  CacheServerTestUtil.disableShufflingOfEndpoints();
  Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "");
  new HASlowReceiverDUnitTest().createCache(props);
  AttributesFactory factory = new AttributesFactory();
  PoolImpl p = (PoolImpl) PoolManager.createFactory()
      .addServer("localhost", port1)
      .addServer("localhost", port2)
      .addServer("localhost", port3)
      .setSubscriptionEnabled(true)
      .setSubscriptionRedundancy(rLevel.intValue())
      .setThreadLocalConnections(true)
      .setMinConnections(6)
      .setReadTimeout(20000)
      .setPingInterval(1000)
      .setRetryAttempts(5)
      .create("HASlowRecieverDUnitTestPool");
  factory.setScope(Scope.LOCAL);
  factory.setPoolName(p.getName());
  if (addListener.booleanValue()) {
    factory.addCacheListener(new CacheListenerAdapter() {
      @Override
      public void afterUpdate(EntryEvent event) {
        if (event.getNewValue().equals("v20")) {
          try {
            // Block the listener for two minutes to simulate a slow receiver.
            Thread.sleep(120000);
          } catch (InterruptedException e) {
            e.printStackTrace();
          }
        }
      }
    });
  }
  RegionAttributes attrs = factory.create();
  cache.createRegion(regionName, attrs);
  pool = p;
}
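A hypothetical DUnit-style call site for the helper above, mirroring the client.invoke(...) pattern used in the FireAndForgetFunctionOnAllServersDUnitTest snippet earlier; the server ports and redundancy level are placeholders.
  final int port1 = 40404, port2 = 40405, port3 = 40406; // placeholder cache-server ports
  VM client = Host.getHost(0).getVM(3);
  client.invoke("create slow-receiver client cache", () -> {
    HASlowReceiverDUnitTest.createClientCache("localhost", port1, port2, port3,
        1 /* subscription redundancy */, Boolean.TRUE /* install the slow listener */);
    return null;
  });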
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class PutAllDUnitTest method createClientCache2.
/**
 * Function to create a client cache with HAEventIdPropagationListenerForClient2 as the listener.
 **/
public static void createClientCache2(String host, Integer port1) throws Exception {
  int PORT1 = port1.intValue();
  Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "");
  new PutAllDUnitTest().createCache(props);
  props.setProperty("retryAttempts", "2");
  props.setProperty("endpoints", "ep1=" + host + ":" + PORT1);
  props.setProperty("redundancyLevel", "-1");
  props.setProperty("establishCallbackConnection", "true");
  props.setProperty("LBPolicy", "Sticky");
  props.setProperty("readTimeout", "2000");
  props.setProperty("socketBufferSize", "1000");
  props.setProperty("retryInterval", "250");
  props.setProperty("connectionsPerServer", "2");
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.DISTRIBUTED_ACK);
  PoolImpl p = (PoolImpl) ClientServerTestCase.configureConnectionPool(factory, host, PORT1, -1,
      true, -1, 2, null);
  CacheListener clientListener = new HAEventIdPropagationListenerForClient2();
  factory.setCacheListener(clientListener);
  RegionAttributes attrs = factory.create();
  cache.createRegion(REGION_NAME, attrs);
  Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
  assertNotNull(region);
  region.registerInterest("ALL_KEYS", InterestResultPolicy.NONE);
  pool = p;
}