Use of org.apache.geode.internal.cache.PoolFactoryImpl in project geode by apache.
The class WANLocatorServerDUnitTest, method createClient.
public static void createClient(Integer port1, Integer port2, Integer port3) {
  ClientCacheFactory cf = new ClientCacheFactory();
  cache = (Cache) cf.create();
  PoolFactoryImpl pf = (PoolFactoryImpl) PoolManager.createFactory();
  pf.setReadTimeout(0);
  pf.setIdleTimeout(-1);
  pf.setMinConnections(4);
  pf.setServerGroup(GatewayReceiver.RECEIVER_GROUP);
  pf.addLocator("localhost", port1);
  pf.addLocator("localhost", port2);
  pf.addLocator("localhost", port3);
  pf.init((GatewaySender) null);
  proxy = (PoolImpl) pf.create("KISHOR_POOL");
  Connection con1 = proxy.acquireConnection();
  try {
    con1.close(true);
  } catch (Exception e) {
    fail("createClient failed", e);
  }
}
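The cast to PoolFactoryImpl, the init((GatewaySender) null) call, and acquireConnection are internal test hooks; an ordinary client can reach the same pool configuration through the public API alone. A minimal sketch under that assumption, with an illustrative locator address and pool name that are not taken from the test, and assuming a locator is actually reachable there:

  import org.apache.geode.cache.client.ClientCache;
  import org.apache.geode.cache.client.ClientCacheFactory;
  import org.apache.geode.cache.client.Pool;
  import org.apache.geode.cache.client.PoolFactory;
  import org.apache.geode.cache.client.PoolManager;

  public class PublicPoolSketch {
    public static void main(String[] args) {
      // A client cache has to exist before pools are created against it.
      ClientCache clientCache = new ClientCacheFactory().create();

      // Same settings as the test, expressed through the public PoolFactory interface.
      PoolFactory pf = PoolManager.createFactory();
      pf.setReadTimeout(0);      // values mirror the test above
      pf.setIdleTimeout(-1);
      pf.setMinConnections(4);
      pf.addLocator("localhost", 10334); // illustrative locator port
      Pool pool = pf.create("examplePool"); // illustrative pool name

      pool.destroy();
      clientCache.close();
    }
  }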
Use of org.apache.geode.internal.cache.PoolFactoryImpl in project geode by apache.
The class DurableClientSimpleDUnitTest, method testMultipleVMsWithSameDurableId.
/**
* Test that a second VM with the same durable id cannot connect to the server while the first VM
* is connected. Also, verify that the first client is not affected by the second one attempting
* to connect.
*/
@Ignore("TODO: test is disabled")
@Test
public void testMultipleVMsWithSameDurableId() {
  // Start a server
  final int serverPort = ((Integer) this.server1VM
      .invoke(() -> CacheServerTestUtil.createCacheServer(regionName, new Boolean(true))))
          .intValue();

  // Start a durable client that is not kept alive on the server when it stops normally
  final String durableClientId = getName() + "_client";
  this.durableClientVM.invoke(() -> CacheServerTestUtil.createCacheClient(
      getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName,
      getClientDistributedSystemProperties(durableClientId), Boolean.TRUE));

  // Send clientReady message
  this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
    @Override
    public void run2() throws CacheException {
      CacheServerTestUtil.getCache().readyForEvents();
    }
  });

  // Have the durable client register interest in all keys
  this.durableClientVM.invoke(new CacheSerializableRunnable("Register interest") {
    @Override
    public void run2() throws CacheException {
      // Get the region
      Region region = CacheServerTestUtil.getCache().getRegion(regionName);
      assertNotNull(region);
      // Register interest in all keys
      region.registerInterestRegex(".*", InterestResultPolicy.NONE);
    }
  });

  // Attempt to start another durable client VM with the same id.
  this.publisherClientVM.invoke(new CacheSerializableRunnable("Create another durable client") {
    @Override
    public void run2() throws CacheException {
      getSystem(getClientDistributedSystemProperties(durableClientId));
      PoolFactoryImpl pf = (PoolFactoryImpl) PoolManager.createFactory();
      pf.init(getClientPool(getServerHostName(publisherClientVM.getHost()), serverPort, true));
      try {
        pf.create("uncreatablePool");
        fail("Should not have been able to create the pool");
      } catch (ServerRefusedConnectionException expected) {
        // expected exception
        disconnectFromDS();
      } catch (Exception e) {
        Assert.fail("Should not have gotten here", e);
      }
    }
  });

  // Verify the durable client on the server
  this.server1VM.invoke(new CacheSerializableRunnable("Verify durable client") {
    @Override
    public void run2() throws CacheException {
      // Find the proxy
      checkNumberOfClientProxies(1);
      CacheClientProxy proxy = getClientProxy();
      assertNotNull(proxy);
      // Verify that it is durable and its properties are correct
      assertTrue(proxy.isDurable());
      assertEquals(durableClientId, proxy.getDurableId());
      assertEquals(DistributionConfig.DEFAULT_DURABLE_CLIENT_TIMEOUT, proxy.getDurableTimeout());
    }
  });

  // Start a normal publisher client
  this.publisherClientVM.invoke(() -> CacheServerTestUtil.createCacheClient(
      getClientPool(getServerHostName(publisherClientVM.getHost()), serverPort, false),
      regionName));

  // Publish some entries
  final int numberOfEntries = 10;
  publishEntries(numberOfEntries);

  // Verify the durable client received the updates
  verifyDurableClientEvents(this.durableClientVM, numberOfEntries);

  // Stop the publisher client
  this.publisherClientVM.invoke(() -> CacheServerTestUtil.closeCache());

  // Stop the durable client
  this.durableClientVM.invoke(() -> CacheServerTestUtil.closeCache());

  // Stop the server
  this.server1VM.invoke(() -> CacheServerTestUtil.closeCache());
}
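The rejection the test exercises is driven by the durable-client-id distributed-system property that both VMs share. For reference, a minimal sketch of a durable client built with the public ClientCacheFactory API; the id, timeout, and locator address below are illustrative assumptions, not values from the test:

  import org.apache.geode.cache.client.ClientCache;
  import org.apache.geode.cache.client.ClientCacheFactory;

  public class DurableClientSketch {
    public static void main(String[] args) {
      // The durable id is an ordinary distributed-system property on the client.
      ClientCache clientCache = new ClientCacheFactory()
          .set("durable-client-id", "exampleDurableClient") // illustrative id
          .set("durable-client-timeout", "300")             // seconds the server keeps the queue after a disconnect
          .addPoolLocator("localhost", 10334)               // illustrative locator
          .setPoolSubscriptionEnabled(true)
          .create();

      // Tell the servers this client is ready to receive queued events.
      clientCache.readyForEvents();

      // keepalive = true preserves the subscription queue on the servers, so a client
      // reconnecting with the same durable id can replay the events it missed.
      clientCache.close(true);
    }
  }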
Use of org.apache.geode.internal.cache.PoolFactoryImpl in project geode by apache.
The class ConnectionPoolFactoryJUnitTest, method testCreateADirectPool.
@Test
public void testCreateADirectPool() throws Exception {
  int connectionTimeout = 20;
  boolean threadLocalConnections = true;
  int readTimeout = 20;
  int messageTrackingTimeout = 20;
  int redundancy = 20;
  int bufferSize = 20;
  int ackInterval = 15;

  PoolFactory cpf = PoolManager.createFactory();
  ((PoolFactoryImpl) cpf).setStartDisabled(true);
  cpf.addServer("localhost", 40907)
      .setFreeConnectionTimeout(connectionTimeout)
      .setThreadLocalConnections(threadLocalConnections)
      .setReadTimeout(readTimeout)
      .setSubscriptionEnabled(true)
      .setSubscriptionRedundancy(redundancy)
      .setSubscriptionMessageTrackingTimeout(messageTrackingTimeout)
      .setSubscriptionAckInterval(ackInterval)
      .setSocketBufferSize(bufferSize);
  Pool pool1 = cpf.create("myfriendlypool");
  // @todo validate non default props
  Map pools = PoolManager.getAll();
  assertEquals("there should be one pool", 1, pools.size());
  assertNotNull("pool myfriendlypool should exist and be non null", pools.get("myfriendlypool"));

  /* let's make another with the same name - should fail! */
  boolean gotit = false;
  try {
    cpf.create("myfriendlypool");
  } catch (IllegalStateException ise) {
    gotit = true;
  }
  assertTrue("should have gotten an illegal state when creating duplicate pool name", gotit);
  pools = PoolManager.getAll();
  assertEquals("there should be one pool", 1, pools.size());
  assertNotNull("pool myfriendlypool should exist and be non null", pools.get("myfriendlypool"));

  /* create another legit one */
  Pool pool2 = cpf.create("myfriendlypool2");
  pools = PoolManager.getAll();
  assertEquals("there should be two pools", 2, pools.size());
  assertNotNull("pool myfriendlypool should exist and be non null", pools.get("myfriendlypool"));
  assertNotNull("pool myfriendlypool2 should exist and be non null", pools.get("myfriendlypool2"));

  /* let's remove them one by one */
  assertEquals(pool1, PoolManager.find("myfriendlypool"));
  pool1.destroy();
  assertEquals(null, PoolManager.find("myfriendlypool"));
  pools = PoolManager.getAll();
  assertEquals("there should be one pool", 1, pools.size());
  assertNull("pool myfriendlypool should NOT exist", pools.get("myfriendlypool"));
  assertNotNull("pool myfriendlypool2 should exist and be non null", pools.get("myfriendlypool2"));

  assertEquals(pool2, PoolManager.find("myfriendlypool2"));
  pool2.destroy();
  assertEquals(null, PoolManager.find("myfriendlypool2"));
  pools = PoolManager.getAll();
  assertEquals("there should be 0 pools", 0, pools.size());
  assertNull("pool myfriendlypool should NOT exist", pools.get("myfriendlypool"));
  assertNull("pool myfriendlypool2 should NOT exist", pools.get("myfriendlypool2"));

  cache.close();
}
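The registry behavior this test asserts can be reproduced in a few lines of the public API. A minimal sketch, assuming a cache server is reachable at the illustrative port; the pool name and port are not taken from the test:

  import org.apache.geode.cache.client.ClientCache;
  import org.apache.geode.cache.client.ClientCacheFactory;
  import org.apache.geode.cache.client.Pool;
  import org.apache.geode.cache.client.PoolFactory;
  import org.apache.geode.cache.client.PoolManager;

  public class PoolRegistrySketch {
    public static void main(String[] args) {
      ClientCache clientCache = new ClientCacheFactory().create();

      PoolFactory factory = PoolManager.createFactory();
      factory.addServer("localhost", 40404); // illustrative cache-server port
      Pool pool = factory.create("registrySketchPool"); // illustrative name

      // Created pools are registered by name with PoolManager ...
      System.out.println("found: " + (PoolManager.find("registrySketchPool") == pool));

      // ... and reusing a name is rejected, as the test above asserts.
      try {
        factory.create("registrySketchPool");
      } catch (IllegalStateException duplicateName) {
        System.out.println("duplicate name rejected: " + duplicateName.getMessage());
      }

      // destroy() removes the pool from the registry again.
      pool.destroy();
      System.out.println("after destroy: " + PoolManager.find("registrySketchPool"));

      clientCache.close();
    }
  }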
Use of org.apache.geode.internal.cache.PoolFactoryImpl in project geode by apache.
The class ClientCacheCreation, method create.
/**
* Fills in the contents of a {@link Cache} based on this creation object's state.
*/
@Override
void create(InternalCache cache)
    throws TimeoutException, CacheWriterException, GatewayException, RegionExistsException {
  cache.setDeclarativeCacheConfig(this.getCacheConfig());

  if (!cache.isClient()) {
    throw new IllegalStateException(
        "You must use ClientCacheFactory when the cache.xml uses client-cache.");
  }

  // create connection pools
  Map<String, Pool> pools = getPools();
  if (!pools.isEmpty()) {
    for (final Pool cp : pools.values()) {
      PoolFactoryImpl poolFactory = (PoolFactoryImpl) PoolManager.createFactory();
      poolFactory.init(cp);
      poolFactory.create(cp.getName());
    }
  }
  cache.determineDefaultPool();

  if (hasResourceManager()) {
    // moved this up to fix bug 42128
    getResourceManager().configure(cache.getResourceManager());
  }

  DiskStoreAttributesCreation pdxRegDSC = initializePdxDiskStore(cache);
  cache.initializePdxRegistry();

  for (DiskStore diskStore : listDiskStores()) {
    DiskStoreAttributesCreation creation = (DiskStoreAttributesCreation) diskStore;
    if (creation != pdxRegDSC) {
      createDiskStore(creation, cache);
    }
  }
  for (DiskStore diskStore : listDiskStores()) {
    DiskStoreAttributesCreation creation = (DiskStoreAttributesCreation) diskStore;
    // Don't let the DiskStoreAttributesCreation escape to the user
    DiskStoreFactory factory = cache.createDiskStoreFactory(creation);
    factory.create(creation.getName());
  }

  if (hasDynamicRegionFactory()) {
    DynamicRegionFactory.get().open(getDynamicRegionFactoryConfig());
  }
  if (hasCopyOnRead()) {
    cache.setCopyOnRead(getCopyOnRead());
  }

  if (this.txMgrCreation != null && this.txMgrCreation.getListeners().length > 0
      && cache.getCacheTransactionManager() != null) {
    cache.getCacheTransactionManager().initListeners(this.txMgrCreation.getListeners());
  }
  if (this.txMgrCreation != null && cache.getCacheTransactionManager() != null
      && this.txMgrCreation.getWriter() != null) {
    throw new IllegalStateException(
        LocalizedStrings.TXManager_NO_WRITER_ON_CLIENT.toLocalizedString());
  }

  cache.initializePdxRegistry();

  for (String id : this.regionAttributesNames) {
    RegionAttributesCreation creation = (RegionAttributesCreation) getRegionAttributes(id);
    creation.inheritAttributes(cache, false);
    // Don't let the RegionAttributesCreation escape to the user
    AttributesFactory factory = new AttributesFactory(creation);
    RegionAttributes attrs = factory.createRegionAttributes();
    cache.setRegionAttributes(id, attrs);
  }
  for (final Region<?, ?> region : this.roots.values()) {
    RegionCreation regionCreation = (RegionCreation) region;
    regionCreation.createRoot(cache);
  }

  cache.readyDynamicRegionFactory();
  runInitializer();
}
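This create() method runs when a client is started from a declarative client-cache XML file: each declared pool is replayed through a PoolFactory and registered with PoolManager under its declared name. A minimal sketch of triggering that path from application code; the cache-xml-file name is an illustrative assumption:

  import org.apache.geode.cache.client.ClientCache;
  import org.apache.geode.cache.client.ClientCacheFactory;
  import org.apache.geode.cache.client.Pool;
  import org.apache.geode.cache.client.PoolManager;

  public class DeclarativeClientSketch {
    public static void main(String[] args) {
      // Pointing the client at a client-cache XML file drives the declarative path above.
      ClientCache clientCache = new ClientCacheFactory()
          .set("cache-xml-file", "client-cache.xml") // illustrative file name
          .create();

      // Pools declared in the XML are now visible in the PoolManager registry.
      for (Pool pool : PoolManager.getAll().values()) {
        System.out.println("declared pool: " + pool.getName());
      }

      clientCache.close();
    }
  }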
Use of org.apache.geode.internal.cache.PoolFactoryImpl in project geode by apache.
The class LocatorTestBase, method startBridgeClientInVM.
protected void startBridgeClientInVM(VM vm, final String group, final String host, final int port,
    final String[] regions) throws Exception {
  PoolFactoryImpl pf = new PoolFactoryImpl(null);
  pf.addLocator(host, port)
      .setServerGroup(group)
      .setPingInterval(200)
      .setSubscriptionEnabled(true)
      .setSubscriptionRedundancy(-1);
  startBridgeClientInVM(vm, pf.getPoolAttributes(), regions);
}