use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class PoolManagerImpl method close.
/**
 * Destroys all created pools in this manager.
 */
public void close(boolean keepAlive) {
  // destroying connection pools
  boolean foundClientPool = false;
  synchronized (poolLock) {
    for (Iterator<Map.Entry<String, Pool>> itr = pools.entrySet().iterator(); itr.hasNext();) {
      Map.Entry<String, Pool> entry = itr.next();
      PoolImpl pool = (PoolImpl) entry.getValue();
      pool.basicDestroy(keepAlive);
      foundClientPool = true;
    }
    pools = Collections.emptyMap();
    itrForEmergencyClose = null;
    if (foundClientPool) {
      // Now that the client has all the pools destroyed free up the pooled comm buffers
      ServerConnection.emptyCommBufferPool();
    }
  }
}
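For context, a minimal public-API sketch of how this teardown is usually reached (not taken from the Geode sources; the host, port, and the durable-client scenario are hypothetical): closing a ClientCache with keepAlive=true ends up destroying each registered pool while asking the servers to retain durable subscription queues.

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;

public class ClientShutdownSketch {
  public static void main(String[] args) {
    // Hypothetical server endpoint; adjust to your environment.
    ClientCache cache = new ClientCacheFactory()
        .addPoolServer("localhost", 40404)
        .create();

    // ... normal client work ...

    // keepAlive=true: connection pools are torn down on the client, but the
    // servers are asked to keep this client's durable subscription queues.
    cache.close(true);
  }
}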
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class PoolManagerImpl method allPoolsRegisterDataSerializers.
public static void allPoolsRegisterDataSerializers(DataSerializer dataSerializer) {
  DataSerializer[] dataSerializers = new DataSerializer[1];
  dataSerializers[0] = dataSerializer;
  for (Iterator<Pool> itr = PoolManager.getAll().values().iterator(); itr.hasNext();) {
    PoolImpl next = (PoolImpl) itr.next();
    try {
      EventID eventId = (EventID) dataSerializer.getEventId();
      if (eventId == null) {
        eventId = InternalDataSerializer.generateEventId();
      }
      if (eventId == null) {
        // cache must not exist, do nothing
      } else {
        RegisterDataSerializersOp.execute(next, dataSerializers, eventId);
      }
    } catch (RuntimeException e) {
      logger.warn(LocalizedMessage.create(
          LocalizedStrings.PoolmanagerImpl_ERROR_REGISTERING_INSTANTIATOR_ON_POOL), e);
    } finally {
      next.releaseThreadLocalConnection();
    }
  }
}
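The registration above is driven from the public DataSerializer API on the client side. A sketch of that trigger, assuming a hypothetical OrderSerializer and Order class (neither ships with Geode):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.geode.DataSerializer;

// Hypothetical serializer, used only to illustrate the registration path.
public class OrderSerializer extends DataSerializer {

  // Hypothetical domain class for this example.
  public static class Order {
    private final String orderId;

    public Order(String orderId) {
      this.orderId = orderId;
    }

    public String getOrderId() {
      return orderId;
    }
  }

  public OrderSerializer() {
    // DataSerializer implementations need a public zero-arg constructor.
  }

  @Override
  public int getId() {
    return 201; // any id that is non-zero and unique in the cluster
  }

  @Override
  public Class<?>[] getSupportedClasses() {
    return new Class<?>[] {Order.class};
  }

  @Override
  public boolean toData(Object o, DataOutput out) throws IOException {
    out.writeUTF(((Order) o).getOrderId());
    return true;
  }

  @Override
  public Object fromData(DataInput in) throws IOException, ClassNotFoundException {
    return new Order(in.readUTF());
  }
}

Calling DataSerializer.register(OrderSerializer.class) on a client that already has live pools is what eventually reaches allPoolsRegisterDataSerializers, so the servers behind every pool learn the new id-to-class mapping.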
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class PoolManagerImpl method allPoolsRegisterDataSerializers.
public static void allPoolsRegisterDataSerializers(SerializerAttributesHolder holder) {
  SerializerAttributesHolder[] holders = new SerializerAttributesHolder[1];
  holders[0] = holder;
  for (Iterator<Pool> itr = PoolManager.getAll().values().iterator(); itr.hasNext();) {
    PoolImpl next = (PoolImpl) itr.next();
    try {
      EventID eventId = (EventID) holder.getEventId();
      if (eventId == null) {
        eventId = InternalDataSerializer.generateEventId();
      }
      if (eventId == null) {
        // cache must not exist, do nothing
      } else {
        RegisterDataSerializersOp.execute(next, holders, eventId);
      }
    } catch (RuntimeException e) {
      logger.warn(LocalizedMessage.create(
          LocalizedStrings.PoolmanagerImpl_ERROR_REGISTERING_INSTANTIATOR_ON_POOL), e);
    } finally {
      next.releaseThreadLocalConnection();
    }
  }
}
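This overload differs from the previous one only in that the serializer is described by a SerializerAttributesHolder (class name plus id) so registration can be deferred until the class is loaded; both walk the same PoolManager.getAll() map. A small sketch that inspects that map through the public API, assuming a client cache with one or more pools already created in this JVM:

import java.util.Map;
import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolManager;

public class ListPoolsSketch {
  public static void main(String[] args) {
    // Assumes a ClientCache with at least one pool exists in this JVM.
    Map<String, Pool> pools = PoolManager.getAll();
    for (Map.Entry<String, Pool> entry : pools.entrySet()) {
      Pool pool = entry.getValue();
      System.out.println(entry.getKey() + " -> servers=" + pool.getServers()
          + ", destroyed=" + pool.isDestroyed());
    }
  }
}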
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class ConnectionPoolDUnitTest method basicTestLifetimeExpire.
private void basicTestLifetimeExpire(final boolean threadLocal) throws CacheException {
  final String name = this.getName();
  final Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  AsyncInvocation putAI = null;
  AsyncInvocation putAI2 = null;
  try {
    // Create two bridge servers
    SerializableRunnable createCacheServer = new CacheSerializableRunnable("Create Cache Server") {
      public void run2() throws CacheException {
        AttributesFactory factory = getBridgeServerRegionAttributes(null, null);
        factory.setCacheListener(new DelayListener(25));
        createRegion(name, factory.create());
        try {
          startBridgeServer(0);
        } catch (Exception ex) {
          org.apache.geode.test.dunit.Assert.fail("While starting CacheServer", ex);
        }
      }
    };
    vm0.invoke(createCacheServer);
    final int port0 = vm0.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
    vm1.invoke(createCacheServer);
    final int port1 = vm1.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
    SerializableRunnable stopCacheServer = new SerializableRunnable("Stop CacheServer") {
      public void run() {
        stopBridgeServer(getCache());
      }
    };
    // we only had to stop it to reserve a port
    vm1.invoke(stopCacheServer);
    // Create one bridge client in this VM
    SerializableRunnable create = new CacheSerializableRunnable("Create region") {
      public void run2() throws CacheException {
        getLonerSystem();
        getCache();
        AttributesFactory factory = new AttributesFactory();
        factory.setScope(Scope.LOCAL);
        factory.setConcurrencyChecksEnabled(false);
        ClientServerTestCase.configureConnectionPool(factory, host0, port0, port1, false, /* queue */
            -1, 0, null, 100, 500, threadLocal, 500);
        Region region = createRegion(name, factory.create());
        // force connections to form
        region.put("keyInit", new Integer(0));
        region.put("keyInit2", new Integer(0));
      }
    };
    vm2.invoke(create);
    // Launch async thread that puts objects into cache. This thread will execute until
    // the test has ended.
    SerializableRunnable putter1 = new CacheSerializableRunnable("Put objects") {
      public void run2() throws CacheException {
        Region region = getRootRegion().getSubregion(name);
        PoolImpl pool = getPool(region);
        PoolStats stats = pool.getStats();
        baselineLifetimeCheck = stats.getLoadConditioningCheck();
        baselineLifetimeExtensions = stats.getLoadConditioningExtensions();
        baselineLifetimeConnect = stats.getLoadConditioningConnect();
        baselineLifetimeDisconnect = stats.getLoadConditioningDisconnect();
        try {
          int count = 0;
          while (!stopTestLifetimeExpire) {
            count++;
            region.put("keyAI1", new Integer(count));
          }
        } catch (NoAvailableServersException ex) {
          if (stopTestLifetimeExpire) {
            return;
          } else {
            throw ex;
          }
          // } catch (RegionDestroyedException e) { //will be thrown when the test ends
          // /*ignore*/
          // } catch (CancelException e) { //will be thrown when the test ends
          // /*ignore*/
        }
      }
    };
    SerializableRunnable putter2 = new CacheSerializableRunnable("Put objects") {
      public void run2() throws CacheException {
        Region region = getRootRegion().getSubregion(name);
        try {
          int count = 0;
          while (!stopTestLifetimeExpire) {
            count++;
            region.put("keyAI2", new Integer(count));
          }
        } catch (NoAvailableServersException ex) {
          if (stopTestLifetimeExpire) {
            return;
          } else {
            throw ex;
          }
          // } catch (RegionDestroyedException e) { //will be thrown when the test ends
          // /*ignore*/
          // } catch (CancelException e) { //will be thrown when the test ends
          // /*ignore*/
        }
      }
    };
    putAI = vm2.invokeAsync(putter1);
    putAI2 = vm2.invokeAsync(putter2);
    SerializableRunnable verify1Server = new CacheSerializableRunnable("verify1Server") {
      public void run2() throws CacheException {
        Region region = getRootRegion().getSubregion(name);
        PoolImpl pool = getPool(region);
        final PoolStats stats = pool.getStats();
        verifyServerCount(pool, 1);
        WaitCriterion ev = new WaitCriterion() {
          public boolean done() {
            return stats.getLoadConditioningCheck() >= (10 + baselineLifetimeCheck);
          }

          public String description() {
            return null;
          }
        };
        Wait.waitForCriterion(ev, 30 * 1000, 200, true);
        // make sure no replacements are happening.
        // since we have 2 threads and 2 cnxs and 1 server
        // when lifetimes are up we should only want to connect back to the
        // server we are already connected to and thus just extend our lifetime
        assertTrue("baselineLifetimeCheck=" + baselineLifetimeCheck
            + " but stats.getLoadConditioningCheck()=" + stats.getLoadConditioningCheck(),
            stats.getLoadConditioningCheck() >= (10 + baselineLifetimeCheck));
        baselineLifetimeCheck = stats.getLoadConditioningCheck();
        assertTrue(stats.getLoadConditioningExtensions() > baselineLifetimeExtensions);
        assertTrue(stats.getLoadConditioningConnect() == baselineLifetimeConnect);
        assertTrue(stats.getLoadConditioningDisconnect() == baselineLifetimeDisconnect);
      }
    };
    SerializableRunnable verify2Servers = new CacheSerializableRunnable("verify2Servers") {
      public void run2() throws CacheException {
        Region region = getRootRegion().getSubregion(name);
        PoolImpl pool = getPool(region);
        final PoolStats stats = pool.getStats();
        verifyServerCount(pool, 2);
        // make sure some replacements are happening.
        // since we have 2 threads and 2 cnxs and 2 servers
        // when lifetimes are up we should connect to the other server sometimes.
        // int retry = 300;
        // while ((retry-- > 0)
        // && (stats.getLoadConditioningCheck() < (10+baselineLifetimeCheck))) {
        // pause(100);
        // }
        // assertTrue("Bug 39209 expected "
        // + stats.getLoadConditioningCheck()
        // + " to be >= "
        // + (10+baselineLifetimeCheck),
        // stats.getLoadConditioningCheck() >= (10+baselineLifetimeCheck));
        // TODO: does this WaitCriterion actually help?
        WaitCriterion wc = new WaitCriterion() {
          String excuse;

          public boolean done() {
            int actual = stats.getLoadConditioningCheck();
            int expected = 10 + baselineLifetimeCheck;
            if (actual >= expected) {
              return true;
            }
            excuse = "Bug 39209 expected " + actual + " to be >= " + expected;
            return false;
          }

          public String description() {
            return excuse;
          }
        };
        try {
          Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
        } catch (AssertionError e) {
          // dumpStack();
          throw e;
        }
        assertTrue(stats.getLoadConditioningConnect() > baselineLifetimeConnect);
        assertTrue(stats.getLoadConditioningDisconnect() > baselineLifetimeDisconnect);
      }
    };
    vm2.invoke(verify1Server);
    assertEquals(true, putAI.isAlive());
    assertEquals(true, putAI2.isAlive());
  } finally {
    vm2.invoke(new SerializableRunnable("Stop Putters") {
      public void run() {
        stopTestLifetimeExpire = true;
      }
    });
    try {
      if (putAI != null) {
        // Verify that no exception has occurred in the putter thread
        ThreadUtils.join(putAI, 30 * 1000);
        if (putAI.exceptionOccurred()) {
          org.apache.geode.test.dunit.Assert.fail("While putting entries: ", putAI.getException());
        }
      }
      if (putAI2 != null) {
        // Verify that no exception has occurred in the putter thread
        ThreadUtils.join(putAI, 30 * 1000);
        // FIXME this thread does not terminate
        // if (putAI2.exceptionOccurred()) {
        // fail("While putting entries: ", putAI.getException());
        // }
      }
    } finally {
      vm2.invoke(new SerializableRunnable("Stop Putters") {
        public void run() {
          stopTestLifetimeExpire = false;
        }
      });
      // Close Pool
      vm2.invoke(new CacheSerializableRunnable("Close Pool") {
        public void run2() throws CacheException {
          Region region = getRootRegion().getSubregion(name);
          String poolName = region.getAttributes().getPoolName();
          region.localDestroyRegion();
          PoolManager.find(poolName).destroy();
        }
      });
      SerializableRunnable stopCacheServer = new SerializableRunnable("Stop CacheServer") {
        public void run() {
          stopBridgeServer(getCache());
        }
      };
      vm1.invoke(stopCacheServer);
      vm0.invoke(stopCacheServer);
    }
  }
}
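The test configures very short connection lifetimes through the dunit helper configureConnectionPool, which is not public API. A rough public-API sketch of a pool with a short load-conditioning lifetime (the endpoints, timings, and pool name are hypothetical, and a ClientCache or DistributedSystem must already exist in the JVM):

import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolFactory;
import org.apache.geode.cache.client.PoolManager;

public class LifetimePoolSketch {
  public static void main(String[] args) {
    // A ClientCache or DistributedSystem must be created before building a pool.
    PoolFactory pf = PoolManager.createFactory()
        .addServer("localhost", 40404)
        .addServer("localhost", 40405)
        .setMinConnections(2)
        // When a connection's load-conditioning lifetime (ms) expires, the pool checks
        // whether it should move to a less loaded server; these checks feed the
        // loadConditioningCheck/Extensions/Connect statistics asserted in the test above.
        .setLoadConditioningInterval(500)
        .setIdleTimeout(-1); // keep idle connections alive in this sketch
    Pool pool = pf.create("lifetime-pool"); // hypothetical pool name
    System.out.println("created pool " + pool.getName());
  }
}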
use of org.apache.geode.cache.client.internal.PoolImpl in project geode by apache.
the class ConnectionPoolDUnitTest method test006Pool.
/**
 * Tests the basic operations of the {@link Pool}
 *
 * @since GemFire 3.5
 */
@Test
public void test006Pool() throws CacheException {
  final String name = this.getName();
  final Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = Host.getHost(0).getVM(2);
  vm0.invoke(new CacheSerializableRunnable("Create Cache Server") {
    public void run2() throws CacheException {
      AttributesFactory factory = new AttributesFactory();
      factory.setScope(Scope.DISTRIBUTED_ACK);
      factory.setConcurrencyChecksEnabled(false);
      factory.setCacheLoader(new CacheLoader() {
        public Object load(LoaderHelper helper) {
          // System.err.println("CacheServer data loader called");
          return helper.getKey().toString();
        }

        public void close() {
        }
      });
      createRegion(name, factory.create());
      // pause(1000);
      try {
        startBridgeServer(0);
      } catch (Exception ex) {
        org.apache.geode.test.dunit.Assert.fail("While starting CacheServer", ex);
      }
    }
  });
  final int port = vm0.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
  SerializableRunnable create = new CacheSerializableRunnable("Create region") {
    public void run2() throws CacheException {
      getLonerSystem();
      getCache();
      validateDS();
      AttributesFactory factory = new AttributesFactory();
      factory.setScope(Scope.LOCAL);
      factory.setConcurrencyChecksEnabled(false);
      ClientServerTestCase.configureConnectionPool(factory, host0, port, -1, true, -1, -1, null);
      createRegion(name, factory.create());
    }
  };
  vm1.invoke(create);
  vm1.invoke(new CacheSerializableRunnable("Get values") {
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      for (int i = 0; i < 10; i++) {
        Object value = region.get(new Integer(i));
        assertEquals(String.valueOf(i), value);
      }
    }
  });
  vm1.invoke(new CacheSerializableRunnable("Update values") {
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      for (int i = 0; i < 10; i++) {
        region.put(new Integer(i), new Integer(i));
      }
    }
  });
  vm2.invoke(create);
  vm2.invoke(new CacheSerializableRunnable("Validate values") {
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      for (int i = 0; i < 10; i++) {
        Object value = region.get(new Integer(i));
        assertNotNull(value);
        assertTrue(value instanceof Integer);
        assertEquals(i, ((Integer) value).intValue());
      }
    }
  });
  vm1.invoke(new CacheSerializableRunnable("Close Pool") {
    // do some special close validation here
    public void run2() throws CacheException {
      Region region = getRootRegion().getSubregion(name);
      String pName = region.getAttributes().getPoolName();
      PoolImpl p = (PoolImpl) PoolManager.find(pName);
      assertEquals(false, p.isDestroyed());
      assertEquals(1, p.getAttachCount());
      try {
        p.destroy();
        fail("expected IllegalStateException");
      } catch (IllegalStateException expected) {
      }
      region.localDestroyRegion();
      assertEquals(false, p.isDestroyed());
      assertEquals(0, p.getAttachCount());
    }
  });
  vm0.invoke(new SerializableRunnable("Stop CacheServer") {
    public void run() {
      stopBridgeServer(getCache());
    }
  });
}
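The close validation above hinges on the pool attach count: a region that names a pool attaches to it, and Pool.destroy() throws IllegalStateException while anything is still attached. A public-API sketch of the same lifecycle (server endpoint, pool name, and region name are hypothetical, and a cache server is assumed to be listening):

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolManager;

public class PoolAttachSketch {
  public static void main(String[] args) {
    ClientCache cache = new ClientCacheFactory().create(); // client cache for this JVM

    // Hypothetical endpoint and names.
    Pool pool = PoolManager.createFactory()
        .addServer("localhost", 40404)
        .create("example-pool");

    Region<String, String> region = cache
        .<String, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
        .setPoolName("example-pool")
        .create("exampleRegion");

    try {
      pool.destroy(); // fails: the region above is still attached to the pool
    } catch (IllegalStateException expected) {
      System.out.println("pool still in use: " + expected.getMessage());
    }

    region.localDestroyRegion(); // detaches the region; the attach count drops to 0
    pool.destroy(); // now succeeds
    cache.close();
  }
}

This mirrors what test006Pool asserts with getAttachCount(): destroy() is only legal once the attach count has dropped back to zero.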