Use of org.apache.geode.internal.cache.CacheServerImpl in project geode by apache.
The class HAStartupAndFailoverDUnitTest, method verifyDispatcherIsNotAlive.
public static void verifyDispatcherIsNotAlive() {
  try {
    Cache c = CacheFactory.getAnyInstance();
    // assertEquals("More than one BridgeServer", 1, c.getCacheServers().size());
    // wait until exactly one cache server is registered
    WaitCriterion wc = new WaitCriterion() {
      String excuse;

      public boolean done() {
        return cache.getCacheServers().size() == 1;
      }

      public String description() {
        return excuse;
      }
    };
    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);

    CacheServerImpl bs = (CacheServerImpl) c.getCacheServers().iterator().next();
    assertNotNull(bs);
    assertNotNull(bs.getAcceptor());
    assertNotNull(bs.getAcceptor().getCacheClientNotifier());
    final CacheClientNotifier ccn = bs.getAcceptor().getCacheClientNotifier();

    // wait until at least one client proxy has registered with the notifier
    wc = new WaitCriterion() {
      String excuse;

      public boolean done() {
        return ccn.getClientProxies().size() > 0;
      }

      public String description() {
        return excuse;
      }
    };
    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);

    Iterator iter_prox = ccn.getClientProxies().iterator();
    if (iter_prox.hasNext()) {
      CacheClientProxy proxy = (CacheClientProxy) iter_prox.next();
      assertFalse("Dispatcher on secondary should not be alive", proxy._messageDispatcher.isAlive());
    }
  } catch (Exception ex) {
    fail("failure in verifyDispatcherIsNotAlive: " + ex);
  }
}
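Checks like this are typically driven from a DUnit controller VM. The following sketch is not part of the Geode source; the VM index is an assumption, and it only illustrates how a test might invoke the static check on the JVM hosting the secondary server, using the DUnit Host/VM API these test classes already rely on.

import org.apache.geode.test.dunit.Host;
import org.apache.geode.test.dunit.VM;

public class SecondaryDispatcherCheck {
  public static void assertSecondaryDispatcherStopped() {
    // VM index 1 is a placeholder; a real test picks the VM hosting the secondary server
    VM secondaryServer = Host.getHost(0).getVM(1);
    // run the static verification inside the remote server JVM
    secondaryServer.invoke(() -> HAStartupAndFailoverDUnitTest.verifyDispatcherIsNotAlive());
  }
}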
Use of org.apache.geode.internal.cache.CacheServerImpl in project geode by apache.
The class HAInterestTestCase, method verifyDispatcherIsNotAlive.
public static void verifyDispatcherIsNotAlive() {
  WaitCriterion wc = new WaitCriterion() {
    @Override
    public boolean done() {
      return cache.getCacheServers().size() == 1;
    }

    @Override
    public String description() {
      return "cache.getCacheServers().size() == 1";
    }
  };
  Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);

  CacheServerImpl bs = (CacheServerImpl) cache.getCacheServers().iterator().next();
  assertNotNull(bs);
  assertNotNull(bs.getAcceptor());
  assertNotNull(bs.getAcceptor().getCacheClientNotifier());
  final CacheClientNotifier ccn = bs.getAcceptor().getCacheClientNotifier();

  wc = new WaitCriterion() {
    @Override
    public boolean done() {
      return ccn.getClientProxies().size() > 0;
    }

    @Override
    public String description() {
      return "waiting for ccn.getClientProxies().size() > 0";
    }
  };
  Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);

  Iterator iter_prox = ccn.getClientProxies().iterator();
  if (iter_prox.hasNext()) {
    CacheClientProxy proxy = (CacheClientProxy) iter_prox.next();
    assertFalse("Dispatcher on secondary should not be alive", proxy._messageDispatcher.isAlive());
  }
}
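The natural counterpart on the primary server asserts that the dispatcher is alive. A minimal sketch is below; it mirrors the field and accessor names used in the snippet above but is not copied from the Geode source.

public static void verifyDispatcherIsAlive() {
  // same lookup path as above: server -> acceptor -> notifier -> client proxies
  CacheServerImpl bs = (CacheServerImpl) cache.getCacheServers().iterator().next();
  final CacheClientNotifier ccn = bs.getAcceptor().getCacheClientNotifier();
  for (Object o : ccn.getClientProxies()) {
    CacheClientProxy proxy = (CacheClientProxy) o;
    // on the primary, the message dispatcher thread is expected to be running
    assertTrue("Dispatcher on primary should be alive", proxy._messageDispatcher.isAlive());
  }
}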
Use of org.apache.geode.internal.cache.CacheServerImpl in project geode by apache.
The class CacheCreation, method startCacheServers.
/**
 * Starts declarative cache servers if a server is not already running on the port. Also adds a
 * default server to declarativeCacheServers if a serverPort is specified.
 */
void startCacheServers(List<CacheServer> declarativeCacheServers, Cache cache, Integer serverPort,
    String serverBindAdd, Boolean disableDefaultServer) {
  if (declarativeCacheServers.size() > 1 && (serverPort != null || serverBindAdd != null)) {
    throw new RuntimeException(
        LocalizedStrings.CacheServerLauncher_SERVER_PORT_MORE_THAN_ONE_CACHE_SERVER.toLocalizedString());
  }

  CacheServerCreation defaultServer = null;
  boolean hasServerPortOrBindAddress = serverPort != null || serverBindAdd != null;
  boolean isDefaultServerEnabled = disableDefaultServer == null || !disableDefaultServer;
  if (declarativeCacheServers.isEmpty() && hasServerPortOrBindAddress && isDefaultServerEnabled) {
    // add a default server unless one is already running on the requested port
    boolean existingCacheServer = false;
    List<CacheServer> cacheServers = cache.getCacheServers();
    if (cacheServers != null) {
      for (CacheServer cacheServer : cacheServers) {
        if (serverPort != null && serverPort == cacheServer.getPort()) {
          existingCacheServer = true;
        }
      }
    }
    if (!existingCacheServer) {
      defaultServer = new CacheServerCreation((InternalCache) cache, false);
      declarativeCacheServers.add(defaultServer);
    }
  }

  for (CacheServer declarativeCacheServer : declarativeCacheServers) {
    CacheServerCreation declaredCacheServer = (CacheServerCreation) declarativeCacheServer;

    // skip servers that are already running on the declared port
    boolean startServer = true;
    List<CacheServer> cacheServers = cache.getCacheServers();
    if (cacheServers != null) {
      for (CacheServer cacheServer : cacheServers) {
        if (declaredCacheServer.getPort() == cacheServer.getPort()) {
          startServer = false;
        }
      }
    }
    if (!startServer) {
      continue;
    }

    CacheServerImpl impl = (CacheServerImpl) cache.addCacheServer();
    impl.configureFrom(declaredCacheServer);
    if (declaredCacheServer == defaultServer) {
      impl.setIsDefaultServer();
    }
    if (serverPort != null && serverPort != CacheServer.DEFAULT_PORT) {
      impl.setPort(serverPort);
    }
    if (serverBindAdd != null) {
      impl.setBindAddress(serverBindAdd.trim());
    }

    try {
      if (!impl.isRunning()) {
        impl.start();
      }
    } catch (IOException ex) {
      throw new GemFireIOException(
          LocalizedStrings.CacheCreation_WHILE_STARTING_CACHE_SERVER_0.toLocalizedString(impl), ex);
    }
  }
}
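Stripped of the declarative bookkeeping, the per-server work above amounts to creating a server, applying overrides, and starting it if it is not already running. A minimal sketch under those assumptions (the port and bind address below are placeholders, not values from the source):

import java.io.IOException;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.server.CacheServer;

public final class StartServerSketch {
  static void startServer(Cache cache) throws IOException {
    CacheServer server = cache.addCacheServer();
    server.setPort(40404);             // placeholder; CacheServer.DEFAULT_PORT is 40404
    server.setBindAddress("10.0.0.5"); // placeholder bind address
    if (!server.isRunning()) {
      server.start();                  // throws IOException on failure, as handled above
    }
  }
}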
Use of org.apache.geode.internal.cache.CacheServerImpl in project geode by apache.
The class GMSMembershipManager, method saveCacheXmlForReconnect.
/** generate XML from the cache before shutting down due to forced disconnect */
public void saveCacheXmlForReconnect(boolean sharedConfigEnabled) {
  // first save the current cache description so reconnect can rebuild the cache
  InternalCache cache = GemFireCacheImpl.getInstance();
  if (cache != null) {
    if (!Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "autoReconnect-useCacheXMLFile")
        && !sharedConfigEnabled) {
      try {
        logger.info("generating XML to rebuild the cache after reconnect completes");
        StringPrintWriter pw = new StringPrintWriter();
        CacheXmlGenerator.generate((Cache) cache, pw, true, false);
        String cacheXML = pw.toString();
        cache.getCacheConfig().setCacheXMLDescription(cacheXML);
        logger.info("XML generation completed: {}", cacheXML);
      } catch (CancelException e) {
        logger.info(LocalizedMessage.create(
            LocalizedStrings.GroupMembershipService_PROBLEM_GENERATING_CACHE_XML), e);
      }
    } else if (sharedConfigEnabled && !cache.getCacheServers().isEmpty()) {
      // we need to retain a cache-server description if this JVM was started by gfsh
      List<CacheServerCreation> list = new ArrayList<>(cache.getCacheServers().size());
      for (final Object o : cache.getCacheServers()) {
        CacheServerImpl cs = (CacheServerImpl) o;
        if (cs.isDefaultServer()) {
          CacheServerCreation bsc = new CacheServerCreation(cache, cs);
          list.add(bsc);
        }
      }
      cache.getCacheConfig().setCacheServerCreation(list);
      logger.info("CacheServer configuration saved");
    }
  }
}
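The XML capture itself can be reproduced outside the membership manager. A hedged sketch, using a plain java.io writer in place of the internal StringPrintWriter and passing the same boolean flags as the call above:

import java.io.PrintWriter;
import java.io.StringWriter;
import org.apache.geode.cache.Cache;
import org.apache.geode.internal.cache.xmlcache.CacheXmlGenerator;

public final class CacheXmlSnapshot {
  /** Returns the running cache's configuration rendered as cache.xml text. */
  public static String snapshot(Cache cache) {
    StringWriter sw = new StringWriter();
    // boolean flags copied verbatim from the call in the snippet above
    CacheXmlGenerator.generate(cache, new PrintWriter(sw), true, false);
    return sw.toString();
  }
}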
Use of org.apache.geode.internal.cache.CacheServerImpl in project geode by apache.
The class Bug38741DUnitTest, method testCopyOnReadWithBridgeServer.
/**
 * Test that CopyOnRead doesn't cause {@link HARegionQueue#peek()} to create a copy, assuming that
 * creating copies performs a serialize and de-serialize operation.
 *
 * @throws Exception when there is a failure
 * @since GemFire bugfix5.7
 */
@Test
public void testCopyOnReadWithBridgeServer() throws Exception {
  final Host h = Host.getHost(0);
  final VM client = h.getVM(2);
  final VM server = h.getVM(3);
  final String rName = getUniqueName();
  final int[] ports = createUniquePorts(1);
  final String k1 = "k1";
  final String k2 = "k2";
  final String k3 = "k3";
  createBridgeServer(server, rName, ports[0]);

  // Put an instance of SerializationCountingValue to assert copy-on-read behavior
  // when notifyBySubscription is true
  server.invoke(new CacheSerializableRunnable("Enable copy on read and assert server copy behavior") {
    public void run2() throws CacheException {
      final LocalRegion r = (LocalRegion) getRootRegion(rName);

      // Using a key that counts serialization, the test captures
      // any serialization of the key when it is a member of another object,
      // specifically in this case ClientUpdateMessageImpl, which is assumed to be
      // the value of a HARegion
      SerializationCountingKey key = new SerializationCountingKey(k1);
      byte[] val = new byte[1];
      byte valIsObj = 0x01;
      Integer cb = new Integer(0);
      ClientProxyMembershipID cpmi = null;
      EventID eid = null;
      ClientUpdateMessageImpl cui = new ClientUpdateMessageImpl(EnumListenerEvent.AFTER_CREATE, r,
          key, val, valIsObj, cb, cpmi, eid);
      ClientUpdateMessageImpl cuiCopy = (ClientUpdateMessageImpl) CopyHelper.copy(cui);
      assertSame(key, cui.getKeyOfInterest());
      assertEquals(1, key.count.get());
      key = (SerializationCountingKey) cuiCopy.getKeyOfInterest();
      assertEquals(cui.getKeyOfInterest(), cuiCopy.getKeyOfInterest());
      assertEquals(1, key.count.get());

      SerializationCountingKey ks1 = new SerializationCountingKey(k1);
      {
        // Make sure nothing (HARegion) has serialized/de-serialized this instance
        SerializationCountingValue sc = new SerializationCountingValue();
        r.put(ks1, sc);
        assertEquals(0, sc.count.get());
        assertEquals(0, ks1.count.get());
      }
      {
        // No copy should be made upon get (assert standard no-copy behavior)
        SerializationCountingValue sc = (SerializationCountingValue) r.get(ks1);
        assertEquals(0, sc.count.get());
        assertEquals(0, ks1.count.get());
      }

      // enable copy on read
      getCache().setCopyOnRead(true);
      {
        // Assert standard copy-on-read behavior
        SerializationCountingValue sc = (SerializationCountingValue) r.get(ks1);
        assertEquals(1, sc.count.get());
        assertEquals(0, ks1.count.get());
      }
      {
        // Put another counter with copy-on-read true
        // Again check that nothing (HARegion) has performed serialization
        SerializationCountingValue sc = new SerializationCountingValue();
        SerializationCountingKey ks3 = new SerializationCountingKey(k3);
        r.put(ks3, sc);
        assertEquals(0, sc.count.get());
        assertEquals(0, ks3.count.get());
      }
    }
  });
  // Set up a client which subscribes to the server region and registers (aka pulls)
  // interest in keys, which creates an assumed HARegionQueue on the server
  // (in the event that the above code didn't already create a HARegion)
  final String serverHostName = NetworkUtils.getServerHostName(server.getHost());
  client.invoke(new CacheSerializableRunnable("Assert server copy behavior from client") {
    public void run2() throws CacheException {
      getCache();
      AttributesFactory factory = new AttributesFactory();
      ClientServerTestCase.configureConnectionPool(factory, serverHostName, ports, true, -1, 1, null);
      factory.setScope(Scope.LOCAL);
      Region r = createRootRegion(rName, factory.create());

      SerializationCountingKey ks1 = new SerializationCountingKey(k1);
      SerializationCountingKey ks3 = new SerializationCountingKey(k3);
      r.registerInterest(ks1, InterestResultPolicy.KEYS_VALUES);
      // entry shouldn't exist yet
      r.registerInterest(new SerializationCountingKey(k2), InterestResultPolicy.KEYS_VALUES);
      r.registerInterest(ks3, InterestResultPolicy.KEYS_VALUES);
      {
        // Once for the get on the server, once to send the value to this client
        SerializationCountingValue sc = (SerializationCountingValue) r.get(ks1);
        assertEquals(2, sc.count.get());
      }
      {
        // Once to send the value to this client
        SerializationCountingValue sc = (SerializationCountingValue) r.get(ks3);
        assertEquals(1, sc.count.get());
      }
    }
  });
  // Put an instance of SerializationCountingValue to assert copy-on-read behavior
  // once a client has registered interest
  server.invoke(new CacheSerializableRunnable("Assert copy behavior after client is setup") {
    public void run2() throws CacheException {
      Region r = getRootRegion(rName);
      CacheServerImpl bsi = (CacheServerImpl) getCache().getCacheServers().iterator().next();
      Collection cp = bsi.getAcceptor().getCacheClientNotifier().getClientProxies();
      // Should only be one because only one client is connected
      assertEquals(1, cp.size());
      final CacheClientProxy ccp = (CacheClientProxy) cp.iterator().next();

      // Wait for messages to drain to capture a stable "processed message count"
      WaitCriterion ev = new WaitCriterion() {
        public boolean done() {
          return ccp.getHARegionQueue().size() == 0;
        }

        public String description() {
          return "region queue never became empty";
        }
      };
      Wait.waitForCriterion(ev, 60 * 1000, 200, true);

      // Capture the current processed message count to know
      // when the next message has been serialized
      final int currMesgCount = ccp.getStatistics().getMessagesProcessed();
      SerializationCountingKey ks2 = new SerializationCountingKey(k2);
      SerializationCountingValue sc = new SerializationCountingValue();

      // Update a key upon which the client has expressed interest,
      // expect it to send an update message to the client
      r.put(ks2, sc);

      // Wait to know that the data has been at least serialized (possibly sent)
      ev = new WaitCriterion() {
        public boolean done() {
          return ccp.getStatistics().getMessagesProcessed() != currMesgCount;
        }

        public String description() {
          return "processed message count never advanced";
        }
      };
      Wait.waitForCriterion(ev, 60 * 1000, 200, true);

      // assert one serialization to send the value to the interested client;
      // more than one implies copy-on-read behavior (bad)
      assertEquals(1, sc.count.get());
      assertEquals(1, ks2.count.get());
    }
  });
  // Double-check the serialization count in the event that the previous check
  // missed the copy due to race conditions
  client.invoke(new CacheSerializableRunnable("Assert copy behavior from client after update") {
    public void run2() throws CacheException {
      Region r = getRootRegion(rName);
      {
        // Once to send the value to this client via the updater thread
        SerializationCountingKey ks2 = new SerializationCountingKey(k2);
        // Wait for the update to arrive on the Cache Client Updater
        long start = NanoTimer.getTime();
        final int maxSecs = 30;
        while (!r.containsKey(ks2)) {
          Wait.pause(100);
          if ((NanoTimer.getTime() - start) > TimeUnit.SECONDS.toNanos(maxSecs)) {
            fail("Waited over " + maxSecs + "s");
          }
        }
        SerializationCountingValue sc = (SerializationCountingValue) r.getEntry(ks2).getValue();
        assertEquals(1, sc.count.get());
      }
    }
  });
}
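SerializationCountingKey and SerializationCountingValue are not shown on this page. One plausible reconstruction, offered only as a sketch and not taken from the test source, is a DataSerializable that bumps an AtomicInteger each time toData runs, so the asserts above can count how often an instance was serialized:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.geode.DataSerializable;

// Hypothetical shape of the counting helper; the real test classes may differ.
public class SerializationCountingValue implements DataSerializable {
  public final AtomicInteger count = new AtomicInteger();

  public void toData(DataOutput out) throws IOException {
    count.incrementAndGet();     // record one serialization
    out.writeInt(count.get());
  }

  public void fromData(DataInput in) throws IOException {
    count.set(in.readInt());     // carry the sender's count across the wire
  }
}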