use of org.apache.geode.distributed.internal.ServerLocation in project geode by apache.
the class DurableClientReconnectDUnitTest method testBug39332.
/**
 * This test checks a problem found in bug 39332:
 * 1. The durable client disconnects.
 * 2. The durable client comes back and creates a client-to-server connection, but not a queue
 *    connection.
 * 3. The durable client disconnects again.
 * 4. The durable client reconnects.
 * 5. Eventually, all of the durable client's connections are closed because the durable
 *    expiration timer task created in step 1 is never cancelled.
 */
@Test
public void testBug39332() {
  // create the client cache and send the clientReady message
  createCacheClient(2, 20);
  HashSet redundantServers = new HashSet(pool.getRedundantNames());
  redundantServers.add(pool.getPrimaryName());
  instance.determineAndVerfiyRedundantServers(redundantServers);
  instance.determineAndVerfiyNonRedundantServers(redundantServers);
  LogWriterUtils.getLogWriter().info("TEST - Durable client initially has servers " + redundantServers);
  LogWriterUtils.getLogWriter().info("TEST - Closing durable client for the first time");
  // Stop the durable client
  closeCache(true);
  LogWriterUtils.getLogWriter().info("TEST - Durable client closed for the first time");
  // Wait for the server to clean up the client resources
  // temporary fix for bug 38345.
  Wait.pause(2000);
  LogWriterUtils.getLogWriter().info("TEST - Creating the durable client with one fewer server");
  // We recreate the durable client, but this time we won't have it create any queues
  createCacheClient(2, 20, false);
  HashSet redundantServers2 = new HashSet(pool.getRedundantNames());
  redundantServers2.add(pool.getPrimaryName());
  LogWriterUtils.getLogWriter().info("TEST - Durable client created again, now with servers " + redundantServers2);
  Host host = Host.getHost(0);
  // Make sure we create client-to-server connections to all of the servers
  pool.acquireConnection(new ServerLocation(NetworkUtils.getServerHostName(host), PORT1.intValue()));
  pool.acquireConnection(new ServerLocation(NetworkUtils.getServerHostName(host), PORT2.intValue()));
  pool.acquireConnection(new ServerLocation(NetworkUtils.getServerHostName(host), PORT3.intValue()));
  pool.acquireConnection(new ServerLocation(NetworkUtils.getServerHostName(host), PORT4.intValue()));
  LogWriterUtils.getLogWriter().info("TEST - All pool connections are now acquired");
  closeCache(true);
  LogWriterUtils.getLogWriter().info("TEST - closed durable client for the second time");
  // Wait for the server to clean up the client resources
  // temporary fix for bug 38345.
  Wait.pause(2000);
  LogWriterUtils.getLogWriter().info("TEST - creating durable client for the third time");
  // Now we should connect to all of the servers we were originally connected to
  createCacheClient(2, 20);
  HashSet redundantServersAfterReconnect = new HashSet(pool.getRedundantNames());
  redundantServersAfterReconnect.add(pool.getPrimaryName());
  LogWriterUtils.getLogWriter().info("TEST - durable client created for the third time, now with servers " + redundantServersAfterReconnect);
  instance.determineAndVerfiyRedundantServers(redundantServersAfterReconnect);
  instance.determineAndVerfiyNonRedundantServers(redundantServersAfterReconnect);
  assertEquals(redundantServers, redundantServersAfterReconnect);
  // Now we wait to make sure the durable client expiration task isn't fired.
  Wait.pause(25000);
  LogWriterUtils.getLogWriter().info("TEST - Finished waiting for durable client expiration task");
  redundantServersAfterReconnect = new HashSet(pool.getRedundantNames());
  redundantServersAfterReconnect.add(pool.getPrimaryName());
  instance.determineAndVerfiyRedundantServers(redundantServersAfterReconnect);
  instance.determineAndVerfiyNonRedundantServers(redundantServersAfterReconnect);
  assertEquals(redundantServers, redundantServersAfterReconnect);
}
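The test drives the client through DUnit helpers (createCacheClient, closeCache), so the pool and durable settings are hidden. As a rough sketch, the same kind of durable-client setup can be expressed with Geode's public client API. The host names, ports, durable id, and the reading of the test's "2, 20" arguments as subscription redundancy and durable timeout are illustrative assumptions, not values taken from the test:

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;

public class DurableClientSketch {
  public static void main(String[] args) {
    // Assumed host/port/timeout values for illustration only.
    ClientCache cache = new ClientCacheFactory()
        .set("durable-client-id", "durable-39332")   // identifies the client so servers keep its queue
        .set("durable-client-timeout", "20")          // seconds the server retains the durable queue
        .addPoolServer("server-host", 40404)
        .addPoolServer("server-host", 40405)
        .setPoolSubscriptionEnabled(true)             // create a subscription (queue) connection
        .setPoolSubscriptionRedundancy(2)             // keep two redundant copies of the queue
        .create();

    cache.readyForEvents();  // the "clientReady" message the test comment refers to

    // ... register interest and do work ...

    cache.close(true);       // keepalive = true: the server keeps the durable queue, like closeCache(true)
  }
}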
use of org.apache.geode.distributed.internal.ServerLocation in project geode by apache.
the class RetrieveRegionMetadataFunction method bucketServerMap2ServerBucketSetMap.
/** Converts a bucket-to-server map into a server-to-bucket-set map. */
private HashMap<ServerLocation, HashSet<Integer>> bucketServerMap2ServerBucketSetMap(
    Map<Integer, List<BucketServerLocation66>> map) {
  HashMap<ServerLocation, HashSet<Integer>> serverBucketMap = new HashMap<>();
  for (Integer id : map.keySet()) {
    List<BucketServerLocation66> locations = map.get(id);
    for (BucketServerLocation66 location : locations) {
      ServerLocation server = new ServerLocation(location.getHostName(), location.getPort());
      if (location.isPrimary()) {
        HashSet<Integer> set = serverBucketMap.get(server);
        if (set == null) {
          set = new HashSet<>();
          serverBucketMap.put(server, set);
        }
        set.add(id);
        break;
      }
    }
  }
  return serverBucketMap;
}
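The method inverts a bucket-to-servers mapping by grouping each bucket id under the server that hosts its primary copy. A minimal, self-contained sketch of the same grouping idiom with plain Java types (the Location class and "host:port" strings below are stand-ins for the internal BucketServerLocation66 and ServerLocation classes) looks like this:

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

public class BucketMapInversionSketch {

  // Stand-in for BucketServerLocation66: a server address plus a primary flag.
  static class Location {
    final String hostAndPort;
    final boolean primary;
    Location(String hostAndPort, boolean primary) {
      this.hostAndPort = hostAndPort;
      this.primary = primary;
    }
  }

  static Map<String, HashSet<Integer>> invert(Map<Integer, List<Location>> bucketToServers) {
    Map<String, HashSet<Integer>> serverToBuckets = new HashMap<>();
    for (Map.Entry<Integer, List<Location>> entry : bucketToServers.entrySet()) {
      for (Location location : entry.getValue()) {
        if (location.primary) {
          // computeIfAbsent replaces the explicit "get, create if null, put" pattern above
          serverToBuckets.computeIfAbsent(location.hostAndPort, k -> new HashSet<>())
              .add(entry.getKey());
          break; // only the primary copy of each bucket is recorded
        }
      }
    }
    return serverToBuckets;
  }

  public static void main(String[] args) {
    Map<Integer, List<Location>> bucketToServers = new HashMap<>();
    bucketToServers.put(0, List.of(new Location("hostA:40404", true), new Location("hostB:40404", false)));
    bucketToServers.put(1, List.of(new Location("hostB:40404", true)));
    System.out.println(invert(bucketToServers)); // {hostA:40404=[0], hostB:40404=[1]}
  }
}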
use of org.apache.geode.distributed.internal.ServerLocation in project geode by apache.
the class RetrieveRegionMetadataFunction method execute.
@Override
public void execute(FunctionContext context) {
  LocalRegion region = (LocalRegion) ((InternalRegionFunctionContext) context).getDataSet();
  String regionPath = region.getFullPath();
  boolean isPartitioned = region.getDataPolicy().withPartitioning();
  String kTypeName = getTypeClassName(region.getAttributes().getKeyConstraint());
  String vTypeName = getTypeClassName(region.getAttributes().getValueConstraint());
  RegionMetadata metadata;
  if (!isPartitioned) {
    metadata = new RegionMetadata(regionPath, false, 0, null, kTypeName, vTypeName);
  } else {
    PartitionedRegion pregion = (PartitionedRegion) region;
    int totalBuckets = pregion.getAttributes().getPartitionAttributes().getTotalNumBuckets();
    Map<Integer, List<BucketServerLocation66>> bucketMap =
        pregion.getRegionAdvisor().getAllClientBucketProfiles();
    HashMap<ServerLocation, HashSet<Integer>> serverMap =
        bucketServerMap2ServerBucketSetMap(bucketMap);
    metadata = new RegionMetadata(regionPath, true, totalBuckets, serverMap, kTypeName, vTypeName);
  }
  ResultSender<RegionMetadata> sender = context.getResultSender();
  sender.lastResult(metadata);
}
use of org.apache.geode.distributed.internal.ServerLocation in project geode by apache.
the class GatewaySenderEventRemoteDispatcher method initializeConnection.
/**
* Initializes the <code>Connection</code>.
*
* @throws GatewaySenderException
*/
private void initializeConnection() throws GatewaySenderException, GemFireSecurityException {
  this.connectionLifeCycleLock.writeLock().lock();
  try {
    // Attempt to acquire a connection
    if (this.sender.getProxy() == null || this.sender.getProxy().isDestroyed()) {
      this.sender.initProxy();
    } else {
      this.processor.resetBatchId();
    }
    Connection con;
    try {
      if (this.sender.isParallel()) {
        /*
         * TODO - The use of acquireConnection should be removed from the gateway code. This
         * method is fine for tests, but these connections should really be managed inside the
         * pool code. If the gateway needs a persistent connection to a single server, it should
         * have the OpExecutor hold a reference to the connection (similar to the way we do with
         * thread-local connections). Use {@link ExecutablePool#setupServerAffinity(boolean)} for
         * gateway code.
         */
        con = this.sender.getProxy().acquireConnection();
        // For a parallel sender, the server location set here does not matter; the sender
        // acquires a new connection every time it needs one. The server location is saved
        // only for command purposes.
        sender.setServerLocation(con.getServer());
      } else {
        synchronized (this.sender.getLockForConcurrentDispatcher()) {
          ServerLocation server = this.sender.getServerLocation();
          if (server != null) {
            if (logger.isDebugEnabled()) {
              logger.debug("ServerLocation is: {}. Connecting to this serverLocation...", server);
            }
            con = this.sender.getProxy().acquireConnection(server);
          } else {
            if (logger.isDebugEnabled()) {
              logger.debug("ServerLocation is null. Creating new connection. ");
            }
            con = this.sender.getProxy().acquireConnection();
            // PRIMARY
            if (this.sender.isPrimary()) {
              if (sender.getServerLocation() == null) {
                sender.setServerLocation(con.getServer());
              }
              new UpdateAttributesProcessor(this.sender).distribute(false);
            }
          }
        }
      }
    } catch (ServerConnectivityException e) {
      this.failedConnectCount++;
      Throwable ex = null;
      if (e.getCause() instanceof GemFireSecurityException) {
        ex = e.getCause();
        if (logConnectionFailure()) {
          // only log this message once; another msg is logged once we connect
          logger.warn(LocalizedMessage.create(LocalizedStrings.GatewayEventRemoteDispatcher_0_COULD_NOT_CONNECT_1, new Object[] { this.processor.getSender().getId(), ex.getMessage() }));
        }
        throw new GatewaySenderException(ex);
      }
      List<ServerLocation> servers = this.sender.getProxy().getCurrentServers();
      String ioMsg = null;
      if (servers.size() == 0) {
        ioMsg = LocalizedStrings.GatewayEventRemoteDispatcher_THERE_ARE_NO_ACTIVE_SERVERS.toLocalizedString();
      } else {
        final StringBuilder buffer = new StringBuilder();
        for (ServerLocation server : servers) {
          String endpointName = String.valueOf(server);
          if (buffer.length() > 0) {
            buffer.append(", ");
          }
          buffer.append(endpointName);
        }
        ioMsg = LocalizedStrings.GatewayEventRemoteDispatcher_NO_AVAILABLE_CONNECTION_WAS_FOUND_BUT_THE_FOLLOWING_ACTIVE_SERVERS_EXIST_0.toLocalizedString(buffer.toString());
      }
      ex = new IOException(ioMsg);
      // Set the serverLocation to null so that a new connection can be
      // obtained on the next attempt
      this.sender.setServerLocation(null);
      if (this.failedConnectCount == 1) {
        // only log this message once; another msg is logged once we connect
        logger.warn(LocalizedMessage.create(LocalizedStrings.GatewayEventRemoteDispatcher__0___COULD_NOT_CONNECT, this.processor.getSender().getId()));
      }
      // same as the other exceptions that might occur in sendBatch.
      throw new GatewaySenderException(LocalizedStrings.GatewayEventRemoteDispatcher__0___COULD_NOT_CONNECT.toLocalizedString(this.processor.getSender().getId()), ex);
    }
    if (this.failedConnectCount > 0) {
      Object[] logArgs = new Object[] { this.processor.getSender().getId(), con, Integer.valueOf(this.failedConnectCount) };
      logger.info(LocalizedMessage.create(LocalizedStrings.GatewayEventRemoteDispatcher_0_USING_1_AFTER_2_FAILED_CONNECT_ATTEMPTS, logArgs));
      this.failedConnectCount = 0;
    } else {
      Object[] logArgs = new Object[] { this.processor.getSender().getId(), con };
      logger.info(LocalizedMessage.create(LocalizedStrings.GatewayEventRemoteDispatcher_0_USING_1, logArgs));
    }
    this.connection = con;
    this.processor.checkIfPdxNeedsResend(this.connection.getQueueStatus().getPdxSize());
  } catch (ConnectionDestroyedException e) {
    throw new GatewaySenderException(LocalizedStrings.GatewayEventRemoteDispatcher__0___COULD_NOT_CONNECT.toLocalizedString(this.processor.getSender().getId()), e);
  } finally {
    this.connectionLifeCycleLock.writeLock().unlock();
  }
}
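When no connection can be acquired, the catch block builds a comma-separated list of the currently known servers into the exception message with a StringBuilder. A small stand-alone sketch of the same join idiom in plain Java (the server names and message wording below are illustrative, not the localized strings used above) is:

import java.util.List;
import java.util.stream.Collectors;

public class ServerListMessageSketch {
  public static void main(String[] args) {
    // Stand-ins for ServerLocation.toString() values; real ones look like host:port.
    List<String> servers = List.of("host1:40404", "host2:40404", "host3:40405");

    // Equivalent to the StringBuilder loop: map each server to a string and join with ", ".
    String joined = servers.stream().map(String::valueOf).collect(Collectors.joining(", "));

    String message = servers.isEmpty()
        ? "There are no active servers."
        : "No available connection was found, but the following active servers exist: " + joined;
    System.out.println(message);
  }
}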
use of org.apache.geode.distributed.internal.ServerLocation in project geode by apache.
the class WANTestBase method getSenderToReceiverConnectionInfo.
public static Map getSenderToReceiverConnectionInfo(String senderId) {
  Set<GatewaySender> senders = cache.getGatewaySenders();
  GatewaySender sender = null;
  for (GatewaySender s : senders) {
    if (s.getId().equals(senderId)) {
      sender = s;
      break;
    }
  }
  Map connectionInfo = null;
  if (!sender.isParallel() && ((AbstractGatewaySender) sender).isPrimary()) {
    connectionInfo = new HashMap();
    GatewaySenderEventDispatcher dispatcher =
        ((AbstractGatewaySender) sender).getEventProcessor().getDispatcher();
    if (dispatcher instanceof GatewaySenderEventRemoteDispatcher) {
      ServerLocation serverLocation =
          ((GatewaySenderEventRemoteDispatcher) dispatcher).getConnection(false).getServer();
      connectionInfo.put("serverHost", serverLocation.getHostName());
      connectionInfo.put("serverPort", serverLocation.getPort());
    }
  }
  return connectionInfo;
}
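As a hypothetical usage sketch, a dUnit test built on WANTestBase could call this helper and read back the two keys it populates. The sender id "ln" is an assumption, and the fragment only runs inside such a test with a live WAN setup:

// Hypothetical caller inside a WANTestBase-derived test; "ln" is an assumed sender id.
Map connectionInfo = getSenderToReceiverConnectionInfo("ln");
if (connectionInfo != null) {
  String receiverHost = (String) connectionInfo.get("serverHost");
  int receiverPort = (Integer) connectionInfo.get("serverPort");
  System.out.println("Primary serial sender 'ln' is connected to " + receiverHost + ":" + receiverPort);
}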