use of org.jivesoftware.openfire.session.DomainPair in project Openfire by igniterealtime.
the class S2STestService method logSessionStatus.
/**
* Logs the status of the session.
*/
private void logSessionStatus() {
    final DomainPair pair = new DomainPair(XMPPServer.getInstance().getServerInfo().getXMPPDomain(), domain);
    OutgoingServerSession session = XMPPServer.getInstance().getSessionManager().getOutgoingServerSession(pair);
    if (session != null) {
        int connectionStatus = session.getStatus();
        switch (connectionStatus) {
            case Session.STATUS_CONNECTED:
                Log.info("Session is connected.");
                break;
            case Session.STATUS_CLOSED:
                Log.info("Session is closed.");
                break;
            case Session.STATUS_AUTHENTICATED:
                Log.info("Session is authenticated.");
                break;
        }
    } else {
        Log.info("Failed to establish server to server session.");
    }
}
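For context: a DomainPair identifies a server-to-server route by its local and remote domain. A minimal sketch of looking up such a route, reusing the same XMPPServer and SessionManager calls shown above (the helper name isS2SEstablished is hypothetical):

// Hedged sketch: returns true when an authenticated outgoing s2s session exists for the remote domain.
// The helper name is illustrative; the API calls mirror the snippet above.
private boolean isS2SEstablished(final String remoteDomain) {
    final String localDomain = XMPPServer.getInstance().getServerInfo().getXMPPDomain();
    final DomainPair pair = new DomainPair(localDomain, remoteDomain);
    final OutgoingServerSession session = XMPPServer.getInstance().getSessionManager().getOutgoingServerSession(pair);
    return session != null && session.getStatus() == Session.STATUS_AUTHENTICATED;
}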
use of org.jivesoftware.openfire.session.DomainPair in project Openfire by igniterealtime.
the class RemoteServerManager method setPermissionPolicy.
/**
* Sets the permission policy being used for new XMPP entities that are trying to
* connect to the server. There are two types of policies: 1) blacklist: where any entity
* is allowed to connect to the server except for those listed in the black list and
* 2) whitelist: where only the entities listed in the white list are allowed to connect to
* the server.
*
* @param policy the new PermissionPolicy to use.
*/
public static void setPermissionPolicy(PermissionPolicy policy) {
    JiveGlobals.setProperty(ConnectionSettings.Server.PERMISSION_SETTINGS, policy.toString());
    // Check if the connected servers can remain connected to the server.
    for (String hostname : SessionManager.getInstance().getIncomingServers()) {
        if (!canAccess(hostname)) {
            for (Session session : SessionManager.getInstance().getIncomingServerSessions(hostname)) {
                Log.debug("Closing session for hostname '{}' as a changed permission policy takes effect. Affected session: {}", hostname, session);
                session.close();
            }
        }
    }
    for (DomainPair domainPair : SessionManager.getInstance().getOutgoingDomainPairs()) {
        if (!canAccess(domainPair.getRemote())) {
            Session session = SessionManager.getInstance().getOutgoingServerSession(domainPair);
            if (session != null) { // Guard against the session having disappeared concurrently.
                Log.debug("Closing session as a changed permission policy takes effect. Affected session: {}", session);
                session.close();
                // After the session has been closed, inform all listeners as well.
                ServerSessionEventDispatcher.dispatchEvent(session, ServerSessionEventDispatcher.EventType.session_destroyed);
            }
        }
    }
}
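A hedged usage sketch: switching the policy at runtime. The enum constant name whitelist is an assumption based on the Javadoc above, which names the two policy types.

// Sketch, assuming PermissionPolicy exposes the two policies named in the Javadoc.
// Switching to a whitelist immediately closes sessions for servers that are not
// explicitly allowed, via the two loops shown above.
RemoteServerManager.setPermissionPolicy(RemoteServerManager.PermissionPolicy.whitelist);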
use of org.jivesoftware.openfire.session.DomainPair in project Openfire by igniterealtime.
the class RoutingTableImpl method removeClientRoute.
@Override
public boolean removeClientRoute(JID route) {
    if (route.getResource() == null) {
        throw new IllegalArgumentException("For removing a client route, the argument 'route' must be a full JID, but was " + route);
    }
    boolean anonymous = false;
    boolean sessionRemoved = false;
    String address = route.toString();
    ClientRoute clientRoute;
    Lock lockU = usersCache.getLock(address);
    lockU.lock();
    int cacheSizeBefore = usersCache.size();
    try {
        clientRoute = usersCache.remove(address);
    } finally {
        lockU.unlock();
    }
    Log.debug("Removed users cache entry for {} / {}, changing entry count from {} to {}", route, clientRoute, cacheSizeBefore, usersCache.size());
    if (clientRoute == null) {
        Lock lockA = anonymousUsersCache.getLock(address);
        lockA.lock();
        try {
            clientRoute = anonymousUsersCache.remove(address);
            anonymous = true;
        } finally {
            lockA.unlock();
        }
    }
    final String bareJID = route.toBareJID();
    if (usersSessionsCache.containsKey(bareJID)) {
        // The user session still needs to be removed.
        if (clientRoute == null) {
            Log.warn("Client route not found for route {}, while the user session still exists. Current content of users cache: {}", bareJID, usersCache);
        }
        Lock lock = usersSessionsCache.getLock(bareJID);
        lock.lock();
        try {
            if (anonymous) {
                sessionRemoved = usersSessionsCache.remove(bareJID) != null;
            } else {
                HashSet<String> jids = usersSessionsCache.get(bareJID);
                if (jids != null) {
                    sessionRemoved = jids.remove(route.toString());
                    if (!jids.isEmpty()) {
                        usersSessionsCache.put(bareJID, jids);
                    } else {
                        usersSessionsCache.remove(bareJID);
                    }
                }
            }
        } finally {
            lock.unlock();
        }
    }
    Log.debug("Removing client route {} from local routing table", route);
    localRoutingTable.removeRoute(new DomainPair("", route.toString()));
    return sessionRemoved;
}
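A hedged usage sketch: the method requires a full JID (one with a resource part); a bare JID trips the IllegalArgumentException guard at the top. The routingTable reference and the user address below are illustrative.

// Sketch: removing the client route for one specific session.
final JID fullJid = new JID("alice@example.org/desktop"); // hypothetical full JID
final boolean sessionWasRemoved = routingTable.removeClientRoute(fullJid); // 'routingTable' is an assumed RoutingTableImpl reference
Log.debug("User session removed from the users sessions cache: {}", sessionWasRemoved);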
use of org.jivesoftware.openfire.session.DomainPair in project Openfire by igniterealtime.
the class RoutingTableImpl method detectAndFixBrokenCaches.
/**
* When the local node drops out of the cluster (for example, due to a network failure), then from the perspective
* of that node, all other nodes leave the cluster. Under certain circumstances, this can mean that the local node
* no longer has access to all data (or its backups) that is maintained in the clustered caches. From the
* perspective of the remaining node, this data is lost. (OF-2297/OF-2300). To prevent this being an issue, most
* caches have supporting local data structures that maintain a copy of the most critical bits of the data stored in
* the clustered cache. This local copy can be used to detect and/or correct such a loss in data. This is performed
* by this method.
*
* Note that this method is expected to be called as part of {@link #leftCluster(byte[])} only. It will therefore
* mostly restore data that is considered local to the server node, and won't bother with data that's considered
* to be pertinent to other cluster nodes only (as that data will be removed directly after invocation of this
* method anyway).
*
* Note that this method does <em>not</em> process the users sessions cache, as that's a bit of an odd one out. This
* cache is being processed in {@link #restoreUsersSessionsCache()}.
*/
private void detectAndFixBrokenCaches() {
    // Ensure that 'serversCache' has content that reflects the locally available s2s connections (we do not need to
    // restore the s2s connections on other nodes, as those will be dropped right after invoking this method anyway).
    Log.info("Looking for local server routes that have 'dropped out' of the cache (likely as a result of a network failure).");
    final Collection<LocalOutgoingServerSession> localServerRoutes = localRoutingTable.getServerRoutes();
    final Set<DomainPair> cachedServerRoutes = serversCache.keySet();
    final Set<DomainPair> serverRoutesNotInCache = localServerRoutes.stream().map(LocalOutgoingServerSession::getOutgoingDomainPairs).flatMap(Collection::stream).collect(Collectors.toSet());
    serverRoutesNotInCache.removeAll(cachedServerRoutes);
    if (serverRoutesNotInCache.isEmpty()) {
        Log.info("Found no local server routes that are missing from the cache.");
    } else {
        Log.warn("Found {} server routes that we know locally, but that are no longer in the cache. This can occur when a cluster node fails, but should not occur otherwise. Missing server routes: {}", serverRoutesNotInCache.size(), serverRoutesNotInCache.stream().map(DomainPair::toString).collect(Collectors.joining(", ")));
        for (final DomainPair missing : serverRoutesNotInCache) {
            Log.info("Restoring server route: {}", missing);
            serversCache.put(missing, XMPPServer.getInstance().getNodeID());
        }
    }

    // Ensure that 'componentsCache' has content that reflects the locally available components. The component route
    // cache is special in the sense that an entry is not directly related to a single cluster node. Therefore, we
    // need to ensure that all entries are in there, before surgically removing those that really need to be removed.
    // Restore cache from 'remote' data structure.
    Log.info("Looking for and restoring component routes that have 'dropped out' of the cache (likely as a result of a network failure).");
    componentsByClusterNode.forEach((key, value) -> {
        for (final String componentDomain : value) {
            CacheUtil.addValueToMultiValuedCache(componentsCache, componentDomain, key, HashSet::new);
        }
    });
    // Restore cache from 'local' data structure.
    localRoutingTable.getComponentRoute().forEach(route -> CacheUtil.addValueToMultiValuedCache(componentsCache, route.getAddress().getDomain(), server.getNodeID(), HashSet::new));

    // Ensure that 'usersCache' has content that reflects the locally available client connections (we do not need
    // to restore the client connections on other nodes, as those will be dropped right after invoking this method anyway).
    Log.info("Looking for local (non-anonymous) client routes that have 'dropped out' of the cache (likely as a result of a network failure).");
    final Collection<LocalClientSession> localClientRoutes = localRoutingTable.getClientRoutes();
    final Map<String, LocalClientSession> localUserRoutes = localClientRoutes.stream().filter(r -> !r.isAnonymousUser()).collect(Collectors.toMap((LocalClientSession localClientSession) -> localClientSession.getAddress().toString(), Function.identity()));
    final Set<String> cachedUsersRoutes = usersCache.keySet();
    final Set<String> userRoutesNotInCache = localUserRoutes.values().stream().map(LocalClientSession::getAddress).map(JID::toString).collect(Collectors.toSet());
    userRoutesNotInCache.removeAll(cachedUsersRoutes);
    if (userRoutesNotInCache.isEmpty()) {
        Log.info("Found no local (non-anonymous) user routes that are missing from the cache.");
    } else {
        Log.warn("Found {} (non-anonymous) user routes that we know locally, but that are no longer in the cache. This can occur when a cluster node fails, but should not occur otherwise.", userRoutesNotInCache.size());
        for (String missing : userRoutesNotInCache) {
            Log.info("Restoring (non-anonymous) user route: {}", missing);
            final LocalClientSession localClientSession = localUserRoutes.get(missing);
            // We've established this with the filtering above.
            assert localClientSession != null;
            addClientRoute(localClientSession.getAddress(), localClientSession);
        }
    }

    // Ensure that 'anonymousUsersCache' has content that reflects the locally available client connections (we do not need
    // to restore the client connections on other nodes, as those will be dropped right after invoking this method anyway).
    Log.info("Looking for local anonymous client routes that have 'dropped out' of the cache (likely as a result of a network failure).");
    final Map<String, LocalClientSession> localAnonymousUserRoutes = localClientRoutes.stream().filter(LocalClientSession::isAnonymousUser).collect(Collectors.toMap((LocalClientSession localClientSession) -> localClientSession.getAddress().toString(), Function.identity()));
    final Set<String> cachedAnonymousUsersRoutes = anonymousUsersCache.keySet();
    // Defensive copy - we should not modify localAnonymousUserRoutes!
    final Set<String> anonymousUserRoutesNotInCache = new HashSet<>(localAnonymousUserRoutes.keySet());
    anonymousUserRoutesNotInCache.removeAll(cachedAnonymousUsersRoutes);
    if (anonymousUserRoutesNotInCache.isEmpty()) {
        Log.info("Found no local anonymous user routes that are missing from the cache.");
    } else {
        Log.warn("Found {} anonymous user routes that we know locally, but that are no longer in the cache. This can occur when a cluster node fails, but should not occur otherwise.", anonymousUserRoutesNotInCache.size());
        for (String missing : anonymousUserRoutesNotInCache) {
            Log.info("Restoring anonymous user route: {}", missing);
            final LocalClientSession localClientSession = localAnonymousUserRoutes.get(missing);
            // We've established this with the filtering above.
            assert localClientSession != null;
            addClientRoute(localClientSession.getAddress(), localClientSession);
        }
    }
}
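All three cache repairs above follow the same pattern: collect the entries known locally, subtract the keys still present in the clustered cache, and re-insert whatever is missing. A distilled sketch of that pattern, with illustrative names (this is not Openfire API):

// Generic detect-and-restore pattern, as applied to each cache above.
private static <K, V> void restoreMissingEntries(final Map<K, V> localView, final Map<K, V> clusteredCache) {
    final Set<K> missing = new HashSet<>(localView.keySet()); // defensive copy, as in the anonymous-routes case above
    missing.removeAll(clusteredCache.keySet()); // entries we know locally, but that the cache lost
    for (final K key : missing) {
        clusteredCache.put(key, localView.get(key)); // restore from the local copy
    }
}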
use of org.jivesoftware.openfire.session.DomainPair in project Openfire by igniterealtime.
the class RoutingTableImpl method leftCluster.
@Override
public void leftCluster(byte[] nodeID) {
    // Another node left the cluster.
    final NodeID nodeIDOfLostNode = NodeID.getInstance(nodeID);
    Log.debug("Cluster node {} just left the cluster.", nodeIDOfLostNode);

    // When the local node drops out of the cluster (for example, due to a network failure), then from the perspective
    // of that node, all other nodes leave the cluster. This method is invoked for each of them. In certain
    // circumstances, this can mean that the local node no longer has access to all data (or its backups) that is
    // maintained in the clustered caches. From the perspective of the remaining node, this data is lost. (OF-2297/OF-2300).
    // To prevent this being an issue, most caches have supporting local data structures that maintain a copy of the most
    // critical bits of the data stored in the clustered cache, which is to be used to detect and/or correct such a
    // loss of data. This is done in the next few lines of this method.
    // This excludes the Users Sessions Cache, which is a bit of an odd duckling. That one is processed later in this method.
    detectAndFixBrokenCaches();

    // When a peer server leaves the cluster, any remote routes that were associated with the defunct node must be
    // dropped from the routing caches (and supporting data structures) that are shared by the remaining cluster member(s).
    // Note: All remaining cluster nodes will be in a race to clean up the same data. We cannot depend on cluster
    // seniority to appoint a 'single' cleanup node, because for a small moment we may not have a senior cluster member.

    // Remove outgoing server routes accessed through the node that left the cluster.
    final Set<DomainPair> remoteServers = s2sDomainPairsByClusterNode.remove(nodeIDOfLostNode);
    // Clean up remote data from the cache, but note that the return value cannot be guaranteed to be correct/complete (do not use it)!
    CacheUtil.removeValueFromCache(serversCache, nodeIDOfLostNode);
    if (remoteServers != null) {
        for (final DomainPair domainPair : remoteServers) {
            Log.debug("Removing server route for {} that is no longer available because cluster node {} left the cluster.", domainPair, nodeIDOfLostNode);
            removeServerRoute(domainPair);
        }
    }
    Log.info("Cluster node {} just left the cluster. A total of {} outgoing server sessions were living there, and are no longer available.", nodeIDOfLostNode, remoteServers == null ? 0 : remoteServers.size());

    // Remove component routes hosted on the node that left the cluster.
    final Set<String> componentJids = componentsByClusterNode.remove(nodeIDOfLostNode);
    // Clean up remote data from the cache, but note that the return value cannot be guaranteed to be correct/complete (do not use it)!
    CacheUtil.removeValueFromMultiValuedCache(componentsCache, nodeIDOfLostNode);
    int lostComponentsCount = 0;
    if (componentJids != null) {
        Log.debug("Removing node '{}' from componentsByClusteredNode: {}", nodeIDOfLostNode, componentJids);
        for (final String componentJid : componentJids) {
            if (removeComponentRoute(new JID(componentJid), nodeIDOfLostNode)) {
                Log.debug("Removing component route for {} that is no longer available because cluster node {} left the cluster.", componentJid, nodeIDOfLostNode);
                lostComponentsCount++;
            }
        }
    }
    Log.info("Cluster node {} just left the cluster. A total of {} component sessions are now no longer available as a result.", nodeIDOfLostNode, lostComponentsCount);

    // Remove client routes hosted on the node that left the cluster.
    final Set<String> removed = routeOwnersByClusterNode.remove(nodeIDOfLostNode);
    final AtomicLong removedSessionCount = new AtomicLong();
    if (removed != null) {
        removed.forEach(fullJID -> {
            Log.debug("Removing client route for {} that is no longer available because cluster node {} left the cluster.", fullJID, nodeIDOfLostNode);
            final JID offlineJID = new JID(fullJID);
            removeClientRoute(offlineJID);
            removedSessionCount.incrementAndGet();
        });
    }
    Log.debug("Cluster node {} just left the cluster. A total of {} client routes were living there, and are no longer available.", nodeIDOfLostNode, removedSessionCount.get());

    // With all of the other caches fixed and adjusted, process the Users Sessions Cache.
    restoreUsersSessionsCache();

    // Now that the users sessions cache is restored, we can proceed to send presence updates for all removed users.
    if (removed != null) {
        removed.forEach(fullJID -> {
            final JID offlineJID = new JID(fullJID);
            try {
                final Presence presence = new Presence(Presence.Type.unavailable);
                presence.setFrom(offlineJID);
                XMPPServer.getInstance().getPresenceRouter().route(presence);
                // TODO: OF-2302 This broadcasts the presence over the entire (remaining) cluster, which is too much because it is done by each remaining cluster node.
            } catch (final PacketException e) {
                Log.error("Remote node {} left the cluster. Users on that node are no longer available. To reflect this, we're broadcasting presence unavailable on their behalf. While doing this for '{}', this caused an exception to occur.", nodeIDOfLostNode, fullJID, e);
            }
        });
    }
}
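For context: leftCluster(byte[]) is one of the ClusterEventListener callbacks, so this cleanup runs once per departed node on every remaining cluster member. A hedged sketch of how such a listener is registered (the anonymous listener body is illustrative):

// Sketch: registering for cluster membership events via Openfire's ClusterManager.
ClusterManager.addListener(new ClusterEventListener() {
    @Override public void joinedCluster() { /* the local node joined a cluster */ }
    @Override public void joinedCluster(byte[] nodeID) { /* another node joined */ }
    @Override public void leftCluster() { /* the local node left the cluster */ }
    @Override public void leftCluster(byte[] nodeID) { /* another node left: clean up its routes */ }
    @Override public void markedAsSeniorClusterMember() { /* the local node became the senior member */ }
});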