Use of org.jivesoftware.openfire.cluster.NodeID in project Openfire by igniterealtime.
The class ClientSessionTask, method run().
public void run() {
    if (getSession() == null || getSession().isClosed()) {
        logger.error("Session not found for JID: " + address);
        return;
    }
    super.run();
    ClientSession session = (ClientSession) getSession();
    if (session instanceof RemoteClientSession) {
        // The session is hosted by another cluster node, so log this unexpected case.
        Cache<String, ClientRoute> usersCache = CacheFactory.createCache(RoutingTableImpl.C2S_CACHE_NAME);
        ClientRoute route = usersCache.get(address.toString());
        NodeID nodeID = route.getNodeID();
        logger.warn("Found remote session instead of local session. JID: " + address + " found in Node: " + nodeID + " and local node is: " + XMPPServer.getInstance().getNodeID());
    }
    if (operation == Operation.isInitialized) {
        if (session instanceof RemoteClientSession) {
            // Something is wrong, since the session should be local instead of remote.
            // Assume some default value.
            result = true;
        } else {
            result = session.isInitialized();
        }
    } else if (operation == Operation.incrementConflictCount) {
        if (session instanceof RemoteClientSession) {
            // Something is wrong, since the session should be local instead of remote.
            // Assume some default value.
            result = 2;
        } else {
            result = session.incrementConflictCount();
        }
    }
}
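For context, a task like this is normally not invoked directly: the node that needs the answer dispatches it to the cluster node that owns the session and waits for the result. The following is a minimal sketch of that dispatch, assuming the two-argument ClientSessionTask constructor and the Operation enum location shown here (both are assumptions, not verified against the class); CacheFactory.doSynchronousClusterTask is Openfire's API for running a task on a specific node and blocking for its result.

// Sketch only: look up which node owns the session, then run the task there.
Cache<String, ClientRoute> usersCache = CacheFactory.createCache(RoutingTableImpl.C2S_CACHE_NAME);
ClientRoute route = usersCache.get(address.toString());
if (route != null && !XMPPServer.getInstance().getNodeID().equals(route.getNodeID())) {
    // Constructor arguments are assumed for illustration.
    ClientSessionTask task = new ClientSessionTask(address, RemoteSessionTask.Operation.isInitialized);
    // Executes on the remote node identified by the route, returning that node's result.
    Object initialized = CacheFactory.doSynchronousClusterTask(task, route.getNodeID().toByteArray());
}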
Use of org.jivesoftware.openfire.cluster.NodeID in project Openfire by igniterealtime.
The class CacheListener, method handleMapEvent().
private void handleMapEvent(MapEvent event) {
    NodeID nodeID = NodeID.getInstance(StringUtils.getBytes(event.getMember().getUuid()));
    // Ignore events that were triggered by this node.
    if (!XMPPServer.getInstance().getNodeID().equals(nodeID)) {
        Set<String> sessionJIDs = clusterListener.lookupJIDList(nodeID, cacheName);
        sessionJIDs.clear();
    }
}
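As an aside, the guard above relies on NodeID equality being value-based: a NodeID wraps the raw bytes of the cluster member's identifier, and two instances created from equal bytes compare as equal. A small illustration (the UUID string is made up):

// NodeID.getInstance returns a NodeID backed by the given bytes; equals() compares the
// byte contents, not object identity, which is what makes the check above work.
byte[] uuidBytes = StringUtils.getBytes("0f8fad5b-d9cb-469f-a165-70867728950e"); // hypothetical member UUID
NodeID ours = NodeID.getInstance(uuidBytes);
NodeID theirs = NodeID.getInstance(uuidBytes.clone());
assert ours.equals(theirs); // value-based equality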
Use of org.jivesoftware.openfire.cluster.NodeID in project Openfire by igniterealtime.
The class PresenceUpdateHandler, method joinedCluster().
@Override
public void joinedCluster() {
    // The local node joined a cluster.
    //
    // Upon joining a cluster, clustered caches are reset to their clustered equivalent (by the swap from the local
    // cache implementation to the clustered cache implementation that's done in the implementation of
    // org.jivesoftware.util.cache.CacheFactory.joinedCluster). This means that they now hold data that's
    // available on all other cluster nodes. Data that's available on the local node needs to be added again.
    restoreCacheContent();

    final DirectedPresenceListener listener = new DirectedPresenceListener();

    Log.debug("Simulate 'entryAdded' for all data that already exists elsewhere in the cluster.");
    directedPresencesCache.entrySet().stream()
        .filter(entry -> !entry.getValue().isEmpty() && !Arrays.equals(entry.getValue().peek().getNodeID(), XMPPServer.getInstance().getNodeID().toByteArray()))
        .forEach(entry -> {
            // Should be impossible, given the filter above.
            assert entry.getValue().peek() != null;
            // We are assuming that the nodeID for every directed presence in the collection is equal.
            final NodeID nodeID = NodeID.getInstance(entry.getValue().peek().getNodeID());
            listener.entryAdded(entry.getKey(), entry.getValue(), nodeID);
        });

    // Add the entry listener to the cache. Note that, when #joinedCluster() fires, the cache will _always_ have been
    // replaced, meaning that it won't have old event listeners. When #leftCluster() fires, the cache will be destroyed.
    // This takes away the need to explicitly deregister the listener in that case.
    // This event handler needs to operate on cache values, so we can't reduce overhead by suppressing value transmission.
    final boolean includeValues = true;
    directedPresencesCache.addClusteredCacheEntryListener(listener, includeValues, false);
}
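The stream pipeline above packs the filter and the listener invocation together. The equivalent plain loop below, shown purely for illustration, spells the logic out; it assumes the cache values are queues of DirectedPresence, which is what the peek() calls suggest.

// Illustrative rewrite of the stream as a loop; names match the snippet above.
final byte[] localNodeID = XMPPServer.getInstance().getNodeID().toByteArray();
for (Map.Entry<String, ConcurrentLinkedQueue<DirectedPresence>> entry : directedPresencesCache.entrySet()) {
    final DirectedPresence first = entry.getValue().peek();
    if (first == null || Arrays.equals(first.getNodeID(), localNodeID)) {
        continue; // An empty entry, or data owned by the local node: nothing to simulate.
    }
    listener.entryAdded(entry.getKey(), entry.getValue(), NodeID.getInstance(first.getNodeID()));
}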
Use of org.jivesoftware.openfire.cluster.NodeID in project Openfire by igniterealtime.
The class RoutingTableImpl, method leftCluster().
@Override
public void leftCluster(byte[] nodeID) {
    // Another node left the cluster.
    final NodeID nodeIDOfLostNode = NodeID.getInstance(nodeID);
    Log.debug("Cluster node {} just left the cluster.", nodeIDOfLostNode);

    // When the local node drops out of the cluster (for example, due to a network failure), then from the perspective
    // of that node, all other nodes leave the cluster. This method is invoked for each of them. In certain
    // circumstances, this can mean that the local node no longer has access to all data (or its backups) that is
    // maintained in the clustered caches. From the perspective of the remaining node, this data is lost (OF-2297/OF-2300).
    // To prevent this from being an issue, most caches have supporting local data structures that maintain a copy of the
    // most critical bits of the data stored in the clustered cache, which is used to detect and/or correct such a
    // loss of data. This is done in the next few lines of this method.
    // This excludes the Users Sessions Cache, which is a bit of an odd duckling. That one is processed later in this method.
    detectAndFixBrokenCaches();

    // When a peer server leaves the cluster, any remote routes that were associated with the defunct node must be
    // dropped from the routing caches (and supporting data structures) that are shared by the remaining cluster member(s).
    // Note: All remaining cluster nodes will be in a race to clean up the same data. We cannot depend on cluster
    // seniority to appoint a 'single' cleanup node, because for a small moment we may not have a senior cluster member.

    // Remove outgoing server routes accessed through the node that left the cluster.
    final Set<DomainPair> remoteServers = s2sDomainPairsByClusterNode.remove(nodeIDOfLostNode);
    // Clean up remote data from the cache, but note that the return value can't be guaranteed to be correct/complete (do not use it)!
    CacheUtil.removeValueFromCache(serversCache, nodeIDOfLostNode);
    if (remoteServers != null) {
        for (final DomainPair domainPair : remoteServers) {
            Log.debug("Removing server route for {} that is no longer available because cluster node {} left the cluster.", domainPair, nodeIDOfLostNode);
            removeServerRoute(domainPair);
        }
    }
    Log.info("Cluster node {} just left the cluster. A total of {} outgoing server sessions were living there, and are no longer available.", nodeIDOfLostNode, remoteServers == null ? 0 : remoteServers.size());

    // Remove component routes hosted in the node that left the cluster.
    final Set<String> componentJids = componentsByClusterNode.remove(nodeIDOfLostNode);
    // Clean up remote data from the cache, but note that the return value can't be guaranteed to be correct/complete (do not use it)!
    CacheUtil.removeValueFromMultiValuedCache(componentsCache, nodeIDOfLostNode);
    int lostComponentsCount = 0;
    if (componentJids != null) {
        Log.debug("Removing node '{}' from componentsByClusteredNode: {}", nodeIDOfLostNode, componentJids);
        for (final String componentJid : componentJids) {
            if (removeComponentRoute(new JID(componentJid), nodeIDOfLostNode)) {
                Log.debug("Removing component route for {} that is no longer available because cluster node {} left the cluster.", componentJid, nodeIDOfLostNode);
                lostComponentsCount++;
            }
        }
    }
    Log.info("Cluster node {} just left the cluster. A total of {} component sessions are no longer available as a result.", nodeIDOfLostNode, lostComponentsCount);

    // Remove client routes hosted in the node that left the cluster.
    final Set<String> removed = routeOwnersByClusterNode.remove(nodeIDOfLostNode);
    final AtomicLong removedSessionCount = new AtomicLong();
    if (removed != null) {
        removed.forEach(fullJID -> {
            Log.debug("Removing client route for {} that is no longer available because cluster node {} left the cluster.", fullJID, nodeIDOfLostNode);
            final JID offlineJID = new JID(fullJID);
            removeClientRoute(offlineJID);
            removedSessionCount.incrementAndGet();
        });
    }
    Log.debug("Cluster node {} just left the cluster. A total of {} client routes were living there, and are no longer available.", nodeIDOfLostNode, removedSessionCount.get());

    // With all of the other caches fixed and adjusted, process the Users Sessions Cache.
    restoreUsersSessionsCache();

    // Now that the users sessions cache is restored, we can proceed with sending presence updates for all removed users.
    if (removed != null) {
        removed.forEach(fullJID -> {
            final JID offlineJID = new JID(fullJID);
            try {
                final Presence presence = new Presence(Presence.Type.unavailable);
                presence.setFrom(offlineJID);
                XMPPServer.getInstance().getPresenceRouter().route(presence);
                // TODO: OF-2302 This broadcasts the presence over the entire (remaining) cluster, which is too much because it is done by each remaining cluster node.
            } catch (final PacketException e) {
                Log.error("Remote node {} left the cluster. Users on that node are no longer available. To reflect this, we're broadcasting presence unavailable on their behalf. While doing this for '{}', this caused an exception to occur.", nodeIDOfLostNode, fullJID, e);
            }
        });
    }
}
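The supporting local data structures mentioned above (s2sDomainPairsByClusterNode, componentsByClusterNode, routeOwnersByClusterNode) only help if they are kept in sync on every write to the corresponding clustered cache. Below is a minimal sketch of that write path for component routes; the method name is hypothetical, and the CacheUtil.addValueToMultiValuedCache signature is assumed rather than verified.

// Sketch: mirror every clustered-cache write in a plain local map keyed by NodeID, so the
// association survives even if the clustered cache becomes unreachable (see OF-2297/OF-2300).
void registerComponentRoute(String address, NodeID nodeID) { // hypothetical helper
    CacheUtil.addValueToMultiValuedCache(componentsCache, address, nodeID, HashSet::new);
    componentsByClusterNode.computeIfAbsent(nodeID, k -> ConcurrentHashMap.newKeySet()).add(address);
}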
Use of org.jivesoftware.openfire.cluster.NodeID in project Openfire by igniterealtime.
The class RoutingTableImpl, method removeComponentRoute().
/**
 * Removes a local or remote component route.
 *
 * @param route the route of the component to be removed.
 * @param nodeID the node to which the to-be-removed component was connected.
 */
private boolean removeComponentRoute(JID route, NodeID nodeID) {
    String address = route.getDomain();
    boolean removed = false;
    Lock lock = componentsCache.getLock(address);
    lock.lock();
    try {
        HashSet<NodeID> nodes = componentsCache.get(address);
        if (nodes != null) {
            nodes.remove(nodeID);
            if (nodes.isEmpty()) {
                componentsCache.remove(address);
                removed = true;
            } else {
                componentsCache.put(address, nodes);
            }
        }
    } finally {
        lock.unlock();
    }
    if (removed || XMPPServer.getInstance().getNodeID().equals(nodeID)) {
        localRoutingTable.removeRoute(new DomainPair("", address));
    }
    return removed;
}
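A note on the get/remove/put sequence above: in clustered mode, Openfire caches serialize their values, so componentsCache.get(address) generally returns a copy of the stored set. Mutating that copy does not change the cache contents, which is why the modified set must be written back with put(), and why the whole read-modify-write runs under the cache's entry lock. A condensed, generic sketch of the pattern (cache, key, and nodeID are placeholders):

// The canonical get-modify-put pattern for multi-valued clustered cache entries.
Lock lock = cache.getLock(key);              // lock the entry across the read-modify-write
lock.lock();
try {
    HashSet<NodeID> values = cache.get(key); // in clustered mode this is a copy
    if (values != null && values.remove(nodeID)) {
        if (values.isEmpty()) {
            cache.remove(key);               // last value gone: drop the entry entirely
        } else {
            cache.put(key, values);          // write the modified copy back
        }
    }
} finally {
    lock.unlock();
}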