Example 26 with NodeID

Use of org.jivesoftware.openfire.cluster.NodeID in the Openfire project by igniterealtime.

The class ClusterListener, method leaveCluster:

private synchronized void leaveCluster() {
    if (isDone()) {
        // not a cluster member
        return;
    }
    seniorClusterMember = false;
    // Clean up all traces. This will set all remote sessions as unavailable
    List<NodeID> nodeIDs = new ArrayList<NodeID>(nodeSessions.keySet());
    // Trigger event. Wait until the listeners have processed the event. Caches will be populated
    // again with local content.
    ClusterManager.fireLeftCluster();
    if (!XMPPServer.getInstance().isShuttingDown()) {
        for (NodeID key : nodeIDs) {
            // Clean up directed presences sent from entities hosted in the leaving node to local entities
            // Clean up directed presences sent to entities hosted in the leaving node from local entities
            cleanupDirectedPresences(key);
            // Clean up no longer valid sessions
            cleanupPresences(key);
        }
        // Remove traces of directed presences sent from local entities to handlers that no longer exist
        // At this point c2s sessions are gone from the routing table so we can identify expired sessions
        XMPPServer.getInstance().getPresenceUpdateHandler().removedExpiredPresences();
    }
    logger.info("Left cluster as node: " + cluster.getLocalMember().getUuid());
    done = true;
}
Also used : ArrayList(java.util.ArrayList) NodeID(org.jivesoftware.openfire.cluster.NodeID)
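
Openfire exposes these cluster membership events to other components through the ClusterEventListener interface. The sketch below is a minimal, hypothetical listener (the class name and comments are illustrative, not taken from the example above); it shows where logic like leaveCluster() would be triggered from.

import org.jivesoftware.openfire.cluster.ClusterEventListener;
import org.jivesoftware.openfire.cluster.ClusterManager;

// Minimal sketch, assuming the standard ClusterEventListener callbacks.
public class MembershipLogger implements ClusterEventListener {

    @Override
    public void joinedCluster() {
        // This node joined the cluster: caches now hold clustered content.
    }

    @Override
    public void joinedCluster(byte[] nodeID) {
        // A remote node joined the cluster.
    }

    @Override
    public void leftCluster() {
        // This node left the cluster: restore local-only state,
        // analogous to the leaveCluster() logic above.
    }

    @Override
    public void leftCluster(byte[] nodeID) {
        // A remote node left: clean up its sessions,
        // analogous to cleanupNode(...) in the next example.
    }

    @Override
    public void markedAsSeniorClusterMember() {
        // This node was promoted to senior cluster member.
    }
}

A listener is registered once, e.g. via ClusterManager.addListener(new MembershipLogger()).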

Example 27 with NodeID

Use of org.jivesoftware.openfire.cluster.NodeID in the Openfire project by igniterealtime.

The class ClusterListener, method cleanupNode:

/**
 * Executes close logic for each session hosted in the remote node that is
 * no longer available. This logic is similar to the close listeners used by
 * the {@link SessionManager}.<p>
 *
 * If the node that went down performed its own clean-up logic then the other
 * cluster nodes will have the correct state. In that case this method
 * will not find any sessions to remove.<p>
 *
 * If this operation is too expensive and we are still in a cluster, the work
 * could be distributed across the cluster to speed it up.
 *
 * @param key the key that identifies the node that is no longer available.
 */
private void cleanupNode(NodeID key) {
    // TODO Fork in another process and even ask other nodes to process work
    RoutingTable routingTable = XMPPServer.getInstance().getRoutingTable();
    RemoteSessionLocator sessionLocator = XMPPServer.getInstance().getRemoteSessionLocator();
    SessionManager manager = XMPPServer.getInstance().getSessionManager();
    // TODO Consider removing each cached entry once processed instead of all at the end. Could be less error-prone.
    Set<String> registeredUsers = lookupJIDList(key, C2SCache.getName());
    if (!registeredUsers.isEmpty()) {
        for (String fullJID : new ArrayList<String>(registeredUsers)) {
            JID offlineJID = new JID(fullJID);
            manager.removeSession(null, offlineJID, false, true);
        }
    }
    Set<String> anonymousUsers = lookupJIDList(key, anonymousC2SCache.getName());
    if (!anonymousUsers.isEmpty()) {
        for (String fullJID : new ArrayList<String>(anonymousUsers)) {
            JID offlineJID = new JID(fullJID);
            manager.removeSession(null, offlineJID, true, true);
        }
    }
    // Remove outgoing server sessions hosted in node that left the cluster
    Set<String> remoteServers = lookupJIDList(key, S2SCache.getName());
    if (!remoteServers.isEmpty()) {
        for (String fullJID : new ArrayList<String>(remoteServers)) {
            JID serverJID = new JID(fullJID);
            routingTable.removeServerRoute(serverJID);
        }
    }
    Set<String> components = lookupJIDList(key, componentsCache.getName());
    if (!components.isEmpty()) {
        for (String address : new ArrayList<String>(components)) {
            Lock lock = CacheFactory.getLock(address, componentsCache);
            try {
                lock.lock();
                Set<NodeID> nodes = (Set<NodeID>) componentsCache.get(address);
                if (nodes != null) {
                    nodes.remove(key);
                    if (nodes.isEmpty()) {
                        componentsCache.remove(address);
                    } else {
                        componentsCache.put(address, nodes);
                    }
                }
            } finally {
                lock.unlock();
            }
        }
    }
    Set<String> sessionInfo = lookupJIDList(key, sessionInfoCache.getName());
    if (!sessionInfo.isEmpty()) {
        for (String session : new ArrayList<String>(sessionInfo)) {
            // Registered sessions will be removed by the clean-up of the session info cache.
            sessionInfoCache.remove(session);
        }
    }
    Set<String> componentSessions = lookupJIDList(key, componentSessionsCache.getName());
    if (!componentSessions.isEmpty()) {
        for (String domain : new ArrayList<String>(componentSessions)) {
            // Registered subdomains of the external component will be removed by the clean-up of the component cache.
            componentSessionsCache.remove(domain);
        }
    }
    Set<String> multiplexers = lookupJIDList(key, multiplexerSessionsCache.getName());
    if (!multiplexers.isEmpty()) {
        for (String fullJID : new ArrayList<String>(multiplexers)) {
            // c2s connections connected to the node that went down will be cleaned up by the c2s logic above.
            // If the CM went down and the node is up, then connections will be cleaned up as usual.
            multiplexerSessionsCache.remove(fullJID);
        }
    }
    Set<String> incomingSessions = lookupJIDList(key, incomingServerSessionsCache.getName());
    if (!incomingSessions.isEmpty()) {
        for (String streamIDValue : new ArrayList<>(incomingSessions)) {
            StreamID streamID = BasicStreamIDFactory.createStreamID(streamIDValue);
            IncomingServerSession session = sessionLocator.getIncomingServerSession(key.toByteArray(), streamID);
            // Remove all the hostnames that were registered for this server session
            for (String hostname : session.getValidatedDomains()) {
                manager.unregisterIncomingServerSession(hostname, session);
            }
        }
    }
    nodeSessions.remove(key);
// TODO Make sure that routing table has no entry referring to node that is gone
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) JID(org.xmpp.packet.JID) IncomingServerSession(org.jivesoftware.openfire.session.IncomingServerSession) ArrayList(java.util.ArrayList) Lock(java.util.concurrent.locks.Lock) RemoteSessionLocator(org.jivesoftware.openfire.session.RemoteSessionLocator) NodeID(org.jivesoftware.openfire.cluster.NodeID)
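
The snippet above converts a NodeID back to its raw byte form with key.toByteArray() when calling the session locator. A minimal sketch of that round-trip, using NodeID.getInstance and XMPPServer.getNodeID() (the helper class and method names are hypothetical):

import java.util.Arrays;
import org.jivesoftware.openfire.XMPPServer;
import org.jivesoftware.openfire.cluster.NodeID;

public final class NodeIdentity {

    // Hypothetical helper: does a raw node identifier refer to the local node?
    static boolean isLocalNode(byte[] rawId) {
        NodeID nodeID = NodeID.getInstance(rawId);  // wrap the raw bytes in a NodeID
        byte[] roundTrip = nodeID.toByteArray();    // same bytes, as passed to getIncomingServerSession(...)
        assert Arrays.equals(rawId, roundTrip);
        return nodeID.equals(XMPPServer.getInstance().getNodeID());
    }
}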

Example 28 with NodeID

Use of org.jivesoftware.openfire.cluster.NodeID in the Openfire project by igniterealtime.

The class NodeRuntimeStats, method getNodeInfo:

/**
 * Returns a Map of NodeInfo objects keyed by cluster NodeID.
 * A NodeInfo object is a collection of various node statistics.
 *
 * @return a Map of NodeInfo objects.
 */
public static Map<NodeID, NodeInfo> getNodeInfo() {
    // Run cluster-wide stats query
    Collection<Object> taskResult = CacheFactory.doSynchronousClusterTask(new NodeInfoTask(), true);
    Map<NodeID, NodeInfo> result = new HashMap<NodeID, NodeInfo>();
    for (Object tr : taskResult) {
        NodeInfo nodeInfo = (NodeInfo) tr;
        NodeID nodeId = NodeID.getInstance(nodeInfo.getNodeId());
        result.put(nodeId, nodeInfo);
    }
    return result;
}
Also used : HashMap(java.util.HashMap) NodeID(org.jivesoftware.openfire.cluster.NodeID)
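
A caller might consume the returned map as sketched below. This assumes the same NodeInfo type used by getNodeInfo() above; the printed output is illustrative.

// Minimal usage sketch: iterate the per-node stats keyed by NodeID.
// (requires java.util.Map plus the NodeID and NodeInfo imports shown above)
Map<NodeID, NodeInfo> stats = NodeRuntimeStats.getNodeInfo();
for (Map.Entry<NodeID, NodeInfo> entry : stats.entrySet()) {
    // The NodeID identifies the cluster node; the NodeInfo carries its runtime stats.
    System.out.println("Node " + entry.getKey() + " -> " + entry.getValue());
}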

Example 29 with NodeID

Use of org.jivesoftware.openfire.cluster.NodeID in the Openfire project by igniterealtime.

The class ConsistencyChecks, method generateReportForRoutingTableComponentRoutes:

/**
 * Verifies that #componentsCache, #localRoutingTable#getComponentRoute and #componentsByClusterNode of
 * {@link org.jivesoftware.openfire.spi.RoutingTableImpl} are in a consistent state.
 * <p>
 * Note that this operation can be costly in terms of resource usage. Use with caution in large / busy systems.
 * <p>
 * The returned multi-map can contain up to four keys: info, fail, pass, data. All entry values are a human readable
 * description of a checked characteristic. When the state is consistent, no 'fail' entries will be returned.
 *
 * @param componentsCache         The cache that is used to share data across cluster nodes
 * @param localComponentRoutes    The data structure that keeps track of what data was added to the cache by the local cluster node.
 * @param componentsByClusterNode The data structure that keeps track of what data was added to the cache by the remote cluster nodes.
 * @return A consistency state report.
 */
public static Multimap<String, String> generateReportForRoutingTableComponentRoutes(
        @Nonnull final Cache<String, HashSet<NodeID>> componentsCache,
        @Nonnull final Collection<RoutableChannelHandler> localComponentRoutes,
        @Nonnull final HashMap<NodeID, Set<String>> componentsByClusterNode) {
    final Set<NodeID> clusterNodeIDs = ClusterManager.getNodesInfo().stream().map(ClusterNodeInfo::getNodeID).collect(Collectors.toSet());
    // Take a snapshot to reduce the chance of the data changing while the diagnostics are being performed
    final ConcurrentMap<String, HashSet<NodeID>> cache = new ConcurrentHashMap<>(componentsCache);
    final List<String> localComponentRoutesAddressing = localComponentRoutes.stream().map(r -> r.getAddress().toString()).collect(Collectors.toList());
    final Set<String> localComponentRoutesAddressingDuplicates = CollectionUtils.findDuplicates(localComponentRoutesAddressing);
    final List<String> remoteComponentRoutesAddressingWithNodeId = new ArrayList<>();
    for (Map.Entry<NodeID, Set<String>> entry : componentsByClusterNode.entrySet()) {
        for (String item : entry.getValue()) {
            remoteComponentRoutesAddressingWithNodeId.add(item + " (" + entry.getKey() + ")");
        }
    }
    final Multimap<String, String> result = HashMultimap.create();
    result.put("info", String.format("The cache named %s is used to share data in the cluster, which contains %d component routes.", componentsCache.getName(), cache.size()));
    result.put("info", String.format("LocalRoutingTable's getComponentRoute() response is used to track 'local' data to be restored after a cache switch-over. It tracks %d routes.", localComponentRoutesAddressing.size()));
    result.put("info", String.format("The field componentsByClusterNode is used to track data in the cache from every other cluster node. It contains %d routes for %d cluster nodes.", componentsByClusterNode.values().stream().reduce(0, (subtotal, values) -> subtotal + values.size(), Integer::sum), componentsByClusterNode.keySet().size()));
    result.put("data", String.format("%s contains these entries (these are shared in the cluster):\n%s", componentsCache.getName(), cache.entrySet().stream().map(e -> e.getKey() + "on nodes: " + e.getValue().stream().map(NodeID::toString).collect(Collectors.joining(", "))).collect(Collectors.joining("\n"))));
    result.put("data", String.format("LocalRoutingTable's getComponentRoute() response contains these entries (these represent 'local' data):\n%s", localComponentRoutes.stream().map(RoutableChannelHandler::getAddress).map(JID::toString).collect(Collectors.joining("\n"))));
    result.put("data", String.format("componentsByClusterNode contains these entries (these represent 'remote' data):\n%s", String.join("\n", remoteComponentRoutesAddressingWithNodeId)));
    if (localComponentRoutesAddressingDuplicates.isEmpty()) {
        result.put("pass", "There is no overlap in addressing of LocalRoutingTable's getComponentRoute() response (They are all unique values).");
    } else {
        result.put("fail", String.format("There is overlap in addressing of LocalRoutingTable's getComponentRoute() response (They are not all unique values). These %d values are duplicated: %s", localComponentRoutesAddressingDuplicates.size(), String.join(", ", localComponentRoutesAddressingDuplicates)));
    }
    if (!componentsByClusterNode.containsKey(XMPPServer.getInstance().getNodeID())) {
        result.put("pass", "componentsByClusterNode does not track data for the local cluster node.");
    } else {
        result.put("fail", "componentsByClusterNode tracks data for the local cluster node.");
    }
    if (clusterNodeIDs.containsAll(componentsByClusterNode.keySet())) {
        result.put("pass", "componentsByClusterNode tracks data for cluster nodes that are recognized in the cluster.");
    } else {
        result.put("fail", String.format("componentsByClusterNode tracks data for cluster nodes that are not recognized. All cluster nodeIDs as recognized: %s All cluster nodeIDs for which data is tracked: %s.", clusterNodeIDs.stream().map(NodeID::toString).collect(Collectors.joining(", ")), componentsByClusterNode.keySet().stream().map(NodeID::toString).collect(Collectors.joining(", "))));
    }
    final Set<String> nonCachedLocalComponentRouteAddressing = localComponentRoutesAddressing.stream()
        .filter(v -> !cache.containsKey(v) || !cache.get(v).contains(XMPPServer.getInstance().getNodeID()))
        .collect(Collectors.toSet());
    if (nonCachedLocalComponentRouteAddressing.isEmpty()) {
        result.put("pass", String.format("All elements in LocalRoutingTable's getComponentRoute() response exist in %s.", componentsCache.getName()));
    } else {
        result.put("fail", String.format("Not all elements in of LocalRoutingTable's getComponentRoute() response exist in %s. These %d entries do not: %s", componentsCache.getName(), nonCachedLocalComponentRouteAddressing.size(), nonCachedLocalComponentRouteAddressing.stream().map(v -> v + " on " + XMPPServer.getInstance().getNodeID()).collect(Collectors.joining(", "))));
    }
    final Set<String> nonCachedRemoteComponentRouteAddressing = new HashSet<>();
    for (final Map.Entry<NodeID, Set<String>> entry : componentsByClusterNode.entrySet()) {
        final NodeID remoteNodeID = entry.getKey();
        final Set<String> remoteComponentAddresses = entry.getValue();
        for (final String remoteComponentAddress : remoteComponentAddresses) {
            if (!cache.containsKey(remoteComponentAddress) || !cache.get(remoteComponentAddress).contains(remoteNodeID)) {
                nonCachedRemoteComponentRouteAddressing.add(remoteComponentAddress + " on " + remoteNodeID);
            }
        }
    }
    if (nonCachedRemoteComponentRouteAddressing.isEmpty()) {
        result.put("pass", String.format("All elements in componentsByClusterNode exist in %s.", componentsCache.getName()));
    } else {
        result.put("fail", String.format("Not all component routes in componentsByClusterNode exist in %s. These %d entries do not: %s", componentsCache.getName(), nonCachedRemoteComponentRouteAddressing.size(), String.join(", ", nonCachedLocalComponentRouteAddressing)));
    }
    final Set<String> nonLocallyStoredCachedComponentRouteAddressing = new HashSet<>();
    for (final Map.Entry<String, HashSet<NodeID>> entry : cache.entrySet()) {
        final String componentAddress = entry.getKey();
        final Set<NodeID> nodeIDs = entry.getValue();
        for (final NodeID nodeID : nodeIDs) {
            if (nodeID.equals(XMPPServer.getInstance().getNodeID())) {
                if (localComponentRoutes.stream().noneMatch(v -> v.getAddress().toString().equals(componentAddress))) {
                    nonLocallyStoredCachedComponentRouteAddressing.add(componentAddress + " on " + nodeID + " (the local cluster node)");
                }
            } else {
                if (!componentsByClusterNode.containsKey(nodeID) || !componentsByClusterNode.get(nodeID).contains(componentAddress)) {
                    nonLocallyStoredCachedComponentRouteAddressing.add(componentAddress + " on " + nodeID);
                }
            }
        }
    }
    if (nonLocallyStoredCachedComponentRouteAddressing.isEmpty()) {
        result.put("pass", String.format("All cache entries of %s exist in componentsByClusterNode and/or LocalRoutingTable's getComponentRoute() response.", componentsCache.getName()));
    } else {
        result.put("fail", String.format("Not all cache entries of %s exist in componentsByClusterNode and/or LocalRoutingTable's getComponentRoute() response. These %d entries do not: %s", componentsCache.getName(), nonLocallyStoredCachedComponentRouteAddressing.size(), String.join(", ", nonLocallyStoredCachedComponentRouteAddressing)));
    }
    return result;
}
Also used : java.util(java.util) ClusterManager(org.jivesoftware.openfire.cluster.ClusterManager) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) MUCRoom(org.jivesoftware.openfire.muc.MUCRoom) Multimap(com.google.common.collect.Multimap) StreamID(org.jivesoftware.openfire.StreamID) JID(org.xmpp.packet.JID) Collectors(java.util.stream.Collectors) org.jivesoftware.openfire.session(org.jivesoftware.openfire.session) ClientRoute(org.jivesoftware.openfire.spi.ClientRoute) ConcurrentMap(java.util.concurrent.ConcurrentMap) OccupantManager(org.jivesoftware.openfire.muc.spi.OccupantManager) ClusterNodeInfo(org.jivesoftware.openfire.cluster.ClusterNodeInfo) MUCRole(org.jivesoftware.openfire.muc.MUCRole) HashMultimap(com.google.common.collect.HashMultimap) NodeID(org.jivesoftware.openfire.cluster.NodeID) XMPPServer(org.jivesoftware.openfire.XMPPServer) RoutableChannelHandler(org.jivesoftware.openfire.RoutableChannelHandler) Nonnull(javax.annotation.Nonnull) CollectionUtils(org.jivesoftware.util.CollectionUtils)
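
The report is a Guava Multimap whose keys are limited to info, fail, pass, and data. A hypothetical consumer might surface only the failures, as sketched below (the method name and log wording are assumptions; Multimap.get returns an empty collection, never null, for absent keys):

import com.google.common.collect.Multimap;
import org.slf4j.Logger;

// Hypothetical consumer: log each failure, or a summary when everything passed.
static void logReport(Multimap<String, String> report, Logger log) {
    if (report.get("fail").isEmpty()) {
        log.info("Consistency check passed ({} pass entries).", report.get("pass").size());
    } else {
        for (String failure : report.get("fail")) {
            log.warn("Consistency check failed: {}", failure);
        }
    }
}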

Example 30 with NodeID

Use of org.jivesoftware.openfire.cluster.NodeID in the Openfire project by igniterealtime.

The class ReverseLookupComputingCacheEntryListener, method entryUpdated:

@Override
public void entryUpdated(@Nonnull final K key, @Nullable final V oldValue, @Nullable final V newValue, @Nonnull final NodeID nodeID) {
    final Set<NodeID> nodesToAdd = ownageDeducer.apply(newValue);
    // Remove the key from reverse-lookup entries for nodes that no longer own it.
    final Iterator<Map.Entry<NodeID, Set<K>>> iter = reverseCacheRepresentation.entrySet().iterator();
    while (iter.hasNext()) {
        final Map.Entry<NodeID, Set<K>> existingEntry = iter.next();
        final NodeID existingEntryNodeID = existingEntry.getKey();
        if (!nodesToAdd.contains(existingEntryNodeID)) {
            existingEntry.getValue().remove(key);
            if (existingEntry.getValue().isEmpty()) {
                iter.remove();
            }
        }
    }
    // Add entries for only the latest state of the value.
    add(key, nodesToAdd);
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) NodeID(org.jivesoftware.openfire.cluster.NodeID) ConcurrentMap(java.util.concurrent.ConcurrentMap) Map(java.util.Map)
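
The method ends by delegating to add(key, nodesToAdd), which is not part of this snippet. A plausible sketch of that helper, assuming reverseCacheRepresentation is the ConcurrentMap<NodeID, Set<K>> iterated above (the actual implementation may differ):

// Hypothetical sketch of the add(...) helper invoked above: record that each
// node in nodesToAdd now owns an entry for this key.
// (requires java.util.Set, java.util.concurrent.ConcurrentHashMap, and the NodeID import shown above)
private void add(final K key, final Set<NodeID> nodesToAdd) {
    for (final NodeID nodeID : nodesToAdd) {
        reverseCacheRepresentation
            .computeIfAbsent(nodeID, id -> ConcurrentHashMap.newKeySet())
            .add(key);
    }
}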

Aggregations

NodeID (org.jivesoftware.openfire.cluster.NodeID): 41
JID (org.xmpp.packet.JID): 18
Lock (java.util.concurrent.locks.Lock): 15
java.util (java.util): 12
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 12
ConcurrentMap (java.util.concurrent.ConcurrentMap): 12
Collectors (java.util.stream.Collectors): 11
Nonnull (javax.annotation.Nonnull): 10
XMPPServer (org.jivesoftware.openfire.XMPPServer): 10
UnauthorizedException (org.jivesoftware.openfire.auth.UnauthorizedException): 9
MUCRole (org.jivesoftware.openfire.muc.MUCRole): 8
MUCRoom (org.jivesoftware.openfire.muc.MUCRoom): 8
Logger (org.slf4j.Logger): 8
LoggerFactory (org.slf4j.LoggerFactory): 8
Nullable (javax.annotation.Nullable): 7
ClusterManager (org.jivesoftware.openfire.cluster.ClusterManager): 7
CacheFactory (org.jivesoftware.util.cache.CacheFactory): 7
Map (java.util.Map): 6
PacketException (org.jivesoftware.openfire.PacketException): 6
DomainPair (org.jivesoftware.openfire.session.DomainPair): 6