
Example 1 with ClusteredCacheEntryListener

use of org.jivesoftware.openfire.cluster.ClusteredCacheEntryListener in project Openfire by igniterealtime.

In class LocalMUCRoomManager, method restoreCacheContentAfterJoin.

/**
 * When the local node is joining or leaving a cluster, {@link org.jivesoftware.util.cache.CacheFactory} will swap
 * the implementation used to instantiate caches. This causes the cache content to be 'reset': it will no longer
 * contain the data that's provided by the local node. This method restores data that's provided by the local node
 * in the cache. It is expected to be invoked right after joining
 * ({@link org.jivesoftware.openfire.cluster.ClusterEventListener#joinedCluster()}) or leaving
 * ({@link org.jivesoftware.openfire.cluster.ClusterEventListener#leftCluster()}) a cluster.
 *
 * This method checks whether local occupant nicknames clash with remote ones. If a clash is detected, both
 * occupants are kicked out of the room.
 *
 * @param occupantManager The occupant manager that contains local occupant registration.
 * @return The set of local occupants that is in the room after processing. This is the original set of local occupants of the room minus any occupants that were kicked out.
 */
public Set<OccupantManager.Occupant> restoreCacheContentAfterJoin(@Nonnull final OccupantManager occupantManager) {
    Log.debug("Restoring cache content for cache '{}' after we joined the cluster, by adding all MUC Rooms that are known to the local node.", ROOM_CACHE.getName());
    final Set<OccupantManager.Occupant> localOccupants = occupantManager.getLocalOccupants();
    final Set<OccupantManager.Occupant> occupantsToRetain = new HashSet<>(localOccupants);
    final Map<String, List<OccupantManager.Occupant>> localOccupantByRoom = localOccupants.stream().collect(Collectors.groupingBy(OccupantManager.Occupant::getRoomName));
    // The state of the rooms in the clustered cache should be modified to include our local occupants.
    for (Map.Entry<String, MUCRoom> localRoomEntry : localRooms.entrySet()) {
        final String roomName = localRoomEntry.getKey();
        Log.trace("Re-adding local room '{}' to cluster cache.", roomName);
        final Lock lock = ROOM_CACHE.getLock(roomName);
        lock.lock();
        try {
            final MUCRoom localRoom = localRoomEntry.getValue();
            if (!ROOM_CACHE.containsKey(roomName)) {
                Log.trace("Room was not known to the cluster. Added our representation.");
                ROOM_CACHE.put(roomName, localRoom);
            } else {
                Log.trace("Room was known to the cluster. Merging our local representation with cluster-provided data.");
                final MUCRoom roomInCluster = ROOM_CACHE.get(roomName);
                // Get all occupants that were provided by the local node, and add them to the cluster-representation.
                final List<OccupantManager.Occupant> localOccupantsToRestore = localOccupantByRoom.get(roomName);
                if (localOccupantsToRestore != null) {
                    Log.trace("These occupants of the room are recognized as living on our cluster node. Adding them from the cluster-based room: {}", localOccupantsToRestore.stream().map(OccupantManager.Occupant::getRealJID).map(JID::toString).collect(Collectors.joining(", ")));
                    for (OccupantManager.Occupant localOccupantToRestore : localOccupantsToRestore) {
                        // Get the Role for the local occupant from the local representation of the room, and add that to the cluster room.
                        final MUCRole localOccupantRole = localRoom.getOccupantByFullJID(localOccupantToRestore.getRealJID());
                        if (localOccupantRole == null) {
                            Log.trace("Trying to add occupant '{}' but no role for that occupant exists in the local room. Data inconsistency?", localOccupantToRestore.getRealJID());
                            continue;
                        } else {
                            Log.trace("Found localOccupantRole {} for localOccupantToRestore {}, client route = {}", localOccupantRole, localOccupantToRestore.getRealJID(), XMPPServer.getInstance().getRoutingTable().getClientRoute(localOccupantToRestore.getRealJID()));
                        }
                        // OF-2165
                        // Check if the nickname of this occupant already existed for another user in the room.
                        // If it did, we need to kick the users out. With sincere apologies.
                        String nickBeingAddedToRoom = localOccupantRole.getNickname();
                        boolean occupantWasKicked = false;
                        try {
                            final List<MUCRole> existingOccupantsWithSameNick = roomInCluster.getOccupantsByNickname(nickBeingAddedToRoom);
                            final List<JID> otherUsersWithSameNick = existingOccupantsWithSameNick.stream().map(MUCRole::getUserAddress).filter(bareJid -> !bareJid.equals(localOccupantRole.getUserAddress())).collect(Collectors.toList());
                            if (!otherUsersWithSameNick.isEmpty()) {
                                // We will be routing presences to several users. The routing table may not have
                                // finished updating the client routes. However those are needed for routing the
                                // stanzas, specifically the local client route. So do that first.
                                RoutingTable routingTable = XMPPServer.getInstance().getRoutingTable();
                                if (routingTable instanceof RoutingTableImpl) {
                                    RoutingTableImpl.class.cast(routingTable).addLocalClientRoutesToCache();
                                }
                                // There is at least one remote occupant, being a different user, with the same nick.
                                // Kick all.
                                otherUsersWithSameNick.forEach(jid -> kickOccupantBecauseOfNicknameCollision(roomInCluster, nickBeingAddedToRoom, jid, occupantManager));
                                final JID localUserToBeKickedFullJid = localOccupantToRestore.getRealJID();
                                // Now kick the local user. It has to be added to the room for a short instant so that it can actually be kicked out.
                                // Normally we would do this with:
                                // roomInCluster.addOccupantRole(localOccupantRole);
                                // But that notifies other nodes as well about the new occupant. We don't want that, this is
                                // entirely a local affair. Therefore perform two separate steps instead, without invoking
                                // occupant joined events.
                                roomInCluster.occupants.add(localOccupantRole);
                                occupantManager.registerOccupantJoinedLocally(localOccupantRole.getRoleAddress().asBareJID(), localOccupantRole.getUserAddress(), localOccupantRole.getNickname());
                                // Just added. Now kick out.
                                kickOccupantBecauseOfNicknameCollision(roomInCluster, nickBeingAddedToRoom, localUserToBeKickedFullJid, occupantManager);
                                // Inform other nodes of the kick, so they can remove the occupants from their occupant registration
                                occupantManager.occupantNickKicked(roomInCluster.getJID(), nickBeingAddedToRoom);
                                occupantWasKicked = true;
                            }
                        } catch (UserNotFoundException e) {
                            // This is actually the happy path. There is no remote occupant in the room with the same nick. Proceed.
                        }
                        if (!occupantWasKicked) {
                            roomInCluster.addOccupantRole(localOccupantRole);
                        } else {
                            occupantsToRetain.remove(localOccupantToRestore);
                        }
                    }
                }
                if (!roomInCluster.equals(localRoom)) {
                    // TODO: unsure if #equals() is enough to verify equality here.
                    Log.warn("Joined an Openfire cluster on which a room exists that clashes with a room that exists locally. Room name: '{}' on service '{}'", roomName, serviceName);
                    // TODO: handle collision. Two nodes have different rooms using the same name.
                }
                // Sync room back to make cluster aware of changes.
                Log.debug("Re-added local room '{}' to cache, with occupants: {}", roomName, roomInCluster.getOccupants().stream().map(MUCRole::getUserAddress).map(JID::toString).collect(Collectors.joining(", ")));
                ROOM_CACHE.put(roomName, roomInCluster);
                // TODO: update the local copy of the room with occupants, maybe?
            }
        } finally {
            lock.unlock();
        }
    }
    // Add a cluster listener to clean up locally stored data when another cluster node removes it from the cache.
    ROOM_CACHE.addClusteredCacheEntryListener(new ClusteredCacheEntryListener<String, MUCRoom>() {

        @Override
        public void entryAdded(@Nonnull String key, @Nullable MUCRoom newValue, @Nonnull NodeID nodeID) {
            // No action needed: this listener only cleans up local data when entries are removed or evicted elsewhere.
        }

        @Override
        public void entryRemoved(@Nonnull String key, @Nullable MUCRoom oldValue, @Nonnull NodeID nodeID) {
            localRooms.remove(key);
            final MultiUserChatService service = XMPPServer.getInstance().getMultiUserChatManager().getMultiUserChatService(serviceName);
            if (service != null) {
                service.getOccupantManager().roomDestroyed(new JID(key, service.getServiceDomain(), null));
            }
        }

        @Override
        public void entryUpdated(@Nonnull String key, @Nullable MUCRoom oldValue, @Nullable MUCRoom newValue, @Nonnull NodeID nodeID) {
            // No action needed: updates made by other nodes do not affect the locally stored rooms.
        }

        @Override
        public void entryEvicted(@Nonnull String key, @Nullable MUCRoom oldValue, @Nonnull NodeID nodeID) {
            localRooms.remove(key);
            final MultiUserChatService service = XMPPServer.getInstance().getMultiUserChatManager().getMultiUserChatService(serviceName);
            if (service != null) {
                service.getOccupantManager().roomDestroyed(new JID(key, service.getServiceDomain(), null));
            }
        }

        @Override
        public void mapCleared(@Nonnull NodeID nodeID) {
        }

        @Override
        public void mapEvicted(@Nonnull NodeID nodeID) {
        }
    }, false, false);
    return occupantsToRetain;
}
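
For orientation, here is a minimal, hypothetical sketch of a caller: a ClusterEventListener whose joinedCluster() callback delegates to restoreCacheContentAfterJoin and keeps the returned (possibly reduced) set of local occupants. The class name, constructor, fields and the package declaration are assumptions for illustration; only restoreCacheContentAfterJoin, OccupantManager.Occupant and the ClusterEventListener callbacks come from the sources shown on this page.

// Hypothetical caller, for illustration only. The package is assumed so that LocalMUCRoomManager
// and OccupantManager need no imports; adjust it to wherever those classes actually live.
package org.jivesoftware.openfire.muc.spi;

import java.util.Set;

import org.jivesoftware.openfire.cluster.ClusterEventListener;

public class MucClusterListener implements ClusterEventListener {

    private final LocalMUCRoomManager roomManager;
    private final OccupantManager occupantManager;

    public MucClusterListener(final LocalMUCRoomManager roomManager, final OccupantManager occupantManager) {
        this.roomManager = roomManager;
        this.occupantManager = occupantManager;
    }

    @Override
    public void joinedCluster() {
        // Re-add the rooms known to this node to the now-clustered cache. The returned set holds
        // the local occupants that survived nickname-collision handling; occupants that were
        // kicked because of a clash are absent from it.
        final Set<OccupantManager.Occupant> retained = roomManager.restoreCacheContentAfterJoin(occupantManager);
        // 'retained' could be used to update any further local bookkeeping of occupants.
    }

    @Override
    public void joinedCluster(final byte[] nodeID) {}

    @Override
    public void leftCluster() {}

    @Override
    public void leftCluster(final byte[] nodeID) {}

    @Override
    public void markedAsSeniorClusterMember() {}
}
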
Also used : Presence(org.xmpp.packet.Presence) GroupEventDispatcher(org.jivesoftware.openfire.event.GroupEventDispatcher) RoutingTableImpl(org.jivesoftware.openfire.spi.RoutingTableImpl) Date(java.util.Date) MultiUserChatService(org.jivesoftware.openfire.muc.MultiUserChatService) CacheFactory(org.jivesoftware.util.cache.CacheFactory) ClusteredCacheEntryListener(org.jivesoftware.openfire.cluster.ClusteredCacheEntryListener) LoggerFactory(org.slf4j.LoggerFactory) NotAllowedException(org.jivesoftware.openfire.muc.NotAllowedException) HashMap(java.util.HashMap) JID(org.xmpp.packet.JID) HashSet(java.util.HashSet) NodeID(org.jivesoftware.openfire.cluster.NodeID) Duration(java.time.Duration) Map(java.util.Map) XMPPServer(org.jivesoftware.openfire.XMPPServer) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) Cache(org.jivesoftware.util.cache.Cache) RoutingTable(org.jivesoftware.openfire.RoutingTable) Logger(org.slf4j.Logger) Collection(java.util.Collection) MUCRoom(org.jivesoftware.openfire.muc.MUCRoom) Set(java.util.Set) Collectors(java.util.stream.Collectors) UserNotFoundException(org.jivesoftware.openfire.user.UserNotFoundException) MUCRole(org.jivesoftware.openfire.muc.MUCRole) List(java.util.List) Lock(java.util.concurrent.locks.Lock) Collections(java.util.Collections)

Example 2 with ClusteredCacheEntryListener

use of org.jivesoftware.openfire.cluster.ClusteredCacheEntryListener in project Openfire by igniterealtime.

In class RoutingTableImpl, method joinedCluster.

@Override
public void joinedCluster() {
    // The local node joined a cluster.
    // 
    // Upon joining a cluster, clustered caches are reset to their clustered equivalent (by the swap from the local
    // cache implementation to the clustered cache implementation that's done in the implementation of
    // org.jivesoftware.util.cache.CacheFactory.joinedCluster). This means that they now hold data that's
    // available on all other cluster nodes. Data that's available on the local node needs to be added again.
    restoreCacheContent();
    Log.debug("Add the entry listeners to the corresponding caches.");
    // Register cache entry event listeners that will collect data for entries added by all other cluster nodes,
    // which is intended to be used (only) in the event of a cluster split.
    final ClusteredCacheEntryListener<String, ClientRoute> userCacheEntryListener = new ReverseLookupUpdatingCacheEntryListener<>(routeOwnersByClusterNode);
    final ClusteredCacheEntryListener<DomainPair, NodeID> serversCacheEntryListener = new ReverseLookupUpdatingCacheEntryListener<>(s2sDomainPairsByClusterNode);
    final ClusteredCacheEntryListener<String, HashSet<NodeID>> componentsCacheEntryListener = new ReverseLookupComputingCacheEntryListener<>(componentsByClusterNode, nodeIDS -> nodeIDS.stream().filter(n -> !n.equals(XMPPServer.getInstance().getNodeID())).collect(Collectors.toSet()));
    // Note that, when #joinedCluster() is fired, the cache will _always_ have been replaced, meaning that it won't
    // have old event listeners. When #leftCluster() fires, the cache will be destroyed. This takes away the need
    // to explicitly deregister the listener in that case.
    // Ensure that event listeners have been registered with the caches, before starting to simulate 'entryAdded' events,
    // to prevent the possibility of having entries that are missed by the simulation because of bad timing.
    usersCache.addClusteredCacheEntryListener(userCacheEntryListener, false, false);
    anonymousUsersCache.addClusteredCacheEntryListener(userCacheEntryListener, false, false);
    serversCache.addClusteredCacheEntryListener(serversCacheEntryListener, false, false);
    componentsCache.addClusteredCacheEntryListener(componentsCacheEntryListener, true, true);
    // This is not necessary for the usersSessions cache, because its content is being managed while the content
    // of users cache and anonymous users cache is being managed.
    Log.debug("Simulate 'entryAdded' for all data that already exists elsewhere in the cluster.");
    Stream.concat(usersCache.entrySet().stream(), anonymousUsersCache.entrySet().stream()).filter(entry -> !entry.getValue().getNodeID().equals(XMPPServer.getInstance().getNodeID())).forEach(entry -> userCacheEntryListener.entryAdded(entry.getKey(), entry.getValue(), entry.getValue().getNodeID()));
    serversCache.entrySet().stream().filter(entry -> !entry.getValue().equals(XMPPServer.getInstance().getNodeID())).forEach(entry -> serversCacheEntryListener.entryAdded(entry.getKey(), entry.getValue(), entry.getValue()));
    componentsCache.entrySet().forEach(entry -> {
        entry.getValue().forEach(nodeIdForComponent -> {
            // Iterate over all node ids on which the component is known
            if (!nodeIdForComponent.equals(XMPPServer.getInstance().getNodeID())) {
                // Here we pretend that the component has been added by the node id on which it is reported to
                // be available. This might not have been the case, but it is probably accurate. An alternative
                // approach is not easily available.
                componentsCacheEntryListener.entryAdded(entry.getKey(), entry.getValue(), nodeIdForComponent);
            }
        });
    });
    // Broadcast presence of local sessions to remote sessions when subscribed to presence.
    // Probe presences of remote sessions when subscribed to presence of local session.
    // Send pending subscription requests to local sessions from remote sessions.
    // Deliver offline messages sent to local sessions that were unavailable in other nodes.
    // Send available presences of local sessions to other resources of the same user.
    PresenceUpdateHandler presenceUpdateHandler = XMPPServer.getInstance().getPresenceUpdateHandler();
    for (LocalClientSession session : localRoutingTable.getClientRoutes()) {
        // Simulate that the local session has just become available
        session.setInitialized(false);
        // Simulate that current session presence has just been received
        presenceUpdateHandler.process(session.getPresence());
    }
// TODO OF-2067: the above also (re)generates events on the local node, where these events had already occurred. Ideally, that should not happen.
// TODO OF-2066: shouldn't a similar action be done on the other nodes, so that the node that just joined gets informed about all sessions living on other cluster nodes?
}
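
To make the listener contract used in both examples concrete, here is a small sketch of a ClusteredCacheEntryListener that maintains a per-node reverse index of cache keys, roughly in the spirit of the ReverseLookupUpdatingCacheEntryListener registered above. The class, method and field names are invented for illustration; the callback signatures mirror the anonymous listener in Example 1.

// Illustrative sketch only; the names used here are assumptions, not Openfire API.
import java.io.Serializable;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;

import org.jivesoftware.openfire.cluster.ClusteredCacheEntryListener;
import org.jivesoftware.openfire.cluster.NodeID;

public class KeysByNodeListener<K extends Serializable, V extends Serializable> implements ClusteredCacheEntryListener<K, V> {

    // Which cache keys are, as far as this node has observed, provided by which cluster node.
    private final ConcurrentMap<NodeID, Set<K>> keysByNode = new ConcurrentHashMap<>();

    @Override
    public void entryAdded(@Nonnull final K key, @Nullable final V newValue, @Nonnull final NodeID nodeID) {
        keysByNode.computeIfAbsent(nodeID, id -> ConcurrentHashMap.newKeySet()).add(key);
    }

    @Override
    public void entryRemoved(@Nonnull final K key, @Nullable final V oldValue, @Nonnull final NodeID nodeID) {
        final Set<K> keys = keysByNode.get(nodeID);
        if (keys != null) {
            keys.remove(key);
        }
    }

    @Override
    public void entryUpdated(@Nonnull final K key, @Nullable final V oldValue, @Nullable final V newValue, @Nonnull final NodeID nodeID) {
        // An update implies the entry is (still) provided by 'nodeID'.
        keysByNode.computeIfAbsent(nodeID, id -> ConcurrentHashMap.newKeySet()).add(key);
    }

    @Override
    public void entryEvicted(@Nonnull final K key, @Nullable final V oldValue, @Nonnull final NodeID nodeID) {
        final Set<K> keys = keysByNode.get(nodeID);
        if (keys != null) {
            keys.remove(key);
        }
    }

    @Override
    public void mapCleared(@Nonnull final NodeID nodeID) {
        // The whole cache was cleared (triggered by 'nodeID'): forget everything that was recorded.
        keysByNode.clear();
    }

    @Override
    public void mapEvicted(@Nonnull final NodeID nodeID) {
        keysByNode.clear();
    }

    public Set<K> keysProvidedBy(@Nonnull final NodeID nodeID) {
        return keysByNode.getOrDefault(nodeID, Collections.emptySet());
    }
}

Such a listener might be registered just like the listeners above, for example usersCache.addClusteredCacheEntryListener(new KeysByNodeListener<>(), false, false), with the two boolean flags chosen as in the existing calls.
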
Also used : Presence(org.xmpp.packet.Presence) LocalClientSession(org.jivesoftware.openfire.session.LocalClientSession) Forwarded(org.jivesoftware.openfire.forward.Forwarded) ClientSession(org.jivesoftware.openfire.session.ClientSession) Received(org.jivesoftware.openfire.carbons.Received) BasicModule(org.jivesoftware.openfire.container.BasicModule) CacheFactory(org.jivesoftware.util.cache.CacheFactory) ClusteredCacheEntryListener(org.jivesoftware.openfire.cluster.ClusteredCacheEntryListener) LoggerFactory(org.slf4j.LoggerFactory) JiveGlobals(org.jivesoftware.util.JiveGlobals) MessageRouter(org.jivesoftware.openfire.MessageRouter) ReverseLookupUpdatingCacheEntryListener(org.jivesoftware.util.cache.ReverseLookupUpdatingCacheEntryListener) PresenceUpdateHandler(org.jivesoftware.openfire.handler.PresenceUpdateHandler) Message(org.xmpp.packet.Message) OutgoingServerSession(org.jivesoftware.openfire.session.OutgoingServerSession) CacheUtil(org.jivesoftware.util.cache.CacheUtil) RemoteServerManager(org.jivesoftware.openfire.server.RemoteServerManager) Cache(org.jivesoftware.util.cache.Cache) RoutingTable(org.jivesoftware.openfire.RoutingTable) PresenceRouter(org.jivesoftware.openfire.PresenceRouter) ClusterManager(org.jivesoftware.openfire.cluster.ClusterManager) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Collectors(java.util.stream.Collectors) Stream(java.util.stream.Stream) ConnectionSettings(org.jivesoftware.openfire.session.ConnectionSettings) RemoteSessionLocator(org.jivesoftware.openfire.session.RemoteSessionLocator) java.util(java.util) PacketException(org.jivesoftware.openfire.PacketException) Multimap(com.google.common.collect.Multimap) ReverseLookupComputingCacheEntryListener(org.jivesoftware.util.cache.ReverseLookupComputingCacheEntryListener) JID(org.xmpp.packet.JID) Function(java.util.function.Function) UnauthorizedException(org.jivesoftware.openfire.auth.UnauthorizedException) ConcurrentMap(java.util.concurrent.ConcurrentMap) NodeID(org.jivesoftware.openfire.cluster.NodeID) XMPPServer(org.jivesoftware.openfire.XMPPServer) RoutableChannelHandler(org.jivesoftware.openfire.RoutableChannelHandler) RemotePacketRouter(org.jivesoftware.openfire.RemotePacketRouter) ClusterEventListener(org.jivesoftware.openfire.cluster.ClusterEventListener) DomainPair(org.jivesoftware.openfire.session.DomainPair) LocalOutgoingServerSession(org.jivesoftware.openfire.session.LocalOutgoingServerSession) Logger(org.slf4j.Logger) ConsistencyChecks(org.jivesoftware.util.cache.ConsistencyChecks) ExternalComponentManager(org.jivesoftware.openfire.component.ExternalComponentManager) AtomicLong(java.util.concurrent.atomic.AtomicLong) Lock(java.util.concurrent.locks.Lock) Packet(org.xmpp.packet.Packet) OutgoingSessionPromise(org.jivesoftware.openfire.server.OutgoingSessionPromise) Element(org.dom4j.Element) QName(org.dom4j.QName) IQRouter(org.jivesoftware.openfire.IQRouter) IQ(org.xmpp.packet.IQ)

Aggregations

Lock (java.util.concurrent.locks.Lock) 2
Collectors (java.util.stream.Collectors) 2
RoutingTable (org.jivesoftware.openfire.RoutingTable) 2
XMPPServer (org.jivesoftware.openfire.XMPPServer) 2
ClusteredCacheEntryListener (org.jivesoftware.openfire.cluster.ClusteredCacheEntryListener) 2
NodeID (org.jivesoftware.openfire.cluster.NodeID) 2
Cache (org.jivesoftware.util.cache.Cache) 2
Multimap (com.google.common.collect.Multimap) 1
Duration (java.time.Duration) 1
java.util (java.util) 1
Collection (java.util.Collection) 1
Collections (java.util.Collections) 1
Date (java.util.Date) 1
HashMap (java.util.HashMap) 1
HashSet (java.util.HashSet) 1
List (java.util.List) 1
Map (java.util.Map) 1
Set (java.util.Set) 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 1
ConcurrentMap (java.util.concurrent.ConcurrentMap) 1