Use of org.jivesoftware.openfire.cluster.ClusteredCacheEntryListener in project Openfire by igniterealtime.
The class LocalMUCRoomManager, method restoreCacheContentAfterJoin:
/**
 * When the local node is joining or leaving a cluster, {@link org.jivesoftware.util.cache.CacheFactory} will swap
 * the implementation used to instantiate caches. This causes the cache content to be 'reset': it will no longer
 * contain the data that's provided by the local node. This method restores data that's provided by the local node
 * in the cache. It is expected to be invoked right after joining
 * ({@link org.jivesoftware.openfire.cluster.ClusterEventListener#joinedCluster()}) or leaving the cluster.
 *
 * This method checks whether local occupant nicknames clash with remote ones. If a clash is detected, both
 * occupants are kicked out of the room.
 *
 * @param occupantManager The occupant manager that contains local occupant registration.
 * @return The set of local occupants that is in the room after processing. This is the original set of local
 *         occupants of the room, minus any occupants that were kicked out.
 */
public Set<OccupantManager.Occupant> restoreCacheContentAfterJoin(@Nonnull final OccupantManager occupantManager) {
    Log.debug("Restoring cache content for cache '{}' after we joined the cluster, by adding all MUC Rooms that are known to the local node.", ROOM_CACHE.getName());

    final Set<OccupantManager.Occupant> localOccupants = occupantManager.getLocalOccupants();
    final Set<OccupantManager.Occupant> occupantsToRetain = new HashSet<>(localOccupants);
    final Map<String, List<OccupantManager.Occupant>> localOccupantByRoom = localOccupants.stream().collect(Collectors.groupingBy(OccupantManager.Occupant::getRoomName));

    // The state of the rooms in the clustered cache should be modified to include our local occupants.
    for (Map.Entry<String, MUCRoom> localRoomEntry : localRooms.entrySet()) {
        final String roomName = localRoomEntry.getKey();
        Log.trace("Re-adding local room '{}' to cluster cache.", roomName);
        final Lock lock = ROOM_CACHE.getLock(roomName);
        lock.lock();
        try {
            final MUCRoom localRoom = localRoomEntry.getValue();
            if (!ROOM_CACHE.containsKey(roomName)) {
                Log.trace("Room was not known to the cluster. Added our representation.");
                ROOM_CACHE.put(roomName, localRoom);
            } else {
                Log.trace("Room was known to the cluster. Merging our local representation with cluster-provided data.");
                final MUCRoom roomInCluster = ROOM_CACHE.get(roomName);

                // Get all occupants that were provided by the local node, and add them to the cluster-representation.
                final List<OccupantManager.Occupant> localOccupantsToRestore = localOccupantByRoom.get(roomName);
                if (localOccupantsToRestore != null) {
Log.trace("These occupants of the room are recognized as living on our cluster node. Adding them from the cluster-based room: {}", localOccupantsToRestore.stream().map(OccupantManager.Occupant::getRealJID).map(JID::toString).collect(Collectors.joining(", ")));
                    for (OccupantManager.Occupant localOccupantToRestore : localOccupantsToRestore) {
                        // Get the Role for the local occupant from the local representation of the room, and add that to the cluster room.
                        final MUCRole localOccupantRole = localRoom.getOccupantByFullJID(localOccupantToRestore.getRealJID());
                        if (localOccupantRole == null) {
                            Log.trace("Trying to add occupant '{}' but no role for that occupant exists in the local room. Data inconsistency?", localOccupantToRestore.getRealJID());
                            continue;
                        } else {
                            Log.trace("Found localOccupantRole {} for localOccupantToRestore {}, client route = {}", localOccupantRole, localOccupantToRestore.getRealJID(), XMPPServer.getInstance().getRoutingTable().getClientRoute(localOccupantToRestore.getRealJID()));
                        }

                        // OF-2165
                        // Check if the nickname of this occupant already existed for another user in the room.
                        // If it did, we need to kick the users out. With sincere apologies.
                        String nickBeingAddedToRoom = localOccupantRole.getNickname();
                        boolean occupantWasKicked = false;
                        try {
                            final List<MUCRole> existingOccupantsWithSameNick = roomInCluster.getOccupantsByNickname(nickBeingAddedToRoom);
                            final List<JID> otherUsersWithSameNick = existingOccupantsWithSameNick.stream().map(MUCRole::getUserAddress).filter(bareJid -> !bareJid.equals(localOccupantRole.getUserAddress())).collect(Collectors.toList());
                            if (!otherUsersWithSameNick.isEmpty()) {
                                // We will be routing presences to several users. The routing table may not have
                                // finished updating the client routes. However those are needed for routing the
                                // stanzas, specifically the local client route. So do that first.
                                RoutingTable routingTable = XMPPServer.getInstance().getRoutingTable();
                                if (routingTable instanceof RoutingTableImpl) {
                                    RoutingTableImpl.class.cast(routingTable).addLocalClientRoutesToCache();
                                }
                                // There is at least one remote occupant, being a different user, with the same nick.
                                // Kick all.
                                otherUsersWithSameNick.forEach(jid -> kickOccupantBecauseOfNicknameCollision(roomInCluster, nickBeingAddedToRoom, jid, occupantManager));

                                final JID localUserToBeKickedFullJid = localOccupantToRestore.getRealJID();

                                // Now kick the local user. It has to be added to the room for a short instant so that it can actually be kicked out.
                                // Normally we would do this with:
                                //     roomInCluster.addOccupantRole(localOccupantRole);
                                // But that notifies other nodes as well about the new occupant. We don't want that, this is
                                // entirely a local affair. Therefore perform two separate steps instead, without invoking
                                // occupant joined events.
                                roomInCluster.occupants.add(localOccupantRole);
                                occupantManager.registerOccupantJoinedLocally(localOccupantRole.getRoleAddress().asBareJID(), localOccupantRole.getUserAddress(), localOccupantRole.getNickname());

                                // Just added. Now kick out.
                                kickOccupantBecauseOfNicknameCollision(roomInCluster, nickBeingAddedToRoom, localUserToBeKickedFullJid, occupantManager);

                                // Inform other nodes of the kick, so they can remove the occupants from their occupant registration
                                occupantManager.occupantNickKicked(roomInCluster.getJID(), nickBeingAddedToRoom);
                                occupantWasKicked = true;
                            }
                        } catch (UserNotFoundException e) {
                            // This is actually the happy path. There is no remote occupant in the room with the same nick. Proceed.
                        }

                        if (!occupantWasKicked) {
                            roomInCluster.addOccupantRole(localOccupantRole);
                        } else {
                            occupantsToRetain.remove(localOccupantToRestore);
                        }
                    }
                }
                if (!roomInCluster.equals(localRoom)) {
                    // TODO: unsure if #equals() is enough to verify equality here.
                    Log.warn("Joined an Openfire cluster on which a room exists that clashes with a room that exists locally. Room name: '{}' on service '{}'", roomName, serviceName);
                    // TODO: handle collision. Two nodes have different rooms using the same name.
                }

                // Sync room back to make cluster aware of changes.
                Log.debug("Re-added local room '{}' to cache, with occupants: {}", roomName, roomInCluster.getOccupants().stream().map(MUCRole::getUserAddress).map(JID::toString).collect(Collectors.joining(", ")));
                ROOM_CACHE.put(roomName, roomInCluster);

                // TODO: update the local copy of the room with occupants, maybe?
            }
        } finally {
            lock.unlock();
        }
    }
    // Add a cluster listener to clean up locally stored data when another cluster node removes it from the cache.
    ROOM_CACHE.addClusteredCacheEntryListener(new ClusteredCacheEntryListener<String, MUCRoom>() {
        @Override
        public void entryAdded(@Nonnull String key, @Nullable MUCRoom newValue, @Nonnull NodeID nodeID) {
        }

        @Override
        public void entryRemoved(@Nonnull String key, @Nullable MUCRoom oldValue, @Nonnull NodeID nodeID) {
            localRooms.remove(key);
            final MultiUserChatService service = XMPPServer.getInstance().getMultiUserChatManager().getMultiUserChatService(serviceName);
            if (service != null) {
                service.getOccupantManager().roomDestroyed(new JID(key, service.getServiceDomain(), null));
            }
        }

        @Override
        public void entryUpdated(@Nonnull String key, @Nullable MUCRoom oldValue, @Nullable MUCRoom newValue, @Nonnull NodeID nodeID) {
        }

        @Override
        public void entryEvicted(@Nonnull String key, @Nullable MUCRoom oldValue, @Nonnull NodeID nodeID) {
            localRooms.remove(key);
            final MultiUserChatService service = XMPPServer.getInstance().getMultiUserChatManager().getMultiUserChatService(serviceName);
            if (service != null) {
                service.getOccupantManager().roomDestroyed(new JID(key, service.getServiceDomain(), null));
            }
        }

        @Override
        public void mapCleared(@Nonnull NodeID nodeID) {
        }

        @Override
        public void mapEvicted(@Nonnull NodeID nodeID) {
        }
    }, false, false);

    return occupantsToRetain;
}
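
The listener registration at the end of restoreCacheContentAfterJoin is the essence of how ClusteredCacheEntryListener is used: an anonymous implementation reacts to removals and evictions performed elsewhere in the cluster, while the remaining callbacks are left empty. A stripped-down, self-contained sketch of the same registration pattern follows. The class name, the cache name 'Example Cache' and its String value type are hypothetical, and the interpretation of the two boolean arguments (whether callbacks receive entry values, and whether events originating on the local node are delivered) is an assumption based on the calls shown in this listing.

import javax.annotation.Nonnull;
import javax.annotation.Nullable;

import org.jivesoftware.openfire.cluster.ClusteredCacheEntryListener;
import org.jivesoftware.openfire.cluster.NodeID;
import org.jivesoftware.util.cache.Cache;
import org.jivesoftware.util.cache.CacheFactory;

public class ExampleCacheListenerSketch {

    // Hypothetical clustered cache; any cache obtained from CacheFactory could be used here.
    private final Cache<String, String> exampleCache = CacheFactory.createCache("Example Cache");

    public void registerListener() {
        exampleCache.addClusteredCacheEntryListener(new ClusteredCacheEntryListener<String, String>() {
            @Override
            public void entryAdded(@Nonnull String key, @Nullable String newValue, @Nonnull NodeID nodeID) {
                // A cluster node added an entry. Nothing to do in this sketch.
            }

            @Override
            public void entryRemoved(@Nonnull String key, @Nullable String oldValue, @Nonnull NodeID nodeID) {
                // A cluster node removed an entry: clean up any local state that mirrors 'key' here.
            }

            @Override
            public void entryUpdated(@Nonnull String key, @Nullable String oldValue, @Nullable String newValue, @Nonnull NodeID nodeID) {
            }

            @Override
            public void entryEvicted(@Nonnull String key, @Nullable String oldValue, @Nonnull NodeID nodeID) {
                // Treated the same as a removal in the room-cache example above.
            }

            @Override
            public void mapCleared(@Nonnull NodeID nodeID) {
            }

            @Override
            public void mapEvicted(@Nonnull NodeID nodeID) {
            }
        }, false, false); // assumed: no values in events, no events for changes made by the local node
    }
}

In the MUC example above the same pattern is applied to ROOM_CACHE with empty entryAdded and entryUpdated callbacks, because only removals and evictions performed by other nodes require local cleanup.
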
Use of org.jivesoftware.openfire.cluster.ClusteredCacheEntryListener in project Openfire by igniterealtime.
The class RoutingTableImpl, method joinedCluster:
@Override
public void joinedCluster() {
    // The local node joined a cluster.
    //
    // Upon joining a cluster, clustered caches are reset to their clustered equivalent (by the swap from the local
    // cache implementation to the clustered cache implementation that's done in the implementation of
    // org.jivesoftware.util.cache.CacheFactory.joinedCluster). This means that they now hold data that's
    // available on all other cluster nodes. Data that's available on the local node needs to be added again.
    restoreCacheContent();

    Log.debug("Add the entry listeners to the corresponding caches.");
    // Register cache entry event listeners that will collect data for entries added by all other cluster nodes,
    // which is intended to be used (only) in the event of a cluster split.
    final ClusteredCacheEntryListener<String, ClientRoute> userCacheEntryListener = new ReverseLookupUpdatingCacheEntryListener<>(routeOwnersByClusterNode);
    final ClusteredCacheEntryListener<DomainPair, NodeID> serversCacheEntryListener = new ReverseLookupUpdatingCacheEntryListener<>(s2sDomainPairsByClusterNode);
    final ClusteredCacheEntryListener<String, HashSet<NodeID>> componentsCacheEntryListener = new ReverseLookupComputingCacheEntryListener<>(componentsByClusterNode, nodeIDS -> nodeIDS.stream().filter(n -> !n.equals(XMPPServer.getInstance().getNodeID())).collect(Collectors.toSet()));

    // Note that, when #joinedCluster() has fired, the cache will _always_ have been replaced, meaning that it won't
    // have old event listeners. When #leaveCluster() fires, the cache will be destroyed. This takes away the need
    // to explicitly deregister the listener in that case.
    // Ensure that event listeners have been registered with the caches, before starting to simulate 'entryAdded' events,
    // to prevent the possibility of having entries that are missed by the simulation because of bad timing.
    usersCache.addClusteredCacheEntryListener(userCacheEntryListener, false, false);
    anonymousUsersCache.addClusteredCacheEntryListener(userCacheEntryListener, false, false);
    serversCache.addClusteredCacheEntryListener(serversCacheEntryListener, false, false);
    componentsCache.addClusteredCacheEntryListener(componentsCacheEntryListener, true, true);
    // This is not necessary for the usersSessions cache, because its content is being managed while the content
    // of users cache and anonymous users cache is being managed.

    Log.debug("Simulate 'entryAdded' for all data that already exists elsewhere in the cluster.");
    Stream.concat(usersCache.entrySet().stream(), anonymousUsersCache.entrySet().stream()).filter(entry -> !entry.getValue().getNodeID().equals(XMPPServer.getInstance().getNodeID())).forEach(entry -> userCacheEntryListener.entryAdded(entry.getKey(), entry.getValue(), entry.getValue().getNodeID()));
    serversCache.entrySet().stream().filter(entry -> !entry.getValue().equals(XMPPServer.getInstance().getNodeID())).forEach(entry -> serversCacheEntryListener.entryAdded(entry.getKey(), entry.getValue(), entry.getValue()));
    componentsCache.entrySet().forEach(entry -> {
        entry.getValue().forEach(nodeIdForComponent -> {
            // Iterate over all node ids on which the component is known
            if (!nodeIdForComponent.equals(XMPPServer.getInstance().getNodeID())) {
                // Here we pretend that the component has been added by the node id on which it is reported to
                // be available. This might not have been the case, but it is probably accurate. An alternative
                // approach is not easily available.
                componentsCacheEntryListener.entryAdded(entry.getKey(), entry.getValue(), nodeIdForComponent);
            }
        });
    });
    // Broadcast presence of local sessions to remote sessions when subscribed to presence.
    // Probe presences of remote sessions when subscribed to presence of local session.
    // Send pending subscription requests to local sessions from remote sessions.
    // Deliver offline messages sent to local sessions that were unavailable in other nodes.
    // Send available presences of local sessions to other resources of the same user.
    PresenceUpdateHandler presenceUpdateHandler = XMPPServer.getInstance().getPresenceUpdateHandler();
    for (LocalClientSession session : localRoutingTable.getClientRoutes()) {
        // Simulate that the local session has just become available
        session.setInitialized(false);
        // Simulate that current session presence has just been received
        presenceUpdateHandler.process(session.getPresence());
    }
    // TODO OF-2067: the above also (re)generates events on the local node, where these events had already occurred. Ideally, that should not happen.
    // TODO OF-2066: shouldn't a similar action be done on the other nodes, so that the node that just joined gets informed about all sessions living on other cluster nodes?
}
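
The listeners registered above (ReverseLookupUpdatingCacheEntryListener and ReverseLookupComputingCacheEntryListener) maintain per-cluster-node lookup maps such as routeOwnersByClusterNode, so that the local node knows which cache entries belonged to a remote node if that node disappears in a cluster split. The sketch below illustrates the underlying idea in a much-simplified form; it is written against the ClusteredCacheEntryListener interface as it appears in this listing, but the class name, the index structure and the cleanup semantics are illustrative assumptions rather than Openfire's actual implementation.

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;

import org.jivesoftware.openfire.cluster.ClusteredCacheEntryListener;
import org.jivesoftware.openfire.cluster.NodeID;

// Simplified illustration: records which cluster node provided which cache keys, so the local
// node can discard the corresponding state if a remote node abruptly leaves the cluster.
public class PerNodeKeyIndexListener<K, V> implements ClusteredCacheEntryListener<K, V> {

    private final ConcurrentMap<NodeID, Set<K>> keysByNode = new ConcurrentHashMap<>();

    @Override
    public void entryAdded(@Nonnull K key, @Nullable V newValue, @Nonnull NodeID nodeID) {
        keysByNode.computeIfAbsent(nodeID, id -> ConcurrentHashMap.newKeySet()).add(key);
    }

    @Override
    public void entryRemoved(@Nonnull K key, @Nullable V oldValue, @Nonnull NodeID nodeID) {
        // The removal may be performed by a different node than the one that added the entry,
        // so drop the key from every node's set.
        keysByNode.values().forEach(keys -> keys.remove(key));
    }

    @Override
    public void entryUpdated(@Nonnull K key, @Nullable V oldValue, @Nullable V newValue, @Nonnull NodeID nodeID) {
        // In this simplified model an update does not change which node provided the entry.
    }

    @Override
    public void entryEvicted(@Nonnull K key, @Nullable V oldValue, @Nonnull NodeID nodeID) {
        entryRemoved(key, oldValue, nodeID);
    }

    @Override
    public void mapCleared(@Nonnull NodeID nodeID) {
        // A clear wipes the whole cache, regardless of which node performed it.
        keysByNode.clear();
    }

    @Override
    public void mapEvicted(@Nonnull NodeID nodeID) {
        keysByNode.clear();
    }

    // Keys last known to be provided by the given (possibly no longer reachable) cluster node.
    public Set<K> keysFor(@Nonnull NodeID nodeID) {
        return keysByNode.getOrDefault(nodeID, Collections.emptySet());
    }
}

As in joinedCluster() above, such a listener would be registered before simulating entryAdded events for pre-existing cache content, so that no entries are missed between the initial scan and the first real event.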