Use of org.jivesoftware.openfire.RoutingTable in project Openfire by igniterealtime.
The class LocalOutgoingServerSession, method returnErrorToSender:
private void returnErrorToSender(Packet packet) {
    RoutingTable routingTable = XMPPServer.getInstance().getRoutingTable();
    if (packet.getError() != null) {
        Log.debug("Possible double bounce: " + packet.toXML());
    }
    try {
        if (packet instanceof IQ) {
            if (((IQ) packet).isResponse()) {
                Log.debug("XMPP specs forbid us to respond with an IQ error to: " + packet.toXML());
                return;
            }
            IQ reply = new IQ();
            reply.setID(packet.getID());
            reply.setTo(packet.getFrom());
            reply.setFrom(packet.getTo());
            reply.setChildElement(((IQ) packet).getChildElement().createCopy());
            reply.setType(IQ.Type.error);
            reply.setError(PacketError.Condition.remote_server_not_found);
            routingTable.routePacket(reply.getTo(), reply, true);
        } else if (packet instanceof Presence) {
            if (((Presence) packet).getType() == Presence.Type.error) {
                Log.debug("Double-bounce of presence: " + packet.toXML());
                return;
            }
            Presence reply = new Presence();
            reply.setID(packet.getID());
            reply.setTo(packet.getFrom());
            reply.setFrom(packet.getTo());
            reply.setType(Presence.Type.error);
            reply.setError(PacketError.Condition.remote_server_not_found);
            routingTable.routePacket(reply.getTo(), reply, true);
        } else if (packet instanceof Message) {
            if (((Message) packet).getType() == Message.Type.error) {
                Log.debug("Double-bounce of message: " + packet.toXML());
                return;
            }
            Message reply = new Message();
            reply.setID(packet.getID());
            reply.setTo(packet.getFrom());
            reply.setFrom(packet.getTo());
            reply.setType(Message.Type.error);
            reply.setThread(((Message) packet).getThread());
            reply.setError(PacketError.Condition.remote_server_not_found);
            routingTable.routePacket(reply.getTo(), reply, true);
        }
    } catch (Exception e) {
        Log.error("Error returning error to sender. Original packet: " + packet, e);
    }
}
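All three branches apply the same bounce pattern: first guard against answering an error with an error (which could bounce between two servers forever), then build a reply with from and to swapped and a remote-server-not-found condition. A minimal, self-contained sketch of that pattern for a Message, using the org.xmpp.packet classes from the Tinder library that Openfire builds on; this is an illustration of the pattern, not Openfire code:

import org.xmpp.packet.Message;
import org.xmpp.packet.PacketError;

public class BounceSketch {
    /** Builds an error reply for an undeliverable message, or returns null if bouncing is not allowed. */
    static Message bounce(Message undeliverable) {
        if (undeliverable.getType() == Message.Type.error) {
            // Never answer an error with an error: that risks an endless bounce loop between servers.
            return null;
        }
        Message reply = new Message();
        reply.setID(undeliverable.getID());
        reply.setTo(undeliverable.getFrom());  // back to the original sender...
        reply.setFrom(undeliverable.getTo());  // ...appearing to come from the unreachable target
        reply.setType(Message.Type.error);
        reply.setThread(undeliverable.getThread());
        reply.setError(PacketError.Condition.remote_server_not_found);
        return reply;
    }
}

Note that the original method hands the reply to the routing table rather than writing it to a connection directly, which lets the server deliver it to whichever session currently holds the sender's route.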
Use of org.jivesoftware.openfire.RoutingTable in project Openfire by igniterealtime.
The class LocalMUCRoomManager, method restoreCacheContentAfterJoin:
/**
 * When the local node is joining or leaving a cluster, {@link org.jivesoftware.util.cache.CacheFactory} will swap
 * the implementation used to instantiate caches. This causes the cache content to be 'reset': it will no longer
 * contain the data that's provided by the local node. This method restores data that's provided by the local node
 * in the cache. It is expected to be invoked right after joining the cluster
 * ({@link org.jivesoftware.openfire.cluster.ClusterEventListener#joinedCluster()}).
 *
 * This method checks whether local occupant nicknames clash with remote ones. If a clash is detected, both
 * occupants are kicked out of the room.
 *
 * @param occupantManager The occupant manager that contains the local occupant registrations.
 * @return The set of local occupants that are in the room after processing: the original set of local occupants
 *         of the room, minus any occupants that were kicked out.
 */
public Set<OccupantManager.Occupant> restoreCacheContentAfterJoin(@Nonnull final OccupantManager occupantManager) {
    Log.debug("Restoring cache content for cache '{}' after we joined the cluster, by adding all MUC Rooms that are known to the local node.", ROOM_CACHE.getName());
    final Set<OccupantManager.Occupant> localOccupants = occupantManager.getLocalOccupants();
    final Set<OccupantManager.Occupant> occupantsToRetain = new HashSet<>(localOccupants);
    final Map<String, List<OccupantManager.Occupant>> localOccupantByRoom = localOccupants.stream().collect(Collectors.groupingBy(OccupantManager.Occupant::getRoomName));

    // The state of the rooms in the clustered cache should be modified to include our local occupants.
    for (Map.Entry<String, MUCRoom> localRoomEntry : localRooms.entrySet()) {
        final String roomName = localRoomEntry.getKey();
        Log.trace("Re-adding local room '{}' to cluster cache.", roomName);
        final Lock lock = ROOM_CACHE.getLock(roomName);
        lock.lock();
        try {
            final MUCRoom localRoom = localRoomEntry.getValue();
            if (!ROOM_CACHE.containsKey(roomName)) {
                Log.trace("Room was not known to the cluster. Added our representation.");
                ROOM_CACHE.put(roomName, localRoom);
            } else {
                Log.trace("Room was known to the cluster. Merging our local representation with cluster-provided data.");
                final MUCRoom roomInCluster = ROOM_CACHE.get(roomName);

                // Get all occupants that were provided by the local node, and add them to the cluster-representation.
                final List<OccupantManager.Occupant> localOccupantsToRestore = localOccupantByRoom.get(roomName);
                if (localOccupantsToRestore != null) {
                    Log.trace("These occupants of the room are recognized as living on our cluster node. Adding them to the cluster-based room: {}", localOccupantsToRestore.stream().map(OccupantManager.Occupant::getRealJID).map(JID::toString).collect(Collectors.joining(", ")));
                    for (OccupantManager.Occupant localOccupantToRestore : localOccupantsToRestore) {
                        // Get the role for the local occupant from the local representation of the room, and add that to the cluster room.
                        final MUCRole localOccupantRole = localRoom.getOccupantByFullJID(localOccupantToRestore.getRealJID());
                        if (localOccupantRole == null) {
                            Log.trace("Trying to add occupant '{}' but no role for that occupant exists in the local room. Data inconsistency?", localOccupantToRestore.getRealJID());
                            continue;
                        } else {
                            Log.trace("Found localOccupantRole {} for localOccupantToRestore {}, client route = {}", localOccupantRole, localOccupantToRestore.getRealJID(), XMPPServer.getInstance().getRoutingTable().getClientRoute(localOccupantToRestore.getRealJID()));
                        }

                        // OF-2165: check if the nickname of this occupant already existed for another user in the room.
                        // If it did, we need to kick the users out. With sincere apologies.
                        String nickBeingAddedToRoom = localOccupantRole.getNickname();
                        boolean occupantWasKicked = false;
                        try {
                            final List<MUCRole> existingOccupantsWithSameNick = roomInCluster.getOccupantsByNickname(nickBeingAddedToRoom);
                            final List<JID> otherUsersWithSameNick = existingOccupantsWithSameNick.stream().map(MUCRole::getUserAddress).filter(bareJid -> !bareJid.equals(localOccupantRole.getUserAddress())).collect(Collectors.toList());
                            if (!otherUsersWithSameNick.isEmpty()) {
                                // We will be routing presences to several users. The routing table may not have
                                // finished updating the client routes, but those are needed for routing the
                                // stanzas, specifically the local client route. So do that first.
                                RoutingTable routingTable = XMPPServer.getInstance().getRoutingTable();
                                if (routingTable instanceof RoutingTableImpl) {
                                    RoutingTableImpl.class.cast(routingTable).addLocalClientRoutesToCache();
                                }

                                // There is at least one remote occupant, being a different user, with the same nick. Kick all.
                                otherUsersWithSameNick.forEach(jid -> kickOccupantBecauseOfNicknameCollision(roomInCluster, nickBeingAddedToRoom, jid, occupantManager));

                                final JID localUserToBeKickedFullJid = localOccupantToRestore.getRealJID();
                                // Now kick the local user. It has to be added to the room for a short instant so that it can actually be kicked out.
                                // Normally we would do this with:
                                //     roomInCluster.addOccupantRole(localOccupantRole);
                                // But that notifies other nodes as well about the new occupant. We don't want that; this is
                                // entirely a local affair. Therefore perform two separate steps instead, without invoking
                                // occupant joined events.
                                roomInCluster.occupants.add(localOccupantRole);
                                occupantManager.registerOccupantJoinedLocally(localOccupantRole.getRoleAddress().asBareJID(), localOccupantRole.getUserAddress(), localOccupantRole.getNickname());

                                // Just added. Now kick out.
                                kickOccupantBecauseOfNicknameCollision(roomInCluster, nickBeingAddedToRoom, localUserToBeKickedFullJid, occupantManager);

                                // Inform other nodes of the kick, so they can remove the occupants from their occupant registration.
                                occupantManager.occupantNickKicked(roomInCluster.getJID(), nickBeingAddedToRoom);
                                occupantWasKicked = true;
                            }
                        } catch (UserNotFoundException e) {
                            // This is actually the happy path: there is no remote occupant in the room with the same nick. Proceed.
                        }

                        if (!occupantWasKicked) {
                            roomInCluster.addOccupantRole(localOccupantRole);
                        } else {
                            occupantsToRetain.remove(localOccupantToRestore);
                        }
                    }
                }

                if (!roomInCluster.equals(localRoom)) {
                    // TODO: unsure if #equals() is enough to verify equality here.
                    Log.warn("Joined an Openfire cluster on which a room exists that clashes with a room that exists locally. Room name: '{}' on service '{}'", roomName, serviceName);
                    // TODO: handle collision. Two nodes have different rooms using the same name.
                }

                // Sync the room back to make the cluster aware of changes.
                Log.debug("Re-added local room '{}' to cache, with occupants: {}", roomName, roomInCluster.getOccupants().stream().map(MUCRole::getUserAddress).map(JID::toString).collect(Collectors.joining(", ")));
                ROOM_CACHE.put(roomName, roomInCluster);
                // TODO: update the local copy of the room with occupants, maybe?
            }
        } finally {
            lock.unlock();
        }
    }

    // Add a cluster listener to clean up locally stored data when another cluster node removes it from the cache.
    ROOM_CACHE.addClusteredCacheEntryListener(new ClusteredCacheEntryListener<String, MUCRoom>() {
        @Override
        public void entryAdded(@Nonnull String key, @Nullable MUCRoom newValue, @Nonnull NodeID nodeID) {
        }

        @Override
        public void entryRemoved(@Nonnull String key, @Nullable MUCRoom oldValue, @Nonnull NodeID nodeID) {
            localRooms.remove(key);
            final MultiUserChatService service = XMPPServer.getInstance().getMultiUserChatManager().getMultiUserChatService(serviceName);
            if (service != null) {
                service.getOccupantManager().roomDestroyed(new JID(key, service.getServiceDomain(), null));
            }
        }

        @Override
        public void entryUpdated(@Nonnull String key, @Nullable MUCRoom oldValue, @Nullable MUCRoom newValue, @Nonnull NodeID nodeID) {
        }

        @Override
        public void entryEvicted(@Nonnull String key, @Nullable MUCRoom oldValue, @Nonnull NodeID nodeID) {
            localRooms.remove(key);
            final MultiUserChatService service = XMPPServer.getInstance().getMultiUserChatManager().getMultiUserChatService(serviceName);
            if (service != null) {
                service.getOccupantManager().roomDestroyed(new JID(key, service.getServiceDomain(), null));
            }
        }

        @Override
        public void mapCleared(@Nonnull NodeID nodeID) {
        }

        @Override
        public void mapEvicted(@Nonnull NodeID nodeID) {
        }
    }, false, false);

    return occupantsToRetain;
}
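The cache interaction above reduces to one idiom worth isolating: Openfire's clustered caches hand out serialized copies of their values, so mutating a MUCRoom retrieved from the cache changes nothing cluster-wide until the value is put back, and the per-key lock must be held across the whole read-modify-write. A sketch of just that idiom, using the same Cache calls (getLock, containsKey, get, put) seen in the method above; mergeLocalState is a hypothetical stand-in for the occupant merging:

import java.util.concurrent.locks.Lock;
import org.jivesoftware.openfire.muc.MUCRoom;
import org.jivesoftware.util.cache.Cache;

public class ClusteredCacheMergeSketch {
    static void mergeIntoClusteredCache(Cache<String, MUCRoom> roomCache, String roomName, MUCRoom localRoom) {
        final Lock lock = roomCache.getLock(roomName); // cluster-wide lock for this key
        lock.lock();
        try {
            if (!roomCache.containsKey(roomName)) {
                // First node to publish this room: our local state becomes the clustered state.
                roomCache.put(roomName, localRoom);
            } else {
                final MUCRoom roomInCluster = roomCache.get(roomName); // a copy, not a live reference
                mergeLocalState(roomInCluster, localRoom);
                // Write the merged value back, or other cluster nodes never observe the merge.
                roomCache.put(roomName, roomInCluster);
            }
        } finally {
            lock.unlock();
        }
    }

    // Hypothetical merge step, e.g. re-adding the local node's occupants; left as a stub for the sketch.
    static void mergeLocalState(MUCRoom roomInCluster, MUCRoom localRoom) {
    }
}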
Use of org.jivesoftware.openfire.RoutingTable in project Openfire by igniterealtime.
The class RoutingTableImpl, method restoreUsersSessionsCache:
/**
 * When the users sessions cache is (potentially) inconsistent, it can be rebuilt from routeOwnersByClusterNode and
 * routingTable.
 * This method relies on routeOwnersByClusterNode and routingTable being stable and accurately reflecting the
 * current state, so run this restore method <em>after or at the end of</em> the cleanup that follows a cluster leave.
 */
private void restoreUsersSessionsCache() {
    Log.info("Restoring Users Sessions Cache");

    // First remove all elements from the users sessions cache that are not present in the user caches.
    final Set<String> existingUserRoutes = routeOwnersByClusterNode.values().stream().flatMap(Collection::stream).collect(Collectors.toSet());
    existingUserRoutes.addAll(localRoutingTable.getClientRoutes().stream().map(LocalClientSession::getAddress).map(JID::toFullJID).collect(Collectors.toSet()));
    final Set<String> entriesToRemove = usersSessionsCache.values().stream().flatMap(Collection::stream).filter(fullJid -> !existingUserRoutes.contains(fullJid)).collect(Collectors.toSet());
    entriesToRemove.forEach(fullJid -> CacheUtil.removeValueFromMultiValuedCache(usersSessionsCache, new JID(fullJid).toBareJID(), fullJid));

    // Then add elements from the user caches that are not present in the users sessions cache.
    existingUserRoutes.forEach(fullJid -> {
        CacheUtil.addValueToMultiValuedCache(usersSessionsCache, new JID(fullJid).toBareJID(), fullJid, HashSet::new);
    });
}
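Stripped of the Openfire plumbing, this method is a two-phase set reconciliation: first remove every cached session that no longer has a backing route, then (re)insert every known route under its bare JID. A self-contained model of that logic with plain collections; the names are illustrative, not Openfire API:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class SessionsCacheReconcileSketch {
    /**
     * sessions maps a bare JID to the full JIDs of its sessions; existingRoutes is the
     * authoritative set of full JIDs, rebuilt from the per-node route owners as above.
     */
    static void reconcile(Map<String, Set<String>> sessions, Set<String> existingRoutes) {
        // Phase 1: drop cached sessions that no longer have a route.
        sessions.values().forEach(fullJids -> fullJids.removeIf(fullJid -> !existingRoutes.contains(fullJid)));
        sessions.values().removeIf(Set::isEmpty);

        // Phase 2: (re)add every known route under its bare JID; entries already present are no-ops.
        for (String fullJid : existingRoutes) {
            final int slash = fullJid.indexOf('/');
            final String bareJid = slash < 0 ? fullJid : fullJid.substring(0, slash);
            sessions.computeIfAbsent(bareJid, k -> new HashSet<>()).add(fullJid);
        }
    }
}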