Use of org.apache.storm.messaging.IConnection in project storm by apache.
Class WorkerState, method refreshConnections.
public void refreshConnections() {
    Assignment assignment = null;
    try {
        assignment = getLocalAssignment(stormClusterState, topologyId);
    } catch (Exception e) {
        LOG.warn("Failed to read assignment. This should only happen when topology is shutting down.", e);
    }
    suicideIfLocalAssignmentsChanged(assignment);
    Set<NodeInfo> neededConnections = new HashSet<>();
    Map<Integer, NodeInfo> newTaskToNodePort = new HashMap<>();
    if (null != assignment) {
        Map<Integer, NodeInfo> taskToNodePort = StormCommon.taskToNodeport(assignment.get_executor_node_port());
        for (Map.Entry<Integer, NodeInfo> taskToNodePortEntry : taskToNodePort.entrySet()) {
            Integer task = taskToNodePortEntry.getKey();
            if (outboundTasks.contains(task)) {
                newTaskToNodePort.put(task, taskToNodePortEntry.getValue());
                if (!localTaskIds.contains(task)) {
                    neededConnections.add(taskToNodePortEntry.getValue());
                }
            }
        }
    }
    final Set<NodeInfo> currentConnections = cachedNodeToPortSocket.get().keySet();
    final Set<NodeInfo> newConnections = Sets.difference(neededConnections, currentConnections);
    final Set<NodeInfo> removeConnections = Sets.difference(currentConnections, neededConnections);
    Map<String, String> nodeHost = assignment != null ? assignment.get_node_host() : null;
    // Add new connections atomically
    cachedNodeToPortSocket.getAndUpdate(prev -> {
        Map<NodeInfo, IConnection> next = new HashMap<>(prev);
        for (NodeInfo nodeInfo : newConnections) {
            // nodeHost cannot be null here: newConnections is only non-empty when assignment was not null above
            next.put(nodeInfo, mqContext.connect(topologyId,
                nodeHost.get(nodeInfo.get_node()), // Host
                nodeInfo.get_port().iterator().next().intValue(), // Port
                workerTransfer.getRemoteBackPressureStatus()));
        }
        return next;
    });
    try {
        endpointSocketLock.writeLock().lock();
        cachedTaskToNodePort.set(newTaskToNodePort);
    } finally {
        endpointSocketLock.writeLock().unlock();
    }
    // It is okay that cachedNodeToHost can be temporarily out of sync with cachedTaskToNodePort
    if (nodeHost != null) {
        cachedNodeToHost.set(nodeHost);
    } else {
        cachedNodeToHost.set(new HashMap<>());
    }
    for (NodeInfo nodeInfo : removeConnections) {
        cachedNodeToPortSocket.get().get(nodeInfo).close();
    }
    // Remove old connections atomically
    cachedNodeToPortSocket.getAndUpdate(prev -> {
        Map<NodeInfo, IConnection> next = new HashMap<>(prev);
        removeConnections.forEach(next::remove);
        return next;
    });
}
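The two getAndUpdate blocks above apply a copy-on-write pattern: readers of the AtomicReference always see either the old connection map or the fully updated one, never a half-built map. The following is a minimal, self-contained sketch of that pattern under simplified assumptions; Connection and the String endpoint keys are hypothetical stand-ins for Storm's IConnection and NodeInfo, and Guava's Sets.difference is used as in the method above.

import com.google.common.collect.Sets;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

// Minimal sketch of the copy-on-write connection cache pattern used above.
// Connection and the String endpoint keys are hypothetical, not Storm types.
public class ConnectionCacheSketch {
    interface Connection {
        void close();
    }

    private final AtomicReference<Map<String, Connection>> cache =
        new AtomicReference<>(new HashMap<>());

    public void refresh(Set<String> needed, Function<String, Connection> connector) {
        Set<String> current = cache.get().keySet();
        Set<String> toAdd = Sets.difference(needed, current);
        Set<String> toRemove = Sets.difference(current, needed);

        // Add new connections atomically: copy the previous map, mutate the copy, publish it.
        cache.getAndUpdate(prev -> {
            Map<String, Connection> next = new HashMap<>(prev);
            for (String endpoint : toAdd) {
                next.put(endpoint, connector.apply(endpoint));
            }
            return next;
        });

        // Close connections that are no longer needed, then drop them from the published map.
        for (String endpoint : toRemove) {
            cache.get().get(endpoint).close();
        }
        cache.getAndUpdate(prev -> {
            Map<String, Connection> next = new HashMap<>(prev);
            toRemove.forEach(next::remove);
            return next;
        });
    }
}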
Use of org.apache.storm.messaging.IConnection in project storm by apache.
Class Context, method connect.
/**
 * establish a connection to a remote server
 */
public synchronized IConnection connect(String storm_id, String host, int port) {
    IConnection connection = connections.get(key(host, port));
    if (connection != null) {
        return connection;
    }
    IConnection client = new Client(storm_conf, clientChannelFactory, clientScheduleService, host, port, this);
    connections.put(key(host, port), client);
    return client;
}
Use of org.apache.storm.messaging.IConnection in project storm by apache.
Class WorkerState, method refreshConnections (variant taking a Runnable callback).
public void refreshConnections(Runnable callback) throws Exception {
    Integer version = stormClusterState.assignmentVersion(topologyId, callback);
    version = (null == version) ? 0 : version;
    VersionedData<Assignment> assignmentVersion = assignmentVersions.get().get(topologyId);
    Assignment assignment;
    if (null != assignmentVersion && (assignmentVersion.getVersion() == version)) {
        assignment = assignmentVersion.getData();
    } else {
        VersionedData<Assignment> newAssignmentVersion =
            new VersionedData<>(version, stormClusterState.assignmentInfoWithVersion(topologyId, callback).getData());
        assignmentVersions.getAndUpdate(prev -> {
            Map<String, VersionedData<Assignment>> next = new HashMap<>(prev);
            next.put(topologyId, newAssignmentVersion);
            return next;
        });
        assignment = newAssignmentVersion.getData();
    }
    Set<NodeInfo> neededConnections = new HashSet<>();
    Map<Integer, NodeInfo> newTaskToNodePort = new HashMap<>();
    if (null != assignment) {
        Map<Integer, NodeInfo> taskToNodePort = StormCommon.taskToNodeport(assignment.get_executor_node_port());
        for (Map.Entry<Integer, NodeInfo> taskToNodePortEntry : taskToNodePort.entrySet()) {
            Integer task = taskToNodePortEntry.getKey();
            if (outboundTasks.contains(task)) {
                newTaskToNodePort.put(task, taskToNodePortEntry.getValue());
                if (!taskIds.contains(task)) {
                    neededConnections.add(taskToNodePortEntry.getValue());
                }
            }
        }
    }
    Set<NodeInfo> currentConnections = cachedNodeToPortSocket.get().keySet();
    Set<NodeInfo> newConnections = Sets.difference(neededConnections, currentConnections);
    Set<NodeInfo> removeConnections = Sets.difference(currentConnections, neededConnections);
    // Add new connections atomically
    cachedNodeToPortSocket.getAndUpdate(prev -> {
        Map<NodeInfo, IConnection> next = new HashMap<>(prev);
        for (NodeInfo nodeInfo : newConnections) {
            next.put(nodeInfo, mqContext.connect(topologyId,
                assignment.get_node_host().get(nodeInfo.get_node()),
                nodeInfo.get_port().iterator().next().intValue()));
        }
        return next;
    });
    try {
        endpointSocketLock.writeLock().lock();
        cachedTaskToNodePort.set(newTaskToNodePort);
    } finally {
        endpointSocketLock.writeLock().unlock();
    }
    for (NodeInfo nodeInfo : removeConnections) {
        cachedNodeToPortSocket.get().get(nodeInfo).close();
    }
    // Remove old connections atomically
    cachedNodeToPortSocket.getAndUpdate(prev -> {
        Map<NodeInfo, IConnection> next = new HashMap<>(prev);
        removeConnections.forEach(next::remove);
        return next;
    });
}
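This variant only re-reads the assignment from cluster state when its version number has changed; otherwise it reuses the cached VersionedData. A minimal sketch of that version-checked caching step follows, under simplified assumptions: Versioned is a hypothetical counterpart of Storm's VersionedData, and the String key plays the role of topologyId.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

// Sketch of version-checked caching: re-fetch only when the stored version is stale.
class VersionedCacheSketch<T> {
    record Versioned<D>(int version, D data) { }

    private final AtomicReference<Map<String, Versioned<T>>> cache =
        new AtomicReference<>(new HashMap<>());

    T get(String key, int latestVersion, Supplier<T> fetcher) {
        Versioned<T> cached = cache.get().get(key);
        if (cached != null && cached.version() == latestVersion) {
            return cached.data();                 // cached copy is still current: no fetch needed
        }
        Versioned<T> fresh = new Versioned<>(latestVersion, fetcher.get());
        cache.getAndUpdate(prev -> {              // copy-on-write update, as in the method above
            Map<String, Versioned<T>> next = new HashMap<>(prev);
            next.put(key, fresh);
            return next;
        });
        return fresh.data();
    }
}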
Use of org.apache.storm.messaging.IConnection in project storm by apache.
Class TransferDrainer, method send.
public void send(Map<Integer, NodeInfo> taskToNode, Map<NodeInfo, IConnection> connections) {
    HashMap<NodeInfo, Stream<TaskMessage>> bundleMapByDestination = groupBundleByDestination(taskToNode);
    for (Map.Entry<NodeInfo, Stream<TaskMessage>> entry : bundleMapByDestination.entrySet()) {
        NodeInfo node = entry.getKey();
        IConnection conn = connections.get(node);
        if (conn != null) {
            Iterator<TaskMessage> iter = entry.getValue().iterator();
            if (iter.hasNext()) {
                conn.send(iter);
            }
        } else {
            LOG.warn("Connection not available for hostPort {}", node);
        }
    }
}
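The drainer hands each connection one batched send per destination rather than one call per message. A minimal sketch of that grouping step, with hypothetical Message, String node keys, and Conn types standing in for Storm's TaskMessage, NodeInfo, and IConnection:

import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Sketch of grouping outgoing messages by destination node so each connection gets one batched send.
class BatchSendSketch {
    record Message(int taskId, byte[] payload) { }

    interface Conn {
        void send(Iterator<Message> batch);
    }

    static void send(List<Message> pending,
                     Map<Integer, String> taskToNode,
                     Map<String, Conn> connections) {
        // Bucket every pending message under the node that hosts its destination task.
        Map<String, List<Message>> byNode = pending.stream()
            .collect(Collectors.groupingBy(m -> taskToNode.get(m.taskId())));
        // Hand each bucket to that node's connection in a single call.
        byNode.forEach((node, batch) -> {
            Conn conn = connections.get(node);
            if (conn != null) {
                conn.send(batch.iterator());
            }
            // (the real drainer logs a warning when no connection is available for the node)
        });
    }
}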
Use of org.apache.storm.messaging.IConnection in project storm by apache.
Class Worker, method shutdown.
@Override
public void shutdown() {
    try {
        LOG.info("Shutting down worker {} {} {}", topologyId, assignmentId, port);
        if (workerState != null) {
            for (IConnection socket : workerState.cachedNodeToPortSocket.get().values()) {
                // this will do best effort flushing since the linger period
                // was set on creation
                socket.close();
            }
            LOG.info("Terminating messaging context");
            LOG.info("Shutting down executors");
            for (IRunningExecutor executor : executorsAtom.get()) {
                ((ExecutorShutdown) executor).shutdown();
            }
            LOG.info("Shut down executors");
            LOG.info("Shutting down transfer thread");
            workerState.haltWorkerTransfer();
            if (transferThread != null) {
                transferThread.interrupt();
                transferThread.join();
                LOG.info("Shut down transfer thread");
            }
            workerState.heartbeatTimer.close();
            workerState.refreshConnectionsTimer.close();
            workerState.refreshCredentialsTimer.close();
            workerState.checkForUpdatedBlobsTimer.close();
            workerState.refreshActiveTimer.close();
            workerState.executorHeartbeatTimer.close();
            workerState.userTimer.close();
            workerState.refreshLoadTimer.close();
            workerState.resetLogLevelsTimer.close();
            workerState.flushTupleTimer.close();
            workerState.backPressureCheckTimer.close();
            // this is fine because the only time this is shared is when it's a local context,
            // in which case it's a noop
            workerState.mqContext.term();
            workerState.closeResources();
            LOG.info("Trigger any worker shutdown hooks");
            workerState.runWorkerShutdownHooks();
            workerState.stormClusterState.removeWorkerHeartbeat(topologyId, assignmentId, (long) port);
            LOG.info("Disconnecting from storm cluster state context");
            workerState.stormClusterState.disconnect();
            workerState.stateStorage.close();
        } else {
            LOG.error("workerState is null");
        }
        metricRegistry.stop();
        SharedMetricRegistries.remove(WORKER_METRICS_REGISTRY);
        LOG.info("Shut down worker {} {} {}", topologyId, assignmentId, port);
    } catch (Exception ex) {
        throw Utils.wrapInRuntime(ex);
    }
}
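Shutdown halts the worker transfer loop, interrupts the transfer thread, and joins it before the messaging context is terminated. A minimal sketch of that interrupt-and-join ordering, with hypothetical names rather than Storm's Worker/WorkerState:

// Sketch of stopping a background transfer loop by interrupting it and waiting for it to exit.
class TransferThreadSketch {
    private volatile boolean halted = false;
    private final Thread transferThread = new Thread(() -> {
        while (!halted && !Thread.currentThread().isInterrupted()) {
            drainOnce();
        }
    }, "worker-transfer");

    void start() {
        transferThread.start();
    }

    void shutdown() throws InterruptedException {
        halted = true;                // analogous to workerState.haltWorkerTransfer()
        transferThread.interrupt();   // wake the loop if it is blocked
        transferThread.join();        // only proceed (e.g. to terminating the messaging context) once the loop has exited
    }

    private void drainOnce() {
        // placeholder for flushing queued tuples to the cached connections
    }
}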