Use of org.apache.storm.generated.NodeInfo in project storm by apache.
The class WorkerState, method refreshConnections.
// Refreshes this worker's outbound connections so they match the current topology assignment.
// Steps: (1) resolve the assignment (from the local version cache when fresh, otherwise from
// cluster state), (2) compute which node/port endpoints the outbound tasks need, (3) open any
// newly required connections, (4) swap in the task->node/port routing table under the write
// lock, and (5) close and drop connections that are no longer needed.
// The callback is registered with the cluster state so this refresh is triggered again when
// the assignment changes.
public void refreshConnections(Runnable callback) throws Exception {
Integer version = stormClusterState.assignmentVersion(topologyId, callback);
// A missing version is normalized to 0 so the cache comparison below is null-safe.
version = (null == version) ? 0 : version;
VersionedData<Assignment> assignmentVersion = assignmentVersions.get().get(topologyId);
Assignment assignment;
if (null != assignmentVersion && (assignmentVersion.getVersion() == version)) {
// Cache hit: the locally cached assignment is still at the current version.
assignment = assignmentVersion.getData();
} else {
// Cache miss or stale: fetch the assignment from cluster state and publish it into the
// shared cache with a copy-on-write update (HashMap copy keeps readers lock-free).
VersionedData<Assignment> newAssignmentVersion = new VersionedData<>(version, stormClusterState.assignmentInfoWithVersion(topologyId, callback).getData());
assignmentVersions.getAndUpdate(prev -> {
Map<String, VersionedData<Assignment>> next = new HashMap<>(prev);
next.put(topologyId, newAssignmentVersion);
return next;
});
assignment = newAssignmentVersion.getData();
}
// Endpoints this worker must hold a connection to, and the new task -> endpoint routing map.
Set<NodeInfo> neededConnections = new HashSet<>();
Map<Integer, NodeInfo> newTaskToNodePort = new HashMap<>();
if (null != assignment) {
Map<Integer, NodeInfo> taskToNodePort = StormCommon.taskToNodeport(assignment.get_executor_node_port());
for (Map.Entry<Integer, NodeInfo> taskToNodePortEntry : taskToNodePort.entrySet()) {
Integer task = taskToNodePortEntry.getKey();
// Only tasks we send to matter for routing.
if (outboundTasks.contains(task)) {
newTaskToNodePort.put(task, taskToNodePortEntry.getValue());
// Tasks hosted in this worker (taskIds) are reached locally — no socket needed.
if (!taskIds.contains(task)) {
neededConnections.add(taskToNodePortEntry.getValue());
}
}
}
}
Set<NodeInfo> currentConnections = cachedNodeToPortSocket.get().keySet();
// Guava set views: connections to open vs. connections to tear down.
Set<NodeInfo> newConnections = Sets.difference(neededConnections, currentConnections);
Set<NodeInfo> removeConnections = Sets.difference(currentConnections, neededConnections);
// Add new connections atomically
cachedNodeToPortSocket.getAndUpdate(prev -> {
Map<NodeInfo, IConnection> next = new HashMap<>(prev);
for (NodeInfo nodeInfo : newConnections) {
// Connect to the endpoint's host; NodeInfo carries a set of ports, the first is used.
next.put(nodeInfo, mqContext.connect(topologyId, assignment.get_node_host().get(nodeInfo.get_node()), nodeInfo.get_port().iterator().next().intValue()));
}
return next;
});
// Publish the new routing table under the write lock so readers never observe a
// partially updated task -> endpoint view.
try {
endpointSocketLock.writeLock().lock();
cachedTaskToNodePort.set(newTaskToNodePort);
} finally {
endpointSocketLock.writeLock().unlock();
}
// Close obsolete connections first, then drop them from the map; the map still holds
// the references while close() runs.
for (NodeInfo nodeInfo : removeConnections) {
cachedNodeToPortSocket.get().get(nodeInfo).close();
}
// Remove old connections atomically
cachedNodeToPortSocket.getAndUpdate(prev -> {
Map<NodeInfo, IConnection> next = new HashMap<>(prev);
removeConnections.forEach(next::remove);
return next;
});
}
Use of org.apache.storm.generated.NodeInfo in project storm by apache.
The class WorkerState, method readWorkerExecutors.
/**
 * Reads the current assignment and returns the executors hosted by this worker,
 * i.e. those placed on this node ({@code assignmentId}) at the given {@code port}.
 * The system executor is always included.
 */
private List<List<Long>> readWorkerExecutors(IStormClusterState stormClusterState, String topologyId, String assignmentId, int port) {
    LOG.info("Reading assignments");
    final List<List<Long>> localExecutors = new ArrayList<>();
    // The system executor runs in every worker.
    localExecutors.add(Constants.SYSTEM_EXECUTOR_ID);
    Map<List<Long>, NodeInfo> executorToNodePort = stormClusterState.assignmentInfo(topologyId, null).get_executor_node_port();
    executorToNodePort.forEach((executor, nodeInfo) -> {
        boolean onThisNode = nodeInfo.get_node().equals(assignmentId);
        // NodeInfo carries a set of ports; the first one identifies the worker slot.
        if (onThisNode && nodeInfo.get_port().iterator().next() == port) {
            localExecutors.add(executor);
        }
    });
    return localExecutors;
}
Use of org.apache.storm.generated.NodeInfo in project storm by apache.
The class Nimbus, method computeNewTopoToExecToNodePort.
/**
 * Computes the new topology -> executor -> [node, port] mapping from the scheduler
 * assignments, and logs which executors moved relative to the existing assignments.
 * The returned mapping is unaffected by the logging; existing assignments are only
 * consulted for the diff.
 */
private static Map<String, Map<List<Long>, List<Object>>> computeNewTopoToExecToNodePort(Map<String, SchedulerAssignment> schedAssignments, Map<String, Assignment> existingAssignments) {
    Map<String, Map<List<Long>, List<Object>>> ret = computeTopoToExecToNodePort(schedAssignments);
    // Print some useful information
    if (existingAssignments == null || existingAssignments.isEmpty()) {
        return ret;
    }
    for (Entry<String, Map<List<Long>, List<Object>>> topoEntry : ret.entrySet()) {
        String topoId = topoEntry.getKey();
        Assignment existing = existingAssignments.get(topoId);
        if (existing == null) {
            // Brand-new topology: nothing to diff against.
            continue;
        }
        Map<List<Long>, List<Object>> execToNodePort = topoEntry.getValue();
        Map<List<Long>, NodeInfo> previous = existing.get_executor_node_port();
        Map<List<Long>, List<Object>> reassigned = new HashMap<>();
        for (Entry<List<Long>, List<Object>> execEntry : execToNodePort.entrySet()) {
            NodeInfo before = previous.get(execEntry.getKey());
            String newNode = (String) execEntry.getValue().get(0);
            Long newPort = (Long) execEntry.getValue().get(1);
            // An executor moved if it was unassigned before, or its node or port changed.
            boolean moved = before == null
                || !before.get_node().equals(newNode)
                || !newPort.equals(before.get_port_iterator().next());
            if (moved) {
                reassigned.put(execEntry.getKey(), execEntry.getValue());
            }
        }
        if (!reassigned.isEmpty()) {
            // Distinct [node, port] values = number of slots the topology now occupies.
            int count = new HashSet<>(execToNodePort.values()).size();
            Set<List<Long>> reExecs = reassigned.keySet();
            LOG.info("Reassigning {} to {} slots", topoId, count);
            LOG.info("Reassign executors: {}", reExecs);
        }
    }
    return ret;
}
Use of org.apache.storm.generated.NodeInfo in project storm by apache.
The class TransferDrainer, method send.
/**
 * Sends all buffered task messages, grouped by destination worker endpoint,
 * over the corresponding connections. Destinations without an available
 * connection are logged and skipped.
 */
public void send(Map<Integer, NodeInfo> taskToNode, Map<NodeInfo, IConnection> connections) {
    groupBundleByDestination(taskToNode).forEach((destination, messages) -> {
        IConnection connection = connections.get(destination);
        if (connection == null) {
            LOG.warn("Connection not available for hostPort {}", destination);
            return;
        }
        Iterator<TaskMessage> messageIter = messages.iterator();
        // Avoid issuing an empty send when there is nothing queued for this destination.
        if (messageIter.hasNext()) {
            connection.send(messageIter);
        }
    });
}
Use of org.apache.storm.generated.NodeInfo in project storm by apache.
The class LoadAwareShuffleGroupingTest, method mockContext.
/**
 * Builds a mocked WorkerTopologyContext for a single local worker ("node-id":6700)
 * hosting all of the given task ids, with "node-id" resolving to "hostname1".
 */
private WorkerTopologyContext mockContext(List<Integer> availableTaskIds) {
    // Every available task lives on the same local node/port.
    NodeInfo localNode = new NodeInfo("node-id", Sets.newHashSet(6700L));
    Map<Integer, NodeInfo> taskToNodePort = new HashMap<>();
    for (Integer taskId : availableTaskIds) {
        taskToNodePort.put(taskId, localNode);
    }
    WorkerTopologyContext context = mock(WorkerTopologyContext.class);
    when(context.getConf()).thenReturn(createConf());
    when(context.getTaskToNodePort()).thenReturn(new AtomicReference<>(taskToNodePort));
    when(context.getAssignmentId()).thenReturn("node-id");
    when(context.getThisWorkerPort()).thenReturn(6700);
    when(context.getNodeToHost()).thenReturn(new AtomicReference<>(Collections.singletonMap("node-id", "hostname1")));
    return context;
}
Aggregations