Use of org.apache.storm.generated.NodeInfo in project storm by apache.
From the class LoadAwareShuffleGroupingTest, method createLoadSwitchingContext.
// creates a WorkerTopologyContext with 3 tasks, one worker local, one host local,
// and one rack local
private WorkerTopologyContext createLoadSwitchingContext() {
    WorkerTopologyContext mockContext = mock(WorkerTopologyContext.class);
    when(mockContext.getConf()).thenReturn(createConf());

    // This worker is "node-id" listening on port 6701.
    when(mockContext.getAssignmentId()).thenReturn("node-id");
    when(mockContext.getThisWorkerPort()).thenReturn(6701);

    // Task 1 shares our exact worker (same node, same port), task 2 shares only
    // the host (same node, different port), and task 3 lives on a different node.
    Map<Integer, NodeInfo> taskAssignments = new HashMap<>();
    taskAssignments.put(1, new NodeInfo("node-id", Sets.newHashSet(6701L)));
    taskAssignments.put(2, new NodeInfo("node-id", Sets.newHashSet(6702L)));
    taskAssignments.put(3, new NodeInfo("node-id2", Sets.newHashSet(6703L)));
    when(mockContext.getTaskToNodePort()).thenReturn(new AtomicReference<>(taskAssignments));

    // Map each node id to a distinct hostname so host/rack lookups can differ.
    Map<String, String> hostnamesByNode = new HashMap<>();
    hostnamesByNode.put("node-id", "hostname1");
    hostnamesByNode.put("node-id2", "hostname2");
    when(mockContext.getNodeToHost()).thenReturn(new AtomicReference<>(hostnamesByNode));

    return mockContext;
}
Use of org.apache.storm.generated.NodeInfo in project storm by apache.
From the class StormClusterStateImpl, method getWorkerProfileRequests.
/**
 * Collects the profiling requests for the given topology that target a specific worker.
 *
 * @param stormId topology id whose profile requests are scanned
 * @param nodeInfo node+port identifying the worker to match against
 * @return the subset of the topology's profile requests aimed at {@code nodeInfo}
 */
@Override
public List<ProfileRequest> getWorkerProfileRequests(String stormId, NodeInfo nodeInfo) {
    List<ProfileRequest> matching = new ArrayList<>();
    // Keep only the topology-wide requests whose target worker equals nodeInfo.
    for (ProfileRequest candidate : getTopologyProfileRequests(stormId)) {
        if (candidate.get_nodeInfo().equals(nodeInfo)) {
            matching.add(candidate);
        }
    }
    return matching;
}
Use of org.apache.storm.generated.NodeInfo in project storm by apache.
From the class StormClusterStateImpl, method executorBeats.
/**
 * Need to take executor->node+port in explicitly so that we don't run into a situation where a long dead worker with a skewed clock
 * overrides all the timestamps. By only checking heartbeats with an assigned node+port, and only reading executors from that heartbeat
 * that are actually assigned, we avoid situations like that.
 *
 * @param stormId topology id
 * @param executorNodePort executor id -> node + port
 * @return mapping of executorInfo -> executor beat
 */
@Override
public Map<ExecutorInfo, ExecutorBeat> executorBeats(String stormId, Map<List<Long>, NodeInfo> executorNodePort) {
    Map<ExecutorInfo, ExecutorBeat> executorWhbs = new HashMap<>();
    // Group the assigned executors by the worker (node+port) that owns them so
    // each worker heartbeat only has to be fetched once.
    Map<NodeInfo, List<List<Long>>> nodePortExecutors = Utils.reverseMap(executorNodePort);
    for (Map.Entry<NodeInfo, List<List<Long>>> entry : nodePortExecutors.entrySet()) {
        String node = entry.getKey().get_node();
        Long port = entry.getKey().get_port_iterator().next();
        ClusterWorkerHeartbeat whb = getWorkerHeartbeat(stormId, node, port);
        if (whb == null) {
            // No heartbeat for this worker; nothing to convert, so skip it
            // without building the executor list.
            continue;
        }
        List<ExecutorInfo> executorInfoList = new ArrayList<>();
        for (List<Long> executorId : entry.getValue()) {
            // An executor id is [startTask, ..., endTask]; only the endpoints matter.
            executorInfoList.add(new ExecutorInfo(executorId.get(0).intValue(),
                    executorId.get(executorId.size() - 1).intValue()));
        }
        executorWhbs.putAll(ClusterUtils.convertExecutorBeats(executorInfoList, whb));
    }
    return executorWhbs;
}
Use of org.apache.storm.generated.NodeInfo in project storm by apache.
From the class UIHelpers, method setTopologyProfilingAction.
/**
 * Submits a worker profiling action against a single worker of a topology.
 *
 * @param client Nimbus client used to submit the request
 * @param id topology id
 * @param hostPort worker endpoint in {@code "host:port"} form
 * @param timestamp timestamp (ms) to attach to the profile request
 * @param config topology configuration (not read by this method)
 * @param profileAction the profiling action to perform
 * @throws TException on Thrift transport/protocol errors
 * @throws IllegalArgumentException if {@code hostPort} is not in "host:port" form
 */
public static void setTopologyProfilingAction(Nimbus.Iface client, String id, String hostPort, Long timestamp, Map<String, Object> config, ProfileAction profileAction) throws TException {
    // Split once instead of twice, and fail fast on malformed input (the old
    // code would throw ArrayIndexOutOfBoundsException on a missing ':').
    String[] parts = hostPort.split(":");
    if (parts.length < 2) {
        throw new IllegalArgumentException("Expected hostPort in \"host:port\" form but got: " + hostPort);
    }
    String host = parts[0];
    // Parameterized HashSet<>(); the previous raw HashSet caused an unchecked warning.
    Set<Long> ports = new HashSet<>();
    ports.add(Long.valueOf(parts[1]));
    NodeInfo nodeInfo = new NodeInfo(host, ports);
    ProfileRequest profileRequest = new ProfileRequest(nodeInfo, profileAction);
    profileRequest.set_time_stamp(timestamp);
    client.setWorkerProfiler(id, profileRequest);
}
Use of org.apache.storm.generated.NodeInfo in project storm by apache.
From the class LoadAwareShuffleGrouping, method calculateScope.
/**
 * Determines how "close" a target task is to this worker: same worker process,
 * same host, same rack, or anywhere.
 *
 * @param taskToNodePort assignment of task id -> worker node+port
 * @param nodeToHost mapping of node id -> hostname
 * @param hostToRack mapping of hostname -> rack id
 * @param target the task id whose locality is being evaluated
 * @return the narrowest locality scope that contains the target task
 */
private LocalityScope calculateScope(Map<Integer, NodeInfo> taskToNodePort, Map<String, String> nodeToHost, Map<String, String> hostToRack, int target) {
    NodeInfo targetNodeInfo = taskToNodePort.get(target);
    // Unassigned target: no locality information, fall back to the widest scope.
    if (targetNodeInfo == null) {
        return LocalityScope.EVERYTHING;
    }

    if (sourceNodeInfo.get_node().equals(targetNodeInfo.get_node())) {
        // Same supervisor node: an identical port set means the very same worker.
        return sourceNodeInfo.get_port().equals(targetNodeInfo.get_port())
                ? LocalityScope.WORKER_LOCAL
                : LocalityScope.HOST_LOCAL;
    }

    // Different nodes: resolve node -> host -> rack for both sides.
    String sourceHost = nodeToHost.get(sourceNodeInfo.get_node());
    String targetHost = nodeToHost.get(targetNodeInfo.get_node());
    String sourceRack = (sourceHost == null) ? null : hostToRack.get(sourceHost);
    String targetRack = (targetHost == null) ? null : hostToRack.get(targetHost);
    // Rack-local only when the source rack is known AND matches the target rack;
    // two unknown (null) racks deliberately do NOT count as a match.
    if (sourceRack != null && sourceRack.equals(targetRack)) {
        return LocalityScope.RACK_LOCAL;
    }
    return LocalityScope.EVERYTHING;
}
Aggregations