Use of org.apache.storm.generated.Assignment in project storm by apache.
The class ReadClusterState, method readAssignments.
protected Map<Integer, LocalAssignment> readAssignments(Map<String, VersionedData<Assignment>> assignmentsSnapshot) {
    try {
        Map<Integer, LocalAssignment> portLA = new HashMap<>();
        for (Map.Entry<String, VersionedData<Assignment>> assignEntry : assignmentsSnapshot.entrySet()) {
            String topoId = assignEntry.getKey();
            Assignment assignment = assignEntry.getValue().getData();
            Map<Integer, LocalAssignment> portTasks = readMyExecutors(topoId, assignmentId, assignment);
            for (Map.Entry<Integer, LocalAssignment> entry : portTasks.entrySet()) {
                Integer port = entry.getKey();
                LocalAssignment la = entry.getValue();
                if (!portLA.containsKey(port)) {
                    portLA.put(port, la);
                } else {
                    throw new RuntimeException("Should not have multiple topologies assigned to one port "
                                               + port + " " + la + " " + portLA);
                }
            }
        }
        readRetry.set(0);
        return portLA;
    } catch (RuntimeException e) {
        if (readRetry.get() > 2) {
            throw e;
        } else {
            readRetry.addAndGet(1);
        }
        LOG.warn("{} : retrying {} of 3", e.getMessage(), readRetry.get());
        return null;
    }
}
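
A detail worth calling out in the snippet above: readRetry is an AtomicInteger field of ReadClusterState, a failed read returns null so the caller can simply try again on its next pass, and once three retries have been used up the RuntimeException is rethrown. A minimal sketch of that retry idiom in isolation (RetryingReader and readOnce are illustrative names, not Storm APIs):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

class RetryingReader {
    private final AtomicInteger readRetry = new AtomicInteger(0);

    Map<Integer, String> readOnce() {
        try {
            Map<Integer, String> result = new HashMap<>();
            // ... populate result; this part may throw a RuntimeException ...
            readRetry.set(0);              // a successful read resets the counter
            return result;
        } catch (RuntimeException e) {
            if (readRetry.get() > 2) {     // retries exhausted, propagate the failure
                throw e;
            }
            readRetry.addAndGet(1);
            return null;                   // tell the caller to retry on the next cycle
        }
    }
}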
Use of org.apache.storm.generated.Assignment in project storm by apache.
The class WorkerState, method refreshConnections.
public void refreshConnections() {
    Assignment assignment = null;
    try {
        assignment = getLocalAssignment(stormClusterState, topologyId);
    } catch (Exception e) {
        LOG.warn("Failed to read assignment. This should only happen when topology is shutting down.", e);
    }
    suicideIfLocalAssignmentsChanged(assignment);
    Set<NodeInfo> neededConnections = new HashSet<>();
    Map<Integer, NodeInfo> newTaskToNodePort = new HashMap<>();
    if (null != assignment) {
        Map<Integer, NodeInfo> taskToNodePort = StormCommon.taskToNodeport(assignment.get_executor_node_port());
        for (Map.Entry<Integer, NodeInfo> taskToNodePortEntry : taskToNodePort.entrySet()) {
            Integer task = taskToNodePortEntry.getKey();
            if (outboundTasks.contains(task)) {
                newTaskToNodePort.put(task, taskToNodePortEntry.getValue());
                if (!localTaskIds.contains(task)) {
                    neededConnections.add(taskToNodePortEntry.getValue());
                }
            }
        }
    }
    final Set<NodeInfo> currentConnections = cachedNodeToPortSocket.get().keySet();
    final Set<NodeInfo> newConnections = Sets.difference(neededConnections, currentConnections);
    final Set<NodeInfo> removeConnections = Sets.difference(currentConnections, neededConnections);
    Map<String, String> nodeHost = assignment != null ? assignment.get_node_host() : null;
    // Add new connections atomically
    cachedNodeToPortSocket.getAndUpdate(prev -> {
        Map<NodeInfo, IConnection> next = new HashMap<>(prev);
        for (NodeInfo nodeInfo : newConnections) {
            next.put(nodeInfo, mqContext.connect(topologyId,
                                                 // Host
                                                 nodeHost.get(nodeInfo.get_node()),
                                                 // Port
                                                 nodeInfo.get_port().iterator().next().intValue(),
                                                 workerTransfer.getRemoteBackPressureStatus()));
        }
        return next;
    });
    try {
        endpointSocketLock.writeLock().lock();
        cachedTaskToNodePort.set(newTaskToNodePort);
    } finally {
        endpointSocketLock.writeLock().unlock();
    }
    // It is okay that cachedNodeToHost can be temporarily out of sync with cachedTaskToNodePort
    if (nodeHost != null) {
        cachedNodeToHost.set(nodeHost);
    } else {
        cachedNodeToHost.set(new HashMap<>());
    }
    for (NodeInfo nodeInfo : removeConnections) {
        cachedNodeToPortSocket.get().get(nodeInfo).close();
    }
    // Remove old connections atomically
    cachedNodeToPortSocket.getAndUpdate(prev -> {
        Map<NodeInfo, IConnection> next = new HashMap<>(prev);
        removeConnections.forEach(next::remove);
        return next;
    });
}
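
Two idioms carry this method: Guava's Sets.difference splits the needed endpoints into connections to open and connections to drop, and AtomicReference.getAndUpdate publishes each change by swapping in a copied map. A compact sketch of that pattern under the assumption that Guava is on the classpath (ConnectionCache, refresh, and open are illustrative names, not Storm APIs):

import com.google.common.collect.Sets;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

class ConnectionCache<K, C> {
    private final AtomicReference<Map<K, C>> cache = new AtomicReference<>(new HashMap<>());

    void refresh(Set<K> needed, Function<K, C> open) {
        Set<K> current = cache.get().keySet();
        Set<K> toAdd = Sets.difference(needed, current);
        Set<K> toRemove = Sets.difference(current, needed);
        // Publish the new connections by swapping in a copy of the map.
        cache.getAndUpdate(prev -> {
            Map<K, C> next = new HashMap<>(prev);
            for (K key : toAdd) {
                next.put(key, open.apply(key));
            }
            return next;
        });
        // Drop stale entries the same way (the Storm code closes each
        // connection before removing it from the map).
        cache.getAndUpdate(prev -> {
            Map<K, C> next = new HashMap<>(prev);
            toRemove.forEach(next::remove);
            return next;
        });
    }
}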
Use of org.apache.storm.generated.Assignment in project storm by apache.
The class Nimbus, method getTopologyPageInfo.
@Override
public TopologyPageInfo getTopologyPageInfo(String topoId, String window, boolean includeSys)
        throws NotAliveException, AuthorizationException, TException {
    try {
        getTopologyPageInfoCalls.mark();
        CommonTopoInfo common = getCommonTopoInfo(topoId, "getTopologyPageInfo");
        String topoName = common.topoName;
        IStormClusterState state = stormClusterState;
        Assignment assignment = common.assignment;
        Map<List<Integer>, Map<String, Object>> beats = common.beats;
        Map<Integer, String> taskToComp = common.taskToComponent;
        StormTopology topology = common.topology;
        StormBase base = common.base;
        if (base == null) {
            throw new WrappedNotAliveException(topoId);
        }
        String owner = base.get_owner();
        Map<WorkerSlot, WorkerResources> workerToResources = getWorkerResourcesForTopology(topoId);
        List<WorkerSummary> workerSummaries = null;
        Map<List<Long>, List<Object>> exec2NodePort = new HashMap<>();
        if (assignment != null) {
            Map<List<Long>, NodeInfo> execToNodeInfo = assignment.get_executor_node_port();
            Map<String, String> nodeToHost = assignment.get_node_host();
            for (Entry<List<Long>, NodeInfo> entry : execToNodeInfo.entrySet()) {
                NodeInfo ni = entry.getValue();
                List<Object> nodePort = Arrays.asList(ni.get_node(), ni.get_port_iterator().next());
                exec2NodePort.put(entry.getKey(), nodePort);
            }
            workerSummaries = StatsUtil.aggWorkerStats(topoId, topoName, taskToComp, beats, exec2NodePort, nodeToHost,
                                                       workerToResources, includeSys,
                                                       // this is the topology page, so we know the user is authorized
                                                       true, null, owner);
        }
        TopologyPageInfo topoPageInfo = StatsUtil.aggTopoExecsStats(topoId, exec2NodePort, taskToComp, beats, topology,
                                                                    window, includeSys, state);
        if (topology.is_set_storm_version()) {
            topoPageInfo.set_storm_version(topology.get_storm_version());
        }
        Map<String, Object> topoConf = Utils.merge(conf, common.topoConf);
        addSpoutAggStats(topoPageInfo, topology, topoConf);
        addBoltAggStats(topoPageInfo, topology, topoConf, includeSys);
        if (workerSummaries != null) {
            topoPageInfo.set_workers(workerSummaries);
        }
        if (base.is_set_owner()) {
            topoPageInfo.set_owner(base.get_owner());
        }
        if (base.is_set_topology_version()) {
            topoPageInfo.set_topology_version(base.get_topology_version());
        }
        String schedStatus = idToSchedStatus.get().get(topoId);
        if (schedStatus != null) {
            topoPageInfo.set_sched_status(schedStatus);
        }
        TopologyResources resources = getResourcesForTopology(topoId, base);
        if (resources != null && underlyingScheduler instanceof ResourceAwareScheduler) {
            topoPageInfo.set_requested_memonheap(resources.getRequestedMemOnHeap());
            topoPageInfo.set_requested_memoffheap(resources.getRequestedMemOffHeap());
            topoPageInfo.set_requested_cpu(resources.getRequestedCpu());
            topoPageInfo.set_assigned_memonheap(resources.getAssignedMemOnHeap());
            topoPageInfo.set_assigned_memoffheap(resources.getAssignedMemOffHeap());
            topoPageInfo.set_assigned_cpu(resources.getAssignedCpu());
            topoPageInfo.set_requested_shared_off_heap_memory(resources.getRequestedSharedMemOffHeap());
            topoPageInfo.set_requested_regular_off_heap_memory(resources.getRequestedNonSharedMemOffHeap());
            topoPageInfo.set_requested_shared_on_heap_memory(resources.getRequestedSharedMemOnHeap());
            topoPageInfo.set_requested_regular_on_heap_memory(resources.getRequestedNonSharedMemOnHeap());
            topoPageInfo.set_assigned_shared_off_heap_memory(resources.getAssignedSharedMemOffHeap());
            topoPageInfo.set_assigned_regular_off_heap_memory(resources.getAssignedNonSharedMemOffHeap());
            topoPageInfo.set_assigned_shared_on_heap_memory(resources.getAssignedSharedMemOnHeap());
            topoPageInfo.set_assigned_regular_on_heap_memory(resources.getAssignedNonSharedMemOnHeap());
            topoPageInfo.set_assigned_generic_resources(resources.getAssignedGenericResources());
            topoPageInfo.set_requested_generic_resources(resources.getRequestedGenericResources());
        }
        int launchTimeSecs = common.launchTimeSecs;
        topoPageInfo.set_name(topoName);
        topoPageInfo.set_status(extractStatusStr(base));
        topoPageInfo.set_uptime_secs(Time.deltaSecs(launchTimeSecs));
        topoPageInfo.set_topology_conf(JSONValue.toJSONString(topoConf));
        topoPageInfo.set_replication_count(getBlobReplicationCount(ConfigUtils.masterStormCodeKey(topoId)));
        if (base.is_set_component_debug()) {
            DebugOptions debug = base.get_component_debug().get(topoId);
            if (debug != null) {
                topoPageInfo.set_debug_options(debug);
            }
        }
        return topoPageInfo;
    } catch (Exception e) {
        LOG.warn("Get topo page info exception. (topology id='{}')", topoId, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
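
For context, getTopologyPageInfo is a Nimbus Thrift endpoint, so the same data the UI's topology page renders can be pulled from a plain client. A hedged sketch, assuming cluster defaults are readable via Utils.readStormConfig(); the topology id and the ":all-time" window string are placeholder arguments:

import java.util.Map;
import org.apache.storm.generated.TopologyPageInfo;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;

public class TopologyPageInfoExample {
    public static void main(String[] args) throws Exception {
        Map<String, Object> conf = Utils.readStormConfig();
        NimbusClient nimbus = NimbusClient.getConfiguredClient(conf);
        try {
            // Same signature as the Nimbus method above: topology id, time window, includeSys.
            TopologyPageInfo info = nimbus.getClient().getTopologyPageInfo("my-topo-id", ":all-time", false);
            System.out.println(info.get_name() + " is " + info.get_status());
        } finally {
            nimbus.close();
        }
    }
}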
Use of org.apache.storm.generated.Assignment in project storm by apache.
The class Nimbus, method computeTopologyToSchedulerAssignment.
/**
 * Convert assignment information in ZooKeeper to SchedulerAssignment, so it can be used by the scheduler API.
 *
 * @param existingAssignments      current assignments
 * @param topologyToAliveExecutors executors that are alive
 * @return topo ID to SchedulerAssignment
 */
private Map<String, SchedulerAssignmentImpl> computeTopologyToSchedulerAssignment(Map<String, Assignment> existingAssignments,
                                                                                  Map<String, Set<List<Integer>>> topologyToAliveExecutors) {
    Map<String, SchedulerAssignmentImpl> ret = new HashMap<>();
    for (Entry<String, Assignment> entry : existingAssignments.entrySet()) {
        String topoId = entry.getKey();
        Assignment assignment = entry.getValue();
        Set<List<Integer>> aliveExecutors = topologyToAliveExecutors.get(topoId);
        Map<List<Long>, NodeInfo> execToNodePort = assignment.get_executor_node_port();
        Map<NodeInfo, WorkerResources> workerToResources = assignment.get_worker_resources();
        Map<NodeInfo, WorkerSlot> nodePortToSlot = new HashMap<>();
        Map<WorkerSlot, WorkerResources> slotToResources = new HashMap<>();
        for (Entry<NodeInfo, WorkerResources> nodeAndResources : workerToResources.entrySet()) {
            NodeInfo info = nodeAndResources.getKey();
            WorkerResources resources = nodeAndResources.getValue();
            WorkerSlot slot = new WorkerSlot(info.get_node(), info.get_port_iterator().next());
            nodePortToSlot.put(info, slot);
            slotToResources.put(slot, resources);
        }
        Map<ExecutorDetails, WorkerSlot> execToSlot = new HashMap<>();
        for (Entry<List<Long>, NodeInfo> execAndNodePort : execToNodePort.entrySet()) {
            List<Integer> exec = asIntExec(execAndNodePort.getKey());
            NodeInfo info = execAndNodePort.getValue();
            if (aliveExecutors.contains(exec)) {
                execToSlot.put(new ExecutorDetails(exec.get(0), exec.get(1)), nodePortToSlot.get(info));
            }
        }
        ret.put(topoId, new SchedulerAssignmentImpl(topoId, execToSlot, slotToResources, null));
    }
    return ret;
}
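
The only non-obvious step above is the executor translation: in the Assignment Thrift struct an executor is a [startTask, endTask] pair of Longs, while the scheduler works with ExecutorDetails, and asIntExec (presumably) just narrows the Longs to Integers so the pair can be matched against topologyToAliveExecutors. A small hedged sketch of that conversion (toDetails is an illustrative helper, not a Storm API):

import java.util.Arrays;
import java.util.List;
import org.apache.storm.scheduler.ExecutorDetails;

class ExecConversionExample {
    // An executor stored as [startTask, endTask], e.g. a key of Assignment.get_executor_node_port().
    static ExecutorDetails toDetails(List<Long> exec) {
        return new ExecutorDetails(exec.get(0).intValue(), exec.get(1).intValue());
    }

    public static void main(String[] args) {
        ExecutorDetails details = toDetails(Arrays.asList(4L, 7L)); // executor covering tasks 4..7
        System.out.println(details.getStartTask() + ".." + details.getEndTask());
    }
}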
Use of org.apache.storm.generated.Assignment in project storm by apache.
The class Nimbus, method getWorkerResourcesForTopology.
private Map<WorkerSlot, WorkerResources> getWorkerResourcesForTopology(String topoId) {
    Map<WorkerSlot, WorkerResources> ret = idToWorkerResources.get().get(topoId);
    if (ret == null) {
        IStormClusterState state = stormClusterState;
        ret = new HashMap<>();
        Assignment assignment = state.assignmentInfo(topoId, null);
        if (assignment != null && assignment.is_set_worker_resources()) {
            for (Entry<NodeInfo, WorkerResources> entry : assignment.get_worker_resources().entrySet()) {
                NodeInfo ni = entry.getKey();
                WorkerSlot slot = new WorkerSlot(ni.get_node(), ni.get_port_iterator().next());
                ret.put(slot, entry.getValue());
            }
            idToWorkerResources.getAndUpdate(new Assoc<>(topoId, ret));
        }
    }
    return ret;
}
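
The Assoc call at the end is the caching step: once the per-slot resources have been computed from the Assignment, the result is published into idToWorkerResources by swapping in a copy of the map with the new key associated. A generic sketch of that idiom, assuming Assoc behaves as a copy-and-put operator applied via AtomicReference.getAndUpdate (ResourceCache and getOrCompute are illustrative names, not Storm APIs):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

class ResourceCache<K, V> {
    private final AtomicReference<Map<K, V>> cache = new AtomicReference<>(new HashMap<>());

    V getOrCompute(K key, Function<K, V> compute) {
        V cached = cache.get().get(key);
        if (cached != null) {
            return cached;
        }
        V fresh = compute.apply(key);
        // Mirror of idToWorkerResources.getAndUpdate(new Assoc<>(topoId, ret)):
        // publish by copying the previous map and adding the new entry.
        cache.getAndUpdate(prev -> {
            Map<K, V> next = new HashMap<>(prev);
            next.put(key, fresh);
            return next;
        });
        return fresh;
    }
}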