
Example 1 with Assignment

Use of org.apache.storm.generated.Assignment in project storm by apache.

From the class ReadClusterState, method readAssignments:

protected Map<Integer, LocalAssignment> readAssignments(Map<String, VersionedData<Assignment>> assignmentsSnapshot) {
    try {
        Map<Integer, LocalAssignment> portLA = new HashMap<>();
        for (Map.Entry<String, VersionedData<Assignment>> assignEntry : assignmentsSnapshot.entrySet()) {
            String topoId = assignEntry.getKey();
            Assignment assignment = assignEntry.getValue().getData();
            Map<Integer, LocalAssignment> portTasks = readMyExecutors(topoId, assignmentId, assignment);
            for (Map.Entry<Integer, LocalAssignment> entry : portTasks.entrySet()) {
                Integer port = entry.getKey();
                LocalAssignment la = entry.getValue();
                if (!portLA.containsKey(port)) {
                    portLA.put(port, la);
                } else {
                    throw new RuntimeException("Should not have multiple topologies assigned to one port " + port + " " + la + " " + portLA);
                }
            }
        }
        readRetry.set(0);
        return portLA;
    } catch (RuntimeException e) {
        if (readRetry.get() > 2) {
            throw e;
        } else {
            readRetry.addAndGet(1);
        }
        LOG.warn("{} : retrying {} of 3", e.getMessage(), readRetry.get());
        return null;
    }
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Assignment(org.apache.storm.generated.Assignment) LocalAssignment(org.apache.storm.generated.LocalAssignment) HashMap(java.util.HashMap) Map(java.util.Map) VersionedData(org.apache.storm.cluster.VersionedData)
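
readAssignments resolves each topology's Assignment snapshot into a per-port LocalAssignment map and guards the read with a bounded retry counter: a successful pass resets readRetry, while a RuntimeException increments it and returns null until the fourth consecutive failure is rethrown. Below is a minimal, self-contained sketch of that bounded-retry idiom; BoundedRetryReader, the reader supplier, and MAX_RETRIES are illustrative names, not Storm API.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

// Minimal sketch of the bounded-retry idiom used by readAssignments (illustrative names,
// not Storm API): reset the counter on success, swallow the first MAX_RETRIES failures,
// rethrow after that.
public class BoundedRetryReader<T> {
    private static final int MAX_RETRIES = 3;
    private final AtomicInteger readRetry = new AtomicInteger(0);

    public T readOrRetry(Supplier<T> reader) {
        try {
            T result = reader.get();
            readRetry.set(0);                     // a clean read resets the retry budget
            return result;
        } catch (RuntimeException e) {
            if (readRetry.get() >= MAX_RETRIES) {
                throw e;                          // out of retries: surface the failure
            }
            readRetry.addAndGet(1);
            // the caller is expected to try again on its next scheduled pass
            return null;
        }
    }
}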

Example 2 with Assignment

Use of org.apache.storm.generated.Assignment in project storm by apache.

From the class WorkerState, method refreshConnections:

public void refreshConnections() {
    Assignment assignment = null;
    try {
        assignment = getLocalAssignment(stormClusterState, topologyId);
    } catch (Exception e) {
        LOG.warn("Failed to read assignment. This should only happen when topology is shutting down.", e);
    }
    suicideIfLocalAssignmentsChanged(assignment);
    Set<NodeInfo> neededConnections = new HashSet<>();
    Map<Integer, NodeInfo> newTaskToNodePort = new HashMap<>();
    if (null != assignment) {
        Map<Integer, NodeInfo> taskToNodePort = StormCommon.taskToNodeport(assignment.get_executor_node_port());
        for (Map.Entry<Integer, NodeInfo> taskToNodePortEntry : taskToNodePort.entrySet()) {
            Integer task = taskToNodePortEntry.getKey();
            if (outboundTasks.contains(task)) {
                newTaskToNodePort.put(task, taskToNodePortEntry.getValue());
                if (!localTaskIds.contains(task)) {
                    neededConnections.add(taskToNodePortEntry.getValue());
                }
            }
        }
    }
    final Set<NodeInfo> currentConnections = cachedNodeToPortSocket.get().keySet();
    final Set<NodeInfo> newConnections = Sets.difference(neededConnections, currentConnections);
    final Set<NodeInfo> removeConnections = Sets.difference(currentConnections, neededConnections);
    Map<String, String> nodeHost = assignment != null ? assignment.get_node_host() : null;
    // Add new connections atomically
    cachedNodeToPortSocket.getAndUpdate(prev -> {
        Map<NodeInfo, IConnection> next = new HashMap<>(prev);
        for (NodeInfo nodeInfo : newConnections) {
            next.put(nodeInfo, mqContext.connect(topologyId,
                // Host
                nodeHost.get(nodeInfo.get_node()),
                // Port
                nodeInfo.get_port().iterator().next().intValue(),
                workerTransfer.getRemoteBackPressureStatus()));
        }
        return next;
    });
    try {
        endpointSocketLock.writeLock().lock();
        cachedTaskToNodePort.set(newTaskToNodePort);
    } finally {
        endpointSocketLock.writeLock().unlock();
    }
    // It is okay that cachedNodeToHost can be temporarily out of sync with cachedTaskToNodePort
    if (nodeHost != null) {
        cachedNodeToHost.set(nodeHost);
    } else {
        cachedNodeToHost.set(new HashMap<>());
    }
    for (NodeInfo nodeInfo : removeConnections) {
        cachedNodeToPortSocket.get().get(nodeInfo).close();
    }
    // Remove old connections atomically
    cachedNodeToPortSocket.getAndUpdate(prev -> {
        Map<NodeInfo, IConnection> next = new HashMap<>(prev);
        removeConnections.forEach(next::remove);
        return next;
    });
}
Also used : ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) IConnection(org.apache.storm.messaging.IConnection) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) IOException(java.io.IOException) Assignment(org.apache.storm.generated.Assignment) NodeInfo(org.apache.storm.generated.NodeInfo) Map(java.util.Map) ImmutableMap(org.apache.storm.shade.com.google.common.collect.ImmutableMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashSet(java.util.HashSet)
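
refreshConnections derives the connections to open and to drop as two set differences (needed minus current, current minus needed) and applies each change with a copy-on-write update on an AtomicReference, so readers always see a consistent map. A hedged sketch of that pattern follows; the endpoint strings, the openConnection function, and plain Guava Sets are stand-ins for NodeInfo, mqContext.connect(), and Storm's shaded Guava, not WorkerState itself.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import com.google.common.collect.Sets;   // Storm uses the shaded copy under org.apache.storm.shade

// Sketch of the diff-and-copy-on-write pattern behind refreshConnections.
public class ConnectionRefresher {
    private final AtomicReference<Map<String, AutoCloseable>> cachedConnections =
            new AtomicReference<>(new HashMap<>());

    public void refresh(Set<String> needed, Function<String, AutoCloseable> openConnection) throws Exception {
        Set<String> current = cachedConnections.get().keySet();
        Set<String> toAdd = new HashSet<>(Sets.difference(needed, current));
        Set<String> toRemove = new HashSet<>(Sets.difference(current, needed));

        // Add new connections atomically: copy the previous map, mutate the copy, publish it.
        cachedConnections.getAndUpdate(prev -> {
            Map<String, AutoCloseable> next = new HashMap<>(prev);
            for (String endpoint : toAdd) {
                next.put(endpoint, openConnection.apply(endpoint));
            }
            return next;
        });

        // Close stale connections first, then drop them from the published map.
        for (String endpoint : toRemove) {
            cachedConnections.get().get(endpoint).close();
        }
        cachedConnections.getAndUpdate(prev -> {
            Map<String, AutoCloseable> next = new HashMap<>(prev);
            toRemove.forEach(next::remove);
            return next;
        });
    }
}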

Example 3 with Assignment

Use of org.apache.storm.generated.Assignment in project storm by apache.

From the class Nimbus, method getTopologyPageInfo:

@Override
public TopologyPageInfo getTopologyPageInfo(String topoId, String window, boolean includeSys) throws NotAliveException, AuthorizationException, TException {
    try {
        getTopologyPageInfoCalls.mark();
        CommonTopoInfo common = getCommonTopoInfo(topoId, "getTopologyPageInfo");
        String topoName = common.topoName;
        IStormClusterState state = stormClusterState;
        Assignment assignment = common.assignment;
        Map<List<Integer>, Map<String, Object>> beats = common.beats;
        Map<Integer, String> taskToComp = common.taskToComponent;
        StormTopology topology = common.topology;
        StormBase base = common.base;
        if (base == null) {
            throw new WrappedNotAliveException(topoId);
        }
        String owner = base.get_owner();
        Map<WorkerSlot, WorkerResources> workerToResources = getWorkerResourcesForTopology(topoId);
        List<WorkerSummary> workerSummaries = null;
        Map<List<Long>, List<Object>> exec2NodePort = new HashMap<>();
        if (assignment != null) {
            Map<List<Long>, NodeInfo> execToNodeInfo = assignment.get_executor_node_port();
            Map<String, String> nodeToHost = assignment.get_node_host();
            for (Entry<List<Long>, NodeInfo> entry : execToNodeInfo.entrySet()) {
                NodeInfo ni = entry.getValue();
                List<Object> nodePort = Arrays.asList(ni.get_node(), ni.get_port_iterator().next());
                exec2NodePort.put(entry.getKey(), nodePort);
            }
            workerSummaries = StatsUtil.aggWorkerStats(topoId, topoName, taskToComp, beats, exec2NodePort,
                nodeToHost, workerToResources, includeSys,
                // this is the topology page, so we know the user is authorized
                true, null, owner);
        }
        TopologyPageInfo topoPageInfo = StatsUtil.aggTopoExecsStats(topoId, exec2NodePort, taskToComp, beats, topology, window, includeSys, state);
        if (topology.is_set_storm_version()) {
            topoPageInfo.set_storm_version(topology.get_storm_version());
        }
        Map<String, Object> topoConf = Utils.merge(conf, common.topoConf);
        addSpoutAggStats(topoPageInfo, topology, topoConf);
        addBoltAggStats(topoPageInfo, topology, topoConf, includeSys);
        if (workerSummaries != null) {
            topoPageInfo.set_workers(workerSummaries);
        }
        if (base.is_set_owner()) {
            topoPageInfo.set_owner(base.get_owner());
        }
        if (base.is_set_topology_version()) {
            topoPageInfo.set_topology_version(base.get_topology_version());
        }
        String schedStatus = idToSchedStatus.get().get(topoId);
        if (schedStatus != null) {
            topoPageInfo.set_sched_status(schedStatus);
        }
        TopologyResources resources = getResourcesForTopology(topoId, base);
        if (resources != null && underlyingScheduler instanceof ResourceAwareScheduler) {
            topoPageInfo.set_requested_memonheap(resources.getRequestedMemOnHeap());
            topoPageInfo.set_requested_memoffheap(resources.getRequestedMemOffHeap());
            topoPageInfo.set_requested_cpu(resources.getRequestedCpu());
            topoPageInfo.set_assigned_memonheap(resources.getAssignedMemOnHeap());
            topoPageInfo.set_assigned_memoffheap(resources.getAssignedMemOffHeap());
            topoPageInfo.set_assigned_cpu(resources.getAssignedCpu());
            topoPageInfo.set_requested_shared_off_heap_memory(resources.getRequestedSharedMemOffHeap());
            topoPageInfo.set_requested_regular_off_heap_memory(resources.getRequestedNonSharedMemOffHeap());
            topoPageInfo.set_requested_shared_on_heap_memory(resources.getRequestedSharedMemOnHeap());
            topoPageInfo.set_requested_regular_on_heap_memory(resources.getRequestedNonSharedMemOnHeap());
            topoPageInfo.set_assigned_shared_off_heap_memory(resources.getAssignedSharedMemOffHeap());
            topoPageInfo.set_assigned_regular_off_heap_memory(resources.getAssignedNonSharedMemOffHeap());
            topoPageInfo.set_assigned_shared_on_heap_memory(resources.getAssignedSharedMemOnHeap());
            topoPageInfo.set_assigned_regular_on_heap_memory(resources.getAssignedNonSharedMemOnHeap());
            topoPageInfo.set_assigned_generic_resources(resources.getAssignedGenericResources());
            topoPageInfo.set_requested_generic_resources(resources.getRequestedGenericResources());
        }
        int launchTimeSecs = common.launchTimeSecs;
        topoPageInfo.set_name(topoName);
        topoPageInfo.set_status(extractStatusStr(base));
        topoPageInfo.set_uptime_secs(Time.deltaSecs(launchTimeSecs));
        topoPageInfo.set_topology_conf(JSONValue.toJSONString(topoConf));
        topoPageInfo.set_replication_count(getBlobReplicationCount(ConfigUtils.masterStormCodeKey(topoId)));
        if (base.is_set_component_debug()) {
            DebugOptions debug = base.get_component_debug().get(topoId);
            if (debug != null) {
                topoPageInfo.set_debug_options(debug);
            }
        }
        return topoPageInfo;
    } catch (Exception e) {
        LOG.warn("Get topo page info exception. (topology id='{}')", topoId, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
Also used : TException(org.apache.storm.thrift.TException) HashMap(java.util.HashMap) StormTopology(org.apache.storm.generated.StormTopology) StormBase(org.apache.storm.generated.StormBase) DebugOptions(org.apache.storm.generated.DebugOptions) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) Assignment(org.apache.storm.generated.Assignment) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) ArrayList(java.util.ArrayList) List(java.util.List) IStormClusterState(org.apache.storm.cluster.IStormClusterState) WorkerResources(org.apache.storm.generated.WorkerResources) TopologyPageInfo(org.apache.storm.generated.TopologyPageInfo) WrappedNotAliveException(org.apache.storm.utils.WrappedNotAliveException) WorkerMetricPoint(org.apache.storm.generated.WorkerMetricPoint) DataPoint(org.apache.storm.metric.api.DataPoint) WrappedAuthorizationException(org.apache.storm.utils.WrappedAuthorizationException) IOException(java.io.IOException) IllegalStateException(org.apache.storm.generated.IllegalStateException) AlreadyAliveException(org.apache.storm.generated.AlreadyAliveException) WrappedInvalidTopologyException(org.apache.storm.utils.WrappedInvalidTopologyException) AuthorizationException(org.apache.storm.generated.AuthorizationException) NotAliveException(org.apache.storm.generated.NotAliveException) WrappedAlreadyAliveException(org.apache.storm.utils.WrappedAlreadyAliveException) InterruptedIOException(java.io.InterruptedIOException) KeyAlreadyExistsException(org.apache.storm.generated.KeyAlreadyExistsException) WrappedIllegalStateException(org.apache.storm.utils.WrappedIllegalStateException) KeyNotFoundException(org.apache.storm.generated.KeyNotFoundException) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) BindException(java.net.BindException) WorkerSummary(org.apache.storm.generated.WorkerSummary) NodeInfo(org.apache.storm.generated.NodeInfo) Map(java.util.Map) NavigableMap(java.util.NavigableMap) RotatingMap(org.apache.storm.utils.RotatingMap) ImmutableMap(org.apache.storm.shade.com.google.common.collect.ImmutableMap) TimeCacheMap(org.apache.storm.utils.TimeCacheMap)
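
getTopologyPageInfo is exposed over Nimbus's thrift interface, so the same data can be pulled from a client process. A hedged sketch using Storm's NimbusClient helper follows; the topology id and the ":all-time" window value are illustrative, and the exact helper signatures may vary between Storm versions.

import java.util.Map;
import org.apache.storm.generated.TopologyPageInfo;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;

// Hedged sketch: fetch the same TopologyPageInfo through the Nimbus thrift client.
// "mytopo-1-1234567890" and the ":all-time" window are illustrative values.
public class TopologyPageInfoClientSketch {
    public static void main(String[] args) throws Exception {
        Map<String, Object> conf = Utils.readStormConfig();
        NimbusClient client = NimbusClient.getConfiguredClient(conf);
        try {
            TopologyPageInfo info = client.getClient()
                .getTopologyPageInfo("mytopo-1-1234567890", ":all-time", false);
            System.out.println(info.get_name() + " status=" + info.get_status()
                + " uptimeSecs=" + info.get_uptime_secs());
        } finally {
            client.close();
        }
    }
}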

Example 4 with Assignment

Use of org.apache.storm.generated.Assignment in project storm by apache.

From the class Nimbus, method computeTopologyToSchedulerAssignment:

/**
 * Convert assignment information in zk to SchedulerAssignment, so it can be used by scheduler api.
 *
 * @param existingAssignments      current assignments
 * @param topologyToAliveExecutors executors that are alive
 * @return topo ID to schedulerAssignment
 */
private Map<String, SchedulerAssignmentImpl> computeTopologyToSchedulerAssignment(Map<String, Assignment> existingAssignments, Map<String, Set<List<Integer>>> topologyToAliveExecutors) {
    Map<String, SchedulerAssignmentImpl> ret = new HashMap<>();
    for (Entry<String, Assignment> entry : existingAssignments.entrySet()) {
        String topoId = entry.getKey();
        Assignment assignment = entry.getValue();
        Set<List<Integer>> aliveExecutors = topologyToAliveExecutors.get(topoId);
        Map<List<Long>, NodeInfo> execToNodePort = assignment.get_executor_node_port();
        Map<NodeInfo, WorkerResources> workerToResources = assignment.get_worker_resources();
        Map<NodeInfo, WorkerSlot> nodePortToSlot = new HashMap<>();
        Map<WorkerSlot, WorkerResources> slotToResources = new HashMap<>();
        for (Entry<NodeInfo, WorkerResources> nodeAndResources : workerToResources.entrySet()) {
            NodeInfo info = nodeAndResources.getKey();
            WorkerResources resources = nodeAndResources.getValue();
            WorkerSlot slot = new WorkerSlot(info.get_node(), info.get_port_iterator().next());
            nodePortToSlot.put(info, slot);
            slotToResources.put(slot, resources);
        }
        Map<ExecutorDetails, WorkerSlot> execToSlot = new HashMap<>();
        for (Entry<List<Long>, NodeInfo> execAndNodePort : execToNodePort.entrySet()) {
            List<Integer> exec = asIntExec(execAndNodePort.getKey());
            NodeInfo info = execAndNodePort.getValue();
            if (aliveExecutors.contains(exec)) {
                execToSlot.put(new ExecutorDetails(exec.get(0), exec.get(1)), nodePortToSlot.get(info));
            }
        }
        ret.put(topoId, new SchedulerAssignmentImpl(topoId, execToSlot, slotToResources, null));
    }
    return ret;
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) HashMap(java.util.HashMap) WorkerResources(org.apache.storm.generated.WorkerResources) Assignment(org.apache.storm.generated.Assignment) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) SchedulerAssignmentImpl(org.apache.storm.scheduler.SchedulerAssignmentImpl) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) NodeInfo(org.apache.storm.generated.NodeInfo) ArrayList(java.util.ArrayList) List(java.util.List)
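
The conversion relies on a small helper, asIntExec, that is not shown in the snippet. Judging from how its result is used (compared against the Set<List<Integer>> of alive executors and unpacked into an ExecutorDetails), it is assumed to narrow the thrift List<Long> executor range to a List<Integer>; a minimal sketch of such a helper, not the actual Nimbus implementation:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Assumed behaviour of the asIntExec helper referenced above: narrow the thrift
// [startTask, endTask] pair from List<Long> to List<Integer>. Illustrative only.
final class ExecUtil {
    static List<Integer> asIntExec(List<Long> exec) {
        return exec.stream().map(Long::intValue).collect(Collectors.toList());
    }

    public static void main(String[] args) {
        System.out.println(asIntExec(Arrays.asList(4L, 7L)));   // prints [4, 7]
    }
}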

Example 5 with Assignment

Use of org.apache.storm.generated.Assignment in project storm by apache.

From the class Nimbus, method getWorkerResourcesForTopology:

private Map<WorkerSlot, WorkerResources> getWorkerResourcesForTopology(String topoId) {
    Map<WorkerSlot, WorkerResources> ret = idToWorkerResources.get().get(topoId);
    if (ret == null) {
        IStormClusterState state = stormClusterState;
        ret = new HashMap<>();
        Assignment assignment = state.assignmentInfo(topoId, null);
        if (assignment != null && assignment.is_set_worker_resources()) {
            for (Entry<NodeInfo, WorkerResources> entry : assignment.get_worker_resources().entrySet()) {
                NodeInfo ni = entry.getKey();
                WorkerSlot slot = new WorkerSlot(ni.get_node(), ni.get_port_iterator().next());
                ret.put(slot, entry.getValue());
            }
            idToWorkerResources.getAndUpdate(new Assoc<>(topoId, ret));
        }
    }
    return ret;
}
Also used : Assignment(org.apache.storm.generated.Assignment) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) WorkerResources(org.apache.storm.generated.WorkerResources) NodeInfo(org.apache.storm.generated.NodeInfo) IStormClusterState(org.apache.storm.cluster.IStormClusterState)
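
On a cache miss the method rebuilds the map from the cluster-state Assignment and publishes it back through idToWorkerResources.getAndUpdate(new Assoc<>(topoId, ret)). Assoc is a small Nimbus helper not shown here; it is assumed to behave like the copy-on-write operator sketched below (illustrative, not the actual Storm class).

import java.util.HashMap;
import java.util.Map;
import java.util.function.UnaryOperator;

// Assumed shape of the Assoc helper used with AtomicReference.getAndUpdate above:
// copy the previous map, add one key/value, return the copy, so the cached map is
// republished without locking. Illustrative only.
final class Assoc<K, V> implements UnaryOperator<Map<K, V>> {
    private final K key;
    private final V value;

    Assoc(K key, V value) {
        this.key = key;
        this.value = value;
    }

    @Override
    public Map<K, V> apply(Map<K, V> prev) {
        Map<K, V> next = new HashMap<>(prev);
        next.put(key, value);
        return next;
    }
}

// Usage, mirroring idToWorkerResources.getAndUpdate(new Assoc<>(topoId, ret)):
//   AtomicReference<Map<String, Integer>> cache = new AtomicReference<>(new HashMap<>());
//   cache.getAndUpdate(new Assoc<>("topo-1", 42));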

Aggregations

Assignment (org.apache.storm.generated.Assignment): 25
HashMap (java.util.HashMap): 19
SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment): 14
List (java.util.List): 12
NodeInfo (org.apache.storm.generated.NodeInfo): 12
ArrayList (java.util.ArrayList): 11
HashSet (java.util.HashSet): 10
Map (java.util.Map): 9
IOException (java.io.IOException): 8
IStormClusterState (org.apache.storm.cluster.IStormClusterState): 7
InvalidTopologyException (org.apache.storm.generated.InvalidTopologyException): 7
KeyNotFoundException (org.apache.storm.generated.KeyNotFoundException): 7
WrappedNotAliveException (org.apache.storm.utils.WrappedNotAliveException): 7
InterruptedIOException (java.io.InterruptedIOException): 6
BindException (java.net.BindException): 6
AlreadyAliveException (org.apache.storm.generated.AlreadyAliveException): 6
AuthorizationException (org.apache.storm.generated.AuthorizationException): 6
IllegalStateException (org.apache.storm.generated.IllegalStateException): 6
KeyAlreadyExistsException (org.apache.storm.generated.KeyAlreadyExistsException): 6
NotAliveException (org.apache.storm.generated.NotAliveException): 6