
Example 26 with NodeInfo

use of org.apache.storm.generated.NodeInfo in project storm by apache.

the class LoadAwareShuffleGrouping method prepare.

@Override
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
    random = new Random();
    sourceNodeInfo = new NodeInfo(context.getAssignmentId(), Sets.newHashSet((long) context.getThisWorkerPort()));
    taskToNodePort = context.getTaskToNodePort();
    nodeToHost = context.getNodeToHost();
    this.targetTasks = targetTasks;
    capacity = targetTasks.size() == 1 ? 1 : Math.max(1000, targetTasks.size() * 5);
    conf = context.getConf();
    dnsToSwitchMapping = ReflectionUtils.newInstance((String) conf.get(Config.STORM_NETWORK_TOPOGRAPHY_PLUGIN));
    localityGroup = new HashMap<>();
    currentScope = LocalityScope.WORKER_LOCAL;
    higherBound = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_LOCALITYAWARE_HIGHER_BOUND));
    lowerBound = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_LOCALITYAWARE_LOWER_BOUND));
    rets = (List<Integer>[]) new List<?>[targetTasks.size()];
    int i = 0;
    for (int target : targetTasks) {
        rets[i] = Arrays.asList(target);
        orig.put(target, new IndexAndWeights(i));
        i++;
    }
    // can't leave choices empty, so initialize it the same way ShuffleGrouping does
    choices = new int[capacity];
    current = new AtomicInteger(0);
    // allocate another array to be switched
    prepareChoices = new int[capacity];
    updateRing(null);
}
Also used : Random(java.util.Random) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) NodeInfo(org.apache.storm.generated.NodeInfo) ArrayList(java.util.ArrayList) List(java.util.List)
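
The prepare method above builds sourceNodeInfo from the worker's own assignment id and port, then allocates two arrays: choices, which the hot path reads, and prepareChoices, which updateRing fills and swaps in. Below is a minimal sketch of that double-buffered ring idea, assuming a hypothetical standalone class (not Storm's LoadAwareShuffleGrouping, and filling the ring uniformly instead of by load):

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical illustration of the choices/prepareChoices double buffer:
// readers walk `choices` while a replacement ring is built and swapped in.
class RingSketch {
    private final int capacity;
    private volatile int[] choices;   // ring the hot path reads
    private int[] prepareChoices;     // ring being rebuilt off the hot path
    private final AtomicInteger current = new AtomicInteger(0);

    RingSketch(int numTargets) {
        // same sizing rule as prepare(): 1 for a single task, otherwise
        // at least 1000 slots so weights can be expressed smoothly
        capacity = numTargets == 1 ? 1 : Math.max(1000, numTargets * 5);
        choices = new int[capacity];
        prepareChoices = new int[capacity];
        rebuild(numTargets);
    }

    // fill the spare ring (here uniformly at random; Storm weights by load)
    // and swap it in
    synchronized void rebuild(int numTargets) {
        for (int i = 0; i < capacity; i++) {
            prepareChoices[i] = ThreadLocalRandom.current().nextInt(numTargets);
        }
        int[] tmp = choices;
        choices = prepareChoices;
        prepareChoices = tmp;
        current.set(0);
    }

    int nextTargetIndex() {
        int idx = current.getAndIncrement();
        if (idx < 0) {   // counter wrapped around Integer.MAX_VALUE
            current.set(0);
            idx = 0;
        }
        return choices[idx % capacity];
    }
}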

Example 27 with NodeInfo

use of org.apache.storm.generated.NodeInfo in project storm by apache.

the class Nimbus method assignmentChangedNodes.

/**
 * Diff the old and new assignments to find the nodes whose assigned executors have changed.
 *
 * @param oldAss the old assignment
 * @param newAss the new assignment
 * @return nodeId -> host map of the nodes whose assignments changed
 */
private static Map<String, String> assignmentChangedNodes(Assignment oldAss, Assignment newAss) {
    Map<List<Long>, NodeInfo> oldExecutorNodePort = null;
    Map<List<Long>, NodeInfo> newExecutorNodePort = null;
    Map<String, String> allNodeHost = new HashMap<>();
    if (oldAss != null) {
        oldExecutorNodePort = oldAss.get_executor_node_port();
        allNodeHost.putAll(oldAss.get_node_host());
    }
    if (newAss != null) {
        newExecutorNodePort = newAss.get_executor_node_port();
        allNodeHost.putAll(newAss.get_node_host());
    }
    // kill or newly submit
    if (oldAss == null || newAss == null) {
        return allNodeHost;
    } else {
        // rebalance
        Map<String, String> ret = new HashMap<>();
        for (Map.Entry<List<Long>, NodeInfo> entry : newExecutorNodePort.entrySet()) {
            NodeInfo newNodeInfo = entry.getValue();
            NodeInfo oldNodeInfo = oldExecutorNodePort.get(entry.getKey());
            if (null != oldNodeInfo) {
                if (!oldNodeInfo.equals(newNodeInfo)) {
                    ret.put(oldNodeInfo.get_node(), allNodeHost.get(oldNodeInfo.get_node()));
                    ret.put(newNodeInfo.get_node(), allNodeHost.get(newNodeInfo.get_node()));
                }
            } else {
                ret.put(newNodeInfo.get_node(), allNodeHost.get(newNodeInfo.get_node()));
            }
        }
        return ret;
    }
}
Also used : HashMap(java.util.HashMap) NodeInfo(org.apache.storm.generated.NodeInfo) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map) NavigableMap(java.util.NavigableMap) RotatingMap(org.apache.storm.utils.RotatingMap) ImmutableMap(org.apache.storm.shade.com.google.common.collect.ImmutableMap) TimeCacheMap(org.apache.storm.utils.TimeCacheMap)
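
assignmentChangedNodes relies on the thrift-generated NodeInfo.equals, which compares the node id together with the port set, so an executor that only moves to another port on the same node still marks that node as changed. A minimal sketch of the inputs it diffs, using hypothetical node and host names (the method itself is private to Nimbus, so this only illustrates the data it compares):

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

import org.apache.storm.generated.Assignment;
import org.apache.storm.generated.NodeInfo;

public class AssignmentDiffSketch {
    public static void main(String[] args) {
        // executor [1, 1] sits on node-a:6700 in the old assignment...
        Assignment oldAss = new Assignment();
        Map<List<Long>, NodeInfo> oldExec = new HashMap<>();
        oldExec.put(Arrays.asList(1L, 1L),
                new NodeInfo("node-a", new HashSet<>(Collections.singleton(6700L))));
        oldAss.set_executor_node_port(oldExec);
        oldAss.set_node_host(Collections.singletonMap("node-a", "host-a"));

        // ...and moves to node-b:6701 in the new assignment
        Assignment newAss = new Assignment();
        Map<List<Long>, NodeInfo> newExec = new HashMap<>();
        newExec.put(Arrays.asList(1L, 1L),
                new NodeInfo("node-b", new HashSet<>(Collections.singleton(6701L))));
        newAss.set_executor_node_port(newExec);
        newAss.set_node_host(Collections.singletonMap("node-b", "host-b"));

        // value-based equals over node id and port set is what drives the
        // rebalance branch above: both node-a and node-b would be reported
        NodeInfo before = oldAss.get_executor_node_port().get(Arrays.asList(1L, 1L));
        NodeInfo after = newAss.get_executor_node_port().get(Arrays.asList(1L, 1L));
        System.out.println("executor moved: " + !before.equals(after));   // true
    }
}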

Example 28 with NodeInfo

use of org.apache.storm.generated.NodeInfo in project storm by apache.

the class Nimbus method newlyAddedSlots.

private static Set<WorkerSlot> newlyAddedSlots(Assignment old, Assignment current) {
    Set<NodeInfo> oldSlots = new HashSet<>(old.get_executor_node_port().values());
    Set<NodeInfo> niRet = new HashSet<>(current.get_executor_node_port().values());
    niRet.removeAll(oldSlots);
    Set<WorkerSlot> ret = new HashSet<>();
    for (NodeInfo ni : niRet) {
        ret.add(new WorkerSlot(ni.get_node(), ni.get_port_iterator().next()));
    }
    return ret;
}
Also used : WorkerSlot(org.apache.storm.scheduler.WorkerSlot) NodeInfo(org.apache.storm.generated.NodeInfo) HashSet(java.util.HashSet)
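
The set difference in newlyAddedSlots works because the thrift-generated hashCode and equals on NodeInfo are value-based over the node id and port set, so a slot carried over from the old assignment is removed even though it is a different object instance. A minimal sketch of that difference plus the same NodeInfo-to-WorkerSlot conversion, with hypothetical node names:

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import org.apache.storm.generated.NodeInfo;
import org.apache.storm.scheduler.WorkerSlot;

public class NewSlotsSketch {
    public static void main(String[] args) {
        Set<NodeInfo> oldSlots = new HashSet<>();
        oldSlots.add(new NodeInfo("node-a", new HashSet<>(Collections.singleton(6700L))));

        Set<NodeInfo> currentSlots = new HashSet<>();
        // same node/port as before, but a distinct object instance
        currentSlots.add(new NodeInfo("node-a", new HashSet<>(Collections.singleton(6700L))));
        // a genuinely new slot
        currentSlots.add(new NodeInfo("node-b", new HashSet<>(Collections.singleton(6701L))));

        // value-based equals/hashCode removes the carried-over slot
        currentSlots.removeAll(oldSlots);

        for (NodeInfo ni : currentSlots) {
            // same conversion as newlyAddedSlots(): first port becomes the slot port
            WorkerSlot slot = new WorkerSlot(ni.get_node(), ni.get_port_iterator().next());
            System.out.println(slot);   // only the node-b slot is printed
        }
    }
}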

Example 29 with NodeInfo

use of org.apache.storm.generated.NodeInfo in project storm by apache.

the class Nimbus method lockingMkAssignments.

private void lockingMkAssignments(Map<String, Assignment> existingAssignments, Map<String, StormBase> bases, String scratchTopoId, List<String> assignedTopologyIds, IStormClusterState state, Map<String, TopologyDetails> tds) throws Exception {
    Topologies topologies = new Topologies(tds);
    synchronized (schedLock) {
        Map<String, SchedulerAssignment> newSchedulerAssignments = computeNewSchedulerAssignments(existingAssignments, topologies, bases, scratchTopoId);
        Map<String, Map<List<Long>, List<Object>>> topologyToExecutorToNodePort = computeTopoToExecToNodePort(newSchedulerAssignments, assignedTopologyIds);
        Map<String, Map<WorkerSlot, WorkerResources>> newAssignedWorkerToResources = computeTopoToNodePortToResources(newSchedulerAssignments);
        int nowSecs = Time.currentTimeSecs();
        Map<String, SupervisorDetails> basicSupervisorDetailsMap = basicSupervisorDetailsMap(state);
        // construct the final Assignments by adding start-times etc into it
        Map<String, Assignment> newAssignments = new HashMap<>();
        for (Entry<String, Map<List<Long>, List<Object>>> entry : topologyToExecutorToNodePort.entrySet()) {
            String topoId = entry.getKey();
            Map<List<Long>, List<Object>> execToNodePort = entry.getValue();
            if (execToNodePort == null) {
                execToNodePort = new HashMap<>();
            }
            Set<String> allNodes = new HashSet<>();
            for (List<Object> nodePort : execToNodePort.values()) {
                allNodes.add((String) nodePort.get(0));
            }
            Map<String, String> allNodeHost = new HashMap<>();
            Assignment existingAssignment = existingAssignments.get(topoId);
            if (existingAssignment != null) {
                allNodeHost.putAll(existingAssignment.get_node_host());
            }
            for (String node : allNodes) {
                String host = inimbus.getHostName(basicSupervisorDetailsMap, node);
                if (host != null) {
                    allNodeHost.put(node, host);
                }
            }
            Map<List<Long>, NodeInfo> execNodeInfo = null;
            if (existingAssignment != null) {
                execNodeInfo = existingAssignment.get_executor_node_port();
            }
            List<List<Long>> reassignExecutors = changedExecutors(execNodeInfo, execToNodePort);
            Map<List<Long>, Long> startTimes = new HashMap<>();
            if (existingAssignment != null) {
                startTimes.putAll(existingAssignment.get_executor_start_time_secs());
            }
            for (List<Long> id : reassignExecutors) {
                startTimes.put(id, (long) nowSecs);
            }
            Map<WorkerSlot, WorkerResources> workerToResources = newAssignedWorkerToResources.get(topoId);
            if (workerToResources == null) {
                workerToResources = new HashMap<>();
            }
            Assignment newAssignment = new Assignment((String) conf.get(Config.STORM_LOCAL_DIR));
            Map<String, String> justAssignedKeys = new HashMap<>(allNodeHost);
            // Modifies justAssignedKeys
            justAssignedKeys.keySet().retainAll(allNodes);
            newAssignment.set_node_host(justAssignedKeys);
            // convert NodePort to NodeInfo (again!!!).
            Map<List<Long>, NodeInfo> execToNodeInfo = new HashMap<>();
            for (Entry<List<Long>, List<Object>> execAndNodePort : execToNodePort.entrySet()) {
                List<Object> nodePort = execAndNodePort.getValue();
                NodeInfo ni = new NodeInfo();
                ni.set_node((String) nodePort.get(0));
                ni.add_to_port((Long) nodePort.get(1));
                execToNodeInfo.put(execAndNodePort.getKey(), ni);
            }
            newAssignment.set_executor_node_port(execToNodeInfo);
            newAssignment.set_executor_start_time_secs(startTimes);
            // do another conversion (let's just make this all common)
            Map<NodeInfo, WorkerResources> workerResources = new HashMap<>();
            for (Entry<WorkerSlot, WorkerResources> wr : workerToResources.entrySet()) {
                WorkerSlot nodePort = wr.getKey();
                NodeInfo ni = new NodeInfo();
                ni.set_node(nodePort.getNodeId());
                ni.add_to_port(nodePort.getPort());
                WorkerResources resources = wr.getValue();
                workerResources.put(ni, resources);
            }
            newAssignment.set_worker_resources(workerResources);
            TopologyDetails td = tds.get(topoId);
            newAssignment.set_owner(td.getTopologySubmitter());
            newAssignments.put(topoId, newAssignment);
        }
        boolean assignmentChanged = auditAssignmentChanges(existingAssignments, newAssignments);
        if (assignmentChanged) {
            LOG.debug("RESETTING id->resources and id->worker-resources cache!");
            idToResources.set(new HashMap<>());
            idToWorkerResources.set(new HashMap<>());
        }
        // only log/set when there's been a change to the assignment
        for (Entry<String, Assignment> entry : newAssignments.entrySet()) {
            String topoId = entry.getKey();
            Assignment assignment = entry.getValue();
            Assignment existingAssignment = existingAssignments.get(topoId);
            TopologyDetails td = topologies.getById(topoId);
            if (assignment.equals(existingAssignment)) {
                LOG.debug("Assignment for {} hasn't changed", topoId);
            } else {
                LOG.info("Setting new assignment for topology id {}: {}", topoId, assignment);
                state.setAssignment(topoId, assignment, td.getConf());
            }
        }
        // group assignments by node to see which nodes changed, then notify those nodes/supervisors to synchronize their owned assignments
        // because the number of existing assignments is small for every scheduling round,
        // we expect to notify supervisors at almost the same time
        Map<String, String> totalAssignmentsChangedNodes = new HashMap<>();
        for (Entry<String, Assignment> entry : newAssignments.entrySet()) {
            String topoId = entry.getKey();
            Assignment assignment = entry.getValue();
            Assignment existingAssignment = existingAssignments.get(topoId);
            totalAssignmentsChangedNodes.putAll(assignmentChangedNodes(existingAssignment, assignment));
        }
        notifySupervisorsAssignments(newAssignments, assignmentsDistributer, totalAssignmentsChangedNodes, basicSupervisorDetailsMap, getMetricsRegistry());
        Map<String, Collection<WorkerSlot>> addedSlots = new HashMap<>();
        for (Entry<String, Assignment> entry : newAssignments.entrySet()) {
            String topoId = entry.getKey();
            Assignment assignment = entry.getValue();
            Assignment existingAssignment = existingAssignments.get(topoId);
            if (existingAssignment == null) {
                existingAssignment = new Assignment();
                existingAssignment.set_executor_node_port(new HashMap<>());
                existingAssignment.set_executor_start_time_secs(new HashMap<>());
            }
            Set<WorkerSlot> newSlots = newlyAddedSlots(existingAssignment, assignment);
            addedSlots.put(topoId, newSlots);
        }
        inimbus.assignSlots(topologies, addedSlots);
    }
}
Also used : HashMap(java.util.HashMap) Assignment(org.apache.storm.generated.Assignment) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) Topologies(org.apache.storm.scheduler.Topologies) ArrayList(java.util.ArrayList) List(java.util.List) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) HashSet(java.util.HashSet) WorkerResources(org.apache.storm.generated.WorkerResources) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) WorkerMetricPoint(org.apache.storm.generated.WorkerMetricPoint) DataPoint(org.apache.storm.metric.api.DataPoint) NodeInfo(org.apache.storm.generated.NodeInfo) AtomicLong(java.util.concurrent.atomic.AtomicLong) Collection(java.util.Collection) Map(java.util.Map) NavigableMap(java.util.NavigableMap) RotatingMap(org.apache.storm.utils.RotatingMap) ImmutableMap(org.apache.storm.shade.com.google.common.collect.ImmutableMap) TimeCacheMap(org.apache.storm.utils.TimeCacheMap)
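
lockingMkAssignments converts scheduler-side node/port pairs (a List<Object> holding a String node id and a Long port) into thrift NodeInfo objects twice, once for executor placement and once for per-slot worker resources, as its own comments point out. A minimal sketch of the shared helpers that the "make this all common" remark hints at (hypothetical helper class, not part of Nimbus):

import java.util.List;

import org.apache.storm.generated.NodeInfo;
import org.apache.storm.scheduler.WorkerSlot;

// Hypothetical helpers factoring out the two inline conversions shown above.
final class NodeInfoConversions {

    private NodeInfoConversions() {
    }

    // scheduler-side [node, port] pair -> thrift NodeInfo
    static NodeInfo fromNodePort(List<Object> nodePort) {
        NodeInfo ni = new NodeInfo();
        ni.set_node((String) nodePort.get(0));
        ni.add_to_port((Long) nodePort.get(1));
        return ni;
    }

    // scheduler WorkerSlot -> thrift NodeInfo (used for the worker-resources map)
    static NodeInfo fromWorkerSlot(WorkerSlot slot) {
        NodeInfo ni = new NodeInfo();
        ni.set_node(slot.getNodeId());
        ni.add_to_port(slot.getPort());
        return ni;
    }
}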

Example 30 with NodeInfo

use of org.apache.storm.generated.NodeInfo in project storm by apache.

the class ReadClusterState method readMyExecutors.

protected Map<Integer, LocalAssignment> readMyExecutors(String topoId, String assignmentId, Assignment assignment) {
    Map<Integer, LocalAssignment> portTasks = new HashMap<>();
    Map<Long, WorkerResources> slotsResources = new HashMap<>();
    Map<NodeInfo, WorkerResources> nodeInfoWorkerResourcesMap = assignment.get_worker_resources();
    if (nodeInfoWorkerResourcesMap != null) {
        for (Map.Entry<NodeInfo, WorkerResources> entry : nodeInfoWorkerResourcesMap.entrySet()) {
            if (entry.getKey().get_node().startsWith(assignmentId)) {
                Set<Long> ports = entry.getKey().get_port();
                for (Long port : ports) {
                    slotsResources.put(port, entry.getValue());
                }
            }
        }
    }
    boolean hasShared = false;
    double amountShared = 0.0;
    if (assignment.is_set_total_shared_off_heap()) {
        Double d = assignment.get_total_shared_off_heap().get(assignmentId);
        if (d != null) {
            amountShared = d;
            hasShared = true;
        }
    }
    Map<List<Long>, NodeInfo> executorNodePort = assignment.get_executor_node_port();
    if (executorNodePort != null) {
        for (Map.Entry<List<Long>, NodeInfo> entry : executorNodePort.entrySet()) {
            if (entry.getValue().get_node().startsWith(assignmentId)) {
                for (Long port : entry.getValue().get_port()) {
                    LocalAssignment localAssignment = portTasks.get(port.intValue());
                    if (localAssignment == null) {
                        List<ExecutorInfo> executors = new ArrayList<>();
                        localAssignment = new LocalAssignment(topoId, executors);
                        if (slotsResources.containsKey(port)) {
                            localAssignment.set_resources(slotsResources.get(port));
                        }
                        if (hasShared) {
                            localAssignment.set_total_node_shared(amountShared);
                        }
                        if (assignment.is_set_owner()) {
                            localAssignment.set_owner(assignment.get_owner());
                        }
                        portTasks.put(port.intValue(), localAssignment);
                    }
                    List<ExecutorInfo> executorInfoList = localAssignment.get_executors();
                    executorInfoList.add(new ExecutorInfo(entry.getKey().get(0).intValue(), entry.getKey().get(entry.getKey().size() - 1).intValue()));
                }
            }
        }
    }
    return portTasks;
}
Also used : HashMap(java.util.HashMap) WorkerResources(org.apache.storm.generated.WorkerResources) ArrayList(java.util.ArrayList) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorInfo(org.apache.storm.generated.ExecutorInfo) NodeInfo(org.apache.storm.generated.NodeInfo) LocalAssignment(org.apache.storm.generated.LocalAssignment) List(java.util.List) Map(java.util.Map)
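
readMyExecutors keys LocalAssignment by worker port and packs each executor id range into an ExecutorInfo built from the first and last element of the map key. A minimal sketch of just that range packing, with hypothetical executor ranges:

import java.util.Arrays;
import java.util.List;

import org.apache.storm.generated.ExecutorInfo;

public class ExecutorRangeSketch {
    public static void main(String[] args) {
        // executor id ranges as they appear as keys of executor_node_port
        List<List<Long>> executorRanges = Arrays.asList(
                Arrays.asList(1L, 1L),    // single-task executor
                Arrays.asList(2L, 4L));   // executor covering tasks 2..4

        for (List<Long> range : executorRanges) {
            // same packing as readMyExecutors(): first and last element of the range
            ExecutorInfo info = new ExecutorInfo(
                    range.get(0).intValue(),
                    range.get(range.size() - 1).intValue());
            System.out.println(range + " -> " + info);
        }
    }
}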

Aggregations

NodeInfo (org.apache.storm.generated.NodeInfo): 30
HashMap (java.util.HashMap): 21
ArrayList (java.util.ArrayList): 18
List (java.util.List): 18
Map (java.util.Map): 15
Assignment (org.apache.storm.generated.Assignment): 13
HashSet (java.util.HashSet): 10
SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment): 9
ImmutableMap (org.apache.storm.shade.com.google.common.collect.ImmutableMap): 9
TimeCacheMap (org.apache.storm.utils.TimeCacheMap): 9
IOException (java.io.IOException): 7
NavigableMap (java.util.NavigableMap): 7
InvalidTopologyException (org.apache.storm.generated.InvalidTopologyException): 7
WorkerResources (org.apache.storm.generated.WorkerResources): 7
WorkerSlot (org.apache.storm.scheduler.WorkerSlot): 7
RotatingMap (org.apache.storm.utils.RotatingMap): 7
InterruptedIOException (java.io.InterruptedIOException): 6
BindException (java.net.BindException): 6
IStormClusterState (org.apache.storm.cluster.IStormClusterState): 6
AlreadyAliveException (org.apache.storm.generated.AlreadyAliveException): 6