Search in sources:

Example 1 with SupervisorDetails

Use of org.apache.storm.scheduler.SupervisorDetails in project storm by apache.

From the class Nimbus, method readAllSupervisorDetails:

/**
 * @param superToDeadPorts dead ports on each supervisor
 * @param topologies all of the topologies
 * @param missingAssignmentTopologies topologies that need assignments
 * @return a map: {supervisor-id -> SupervisorDetails}
 */
private Map<String, SupervisorDetails> readAllSupervisorDetails(Map<String, Set<Long>> superToDeadPorts, Topologies topologies, Collection<String> missingAssignmentTopologies) {
    Map<String, SupervisorDetails> ret = new HashMap<>();
    IStormClusterState state = stormClusterState;
    Map<String, SupervisorInfo> superInfos = state.allSupervisorInfo();
    List<SupervisorDetails> superDetails = new ArrayList<>();
    for (Entry<String, SupervisorInfo> entry : superInfos.entrySet()) {
        SupervisorInfo info = entry.getValue();
        superDetails.add(new SupervisorDetails(entry.getKey(), info.get_meta(), info.get_resources_map()));
    }
    // Note that allSlotsAvailableForScheduling
    // only uses the supervisor-details. The rest of the arguments
    // are there to satisfy the INimbus interface.
    Map<String, Set<Long>> superToPorts = new HashMap<>();
    for (WorkerSlot slot : inimbus.allSlotsAvailableForScheduling(superDetails, topologies, new HashSet<>(missingAssignmentTopologies))) {
        String superId = slot.getNodeId();
        Set<Long> ports = superToPorts.get(superId);
        if (ports == null) {
            ports = new HashSet<>();
            superToPorts.put(superId, ports);
        }
        ports.add((long) slot.getPort());
    }
    for (Entry<String, SupervisorInfo> entry : superInfos.entrySet()) {
        String superId = entry.getKey();
        SupervisorInfo info = entry.getValue();
        String hostname = info.get_hostname();
        // Hide the dead ports from the full port set;
        // these dead ports can be reused in the next round of assignments.
        Set<Long> deadPorts = superToDeadPorts.get(superId);
        Set<Long> allPorts = superToPorts.get(superId);
        if (allPorts == null) {
            allPorts = new HashSet<>();
        } else {
            allPorts = new HashSet<>(allPorts);
        }
        if (deadPorts != null) {
            allPorts.removeAll(deadPorts);
        }
        ret.put(superId, new SupervisorDetails(superId, hostname, info.get_scheduler_meta(), allPorts, info.get_resources_map()));
    }
    return ret;
}
Also used: Set (java.util.Set), HashSet (java.util.HashSet), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), SupervisorInfo (org.apache.storm.generated.SupervisorInfo), WorkerSlot (org.apache.storm.scheduler.WorkerSlot), AtomicLong (java.util.concurrent.atomic.AtomicLong), SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails), IStormClusterState (org.apache.storm.cluster.IStormClusterState)
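
As a quick aside, here is a minimal sketch of building and querying a SupervisorDetails by hand, using the same five-argument constructor that closes readAllSupervisorDetails above. The hostname, ports, and resource-map keys are made-up illustration values, and the getId/getHost/getAllPorts accessors are assumed from the scheduler API:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.apache.storm.scheduler.SupervisorDetails;

public class SupervisorDetailsSketch {
    public static void main(String[] args) {
        // Illustrative resource map; in Nimbus this comes from SupervisorInfo.get_resources_map().
        Map<String, Double> resources = new HashMap<>();
        resources.put("cpu.pcore.percent", 400.0);
        resources.put("memory.mb", 2000.0);
        // Same shape as the final constructor call in readAllSupervisorDetails:
        // (supervisor id, hostname, scheduler meta, live ports, resources).
        SupervisorDetails details = new SupervisorDetails(
            "sup-1", "host-1.example.com", null, Arrays.asList(6700L, 6701L), resources);
        System.out.println(details.getId());       // sup-1
        System.out.println(details.getHost());     // host-1.example.com
        System.out.println(details.getAllPorts()); // the two live ports
    }
}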

Example 2 with SupervisorDetails

Use of org.apache.storm.scheduler.SupervisorDetails in project storm by apache.

From the class Nimbus, method mkAssignments:

private void mkAssignments(String scratchTopoId) throws Exception {
    if (!isLeader()) {
        LOG.info("not a leader, skipping assignments");
        return;
    }
    // get the existing assignment (just the topologyToExecutorToNodePort map) -> default to {}
    // filter out the ones that have an executor timeout
    // figure out the available slots on the cluster; add to that the used valid slots to get the total slots
    // figure out how many executors should be in each slot (e.g., 4, 4, 4, 5)
    // only keep the existing slots that satisfy one of those counts; for the rest, reassign the executors across the remaining slots
    // edge case for slots with no executor timeout but with a supervisor timeout: just treat these as valid slots
    // that can be reassigned to; worst comes to worst, the executor will time out and won't be assigned here next time around
    IStormClusterState state = stormClusterState;
    //read all the topologies
    Map<String, StormBase> bases;
    Map<String, TopologyDetails> tds = new HashMap<>();
    synchronized (submitLock) {
        bases = state.topologyBases();
        for (Iterator<Entry<String, StormBase>> it = bases.entrySet().iterator(); it.hasNext(); ) {
            Entry<String, StormBase> entry = it.next();
            String id = entry.getKey();
            try {
                tds.put(id, readTopologyDetails(id, entry.getValue()));
            } catch (KeyNotFoundException e) {
                //A race happened and it is probably not running
                it.remove();
            }
        }
    }
    Topologies topologies = new Topologies(tds);
    List<String> assignedTopologyIds = state.assignments(null);
    Map<String, Assignment> existingAssignments = new HashMap<>();
    for (String id : assignedTopologyIds) {
        // skip the scratch topology: its slots will be treated as free slots in the scheduler code
        if (!id.equals(scratchTopoId)) {
            existingAssignments.put(id, state.assignmentInfo(id, null));
        }
    }
    // make the new assignments for topologies
    Map<String, SchedulerAssignment> newSchedulerAssignments = null;
    synchronized (schedLock) {
        newSchedulerAssignments = computeNewSchedulerAssignments(existingAssignments, topologies, bases, scratchTopoId);
        Map<String, Map<List<Long>, List<Object>>> topologyToExecutorToNodePort = computeNewTopoToExecToNodePort(newSchedulerAssignments, existingAssignments);
        for (String id : assignedTopologyIds) {
            if (!topologyToExecutorToNodePort.containsKey(id)) {
                topologyToExecutorToNodePort.put(id, null);
            }
        }
        Map<String, Map<List<Object>, List<Double>>> newAssignedWorkerToResources = computeTopoToNodePortToResources(newSchedulerAssignments);
        int nowSecs = Time.currentTimeSecs();
        Map<String, SupervisorDetails> basicSupervisorDetailsMap = basicSupervisorDetailsMap(state);
        //construct the final Assignments by adding start-times etc into it
        Map<String, Assignment> newAssignments = new HashMap<>();
        for (Entry<String, Map<List<Long>, List<Object>>> entry : topologyToExecutorToNodePort.entrySet()) {
            String topoId = entry.getKey();
            Map<List<Long>, List<Object>> execToNodePort = entry.getValue();
            if (execToNodePort == null) {
                // the put(id, null) above can leave a null here; default to "no executors"
                // so the NodeInfo conversion below does not NPE
                execToNodePort = new HashMap<>();
            }
            Assignment existingAssignment = existingAssignments.get(topoId);
            Set<String> allNodes = new HashSet<>();
            if (execToNodePort != null) {
                for (List<Object> nodePort : execToNodePort.values()) {
                    allNodes.add((String) nodePort.get(0));
                }
            }
            Map<String, String> allNodeHost = new HashMap<>();
            if (existingAssignment != null) {
                allNodeHost.putAll(existingAssignment.get_node_host());
            }
            for (String node : allNodes) {
                String host = inimbus.getHostName(basicSupervisorDetailsMap, node);
                if (host != null) {
                    allNodeHost.put(node, host);
                }
            }
            Map<List<Long>, NodeInfo> execNodeInfo = null;
            if (existingAssignment != null) {
                execNodeInfo = existingAssignment.get_executor_node_port();
            }
            List<List<Long>> reassignExecutors = changedExecutors(execNodeInfo, execToNodePort);
            Map<List<Long>, Long> startTimes = new HashMap<>();
            if (existingAssignment != null) {
                startTimes.putAll(existingAssignment.get_executor_start_time_secs());
            }
            for (List<Long> id : reassignExecutors) {
                startTimes.put(id, (long) nowSecs);
            }
            Map<List<Object>, List<Double>> workerToResources = newAssignedWorkerToResources.get(topoId);
            if (workerToResources == null) {
                // no resources were computed for this topology this round; avoid an NPE below
                workerToResources = new HashMap<>();
            }
            Assignment newAssignment = new Assignment((String) conf.get(Config.STORM_LOCAL_DIR));
            Map<String, String> justAssignedKeys = new HashMap<>(allNodeHost);
            //Modifies justAssignedKeys
            justAssignedKeys.keySet().retainAll(allNodes);
            newAssignment.set_node_host(justAssignedKeys);
            //convert NodePort to NodeInfo (again!!!).
            Map<List<Long>, NodeInfo> execToNodeInfo = new HashMap<>();
            for (Entry<List<Long>, List<Object>> execAndNodePort : execToNodePort.entrySet()) {
                List<Object> nodePort = execAndNodePort.getValue();
                NodeInfo ni = new NodeInfo();
                ni.set_node((String) nodePort.get(0));
                ni.add_to_port((Long) nodePort.get(1));
                execToNodeInfo.put(execAndNodePort.getKey(), ni);
            }
            newAssignment.set_executor_node_port(execToNodeInfo);
            newAssignment.set_executor_start_time_secs(startTimes);
            //do another conversion (let's just make this all common)
            Map<NodeInfo, WorkerResources> workerResources = new HashMap<>();
            for (Entry<List<Object>, List<Double>> wr : workerToResources.entrySet()) {
                List<Object> nodePort = wr.getKey();
                NodeInfo ni = new NodeInfo();
                ni.set_node((String) nodePort.get(0));
                ni.add_to_port((Long) nodePort.get(1));
                List<Double> r = wr.getValue();
                WorkerResources resources = new WorkerResources();
                resources.set_mem_on_heap(r.get(0));
                resources.set_mem_off_heap(r.get(1));
                resources.set_cpu(r.get(2));
                workerResources.put(ni, resources);
            }
            newAssignment.set_worker_resources(workerResources);
            newAssignments.put(topoId, newAssignment);
        }
        if (!newAssignments.equals(existingAssignments)) {
            LOG.debug("RESETTING id->resources and id->worker-resources cache!");
            idToResources.set(new HashMap<>());
            idToWorkerResources.set(new HashMap<>());
        }
        // only log/set when there's been a change to the assignment
        for (Entry<String, Assignment> entry : newAssignments.entrySet()) {
            String topoId = entry.getKey();
            Assignment assignment = entry.getValue();
            Assignment existingAssignment = existingAssignments.get(topoId);
            //NOT Used TopologyDetails topologyDetails = topologies.getById(topoId);
            if (assignment.equals(existingAssignment)) {
                LOG.debug("Assignment for {} hasn't changed", topoId);
            } else {
                LOG.info("Setting new assignment for topology id {}: {}", topoId, assignment);
                state.setAssignment(topoId, assignment);
            }
        }
        Map<String, Collection<WorkerSlot>> addedSlots = new HashMap<>();
        for (Entry<String, Assignment> entry : newAssignments.entrySet()) {
            String topoId = entry.getKey();
            Assignment assignment = entry.getValue();
            Assignment existingAssignment = existingAssignments.get(topoId);
            if (existingAssignment == null) {
                existingAssignment = new Assignment();
                existingAssignment.set_executor_node_port(new HashMap<>());
                existingAssignment.set_executor_start_time_secs(new HashMap<>());
            }
            Set<WorkerSlot> newSlots = newlyAddedSlots(existingAssignment, assignment);
            addedSlots.put(topoId, newSlots);
        }
        inimbus.assignSlots(topologies, addedSlots);
    }
}
Also used: HashMap (java.util.HashMap), StormBase (org.apache.storm.generated.StormBase), Assignment (org.apache.storm.generated.Assignment), SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment), ArrayList (java.util.ArrayList), List (java.util.List), IStormClusterState (org.apache.storm.cluster.IStormClusterState), HashSet (java.util.HashSet), WorkerResources (org.apache.storm.generated.WorkerResources), NodeInfo (org.apache.storm.generated.NodeInfo), AtomicLong (java.util.concurrent.atomic.AtomicLong), Collection (java.util.Collection), Map (java.util.Map), TimeCacheMap (org.apache.storm.utils.TimeCacheMap), ImmutableMap (com.google.common.collect.ImmutableMap), Entry (java.util.Map.Entry), WorkerSlot (org.apache.storm.scheduler.WorkerSlot), Topologies (org.apache.storm.scheduler.Topologies), SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails), TopologyDetails (org.apache.storm.scheduler.TopologyDetails), DataPoint (org.apache.storm.metric.api.DataPoint), KeyNotFoundException (org.apache.storm.generated.KeyNotFoundException)
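
Note that mkAssignments performs the same [node, port] to NodeInfo conversion twice, and its own comments flag the duplication. A hedged sketch of the helper that would factor it out, using only the NodeInfo setters already visible in the method body (the helper name is hypothetical):

// Hypothetical helper; relies only on the NodeInfo setters used in mkAssignments.
private static NodeInfo toNodeInfo(List<Object> nodePort) {
    NodeInfo ni = new NodeInfo();
    ni.set_node((String) nodePort.get(0));  // element 0: supervisor node id
    ni.add_to_port((Long) nodePort.get(1)); // element 1: worker port
    return ni;
}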

Example 3 with SupervisorDetails

Use of org.apache.storm.scheduler.SupervisorDetails in project storm by apache.

From the class TestResourceAwareScheduler, method testTopologyWorkerMaxHeapSize:

@Test
public void testTopologyWorkerMaxHeapSize() {
    // Test 1: verify that RAS spreads executors across multiple workers based on the per-worker limit the topology sets
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 400.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 2000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(2, 2, resourceMap);
    TopologyBuilder builder1 = new TopologyBuilder();
    builder1.setSpout("wordSpout1", new TestWordSpout(), 4);
    StormTopology stormTopology1 = builder1.createTopology();
    Config config1 = new Config();
    config1.putAll(defaultTopologyConf);
    config1.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 128.0);
    Map<ExecutorDetails, String> executorMap1 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology1);
    TopologyDetails topology1 = new TopologyDetails("topology1", config1, stormTopology1, 1, executorMap1, 0);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config1);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    Map<String, TopologyDetails> topoMap = new HashMap<>();
    topoMap.put(topology1.getId(), topology1);
    Topologies topologies = new Topologies(topoMap);
    rs.prepare(config1);
    rs.schedule(topologies, cluster);
    Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology1.getId()));
    Assert.assertEquals(4, cluster.getAssignedNumWorkers(topology1));
    // Test 2: no more workers are available due to the topology's worker max heap size limit, even though memory is still available.
    // wordSpout2 contains 5 executors that need scheduling, and each executor has a memory requirement of 128.0 MB.
    // The cluster contains 4 free WorkerSlots, and for this topology each worker is limited to a max heap size of 128.0 MB.
    // Thus one executor will not be able to get scheduled, which fails the scheduling of the topology: none of its executors will be scheduled.
    TopologyBuilder builder2 = new TopologyBuilder();
    builder2.setSpout("wordSpout2", new TestWordSpout(), 5);
    StormTopology stormTopology2 = builder2.createTopology();
    Config config2 = new Config();
    config2.putAll(defaultTopologyConf);
    config2.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 128.0);
    Map<ExecutorDetails, String> executorMap2 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology2);
    TopologyDetails topology2 = new TopologyDetails("topology2", config2, stormTopology2, 1, executorMap2, 0);
    cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config2);
    topoMap = new HashMap<>();
    topoMap.put(topology2.getId(), topology2);
    topologies = new Topologies(topoMap);
    rs.prepare(config2);
    rs.schedule(topologies, cluster);
    Assert.assertEquals("Not enough resources to schedule - 0/5 executors scheduled", cluster.getStatusMap().get(topology2.getId()));
    Assert.assertEquals(5, cluster.getUnassignedExecutors(topology2).size());
}
Also used: ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails), HashMap (java.util.HashMap), TopologyBuilder (org.apache.storm.topology.TopologyBuilder), Config (org.apache.storm.Config), StormTopology (org.apache.storm.generated.StormTopology), Cluster (org.apache.storm.scheduler.Cluster), INimbus (org.apache.storm.scheduler.INimbus), TopologyDetails (org.apache.storm.scheduler.TopologyDetails), SchedulerAssignmentImpl (org.apache.storm.scheduler.SchedulerAssignmentImpl), TestWordSpout (org.apache.storm.testing.TestWordSpout), Topologies (org.apache.storm.scheduler.Topologies), SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails), Test (org.junit.Test)
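
The arithmetic behind both assertions is worth spelling out: assuming each executor requests 128.0 MB of on-heap memory (taken here to come from defaultTopologyConf), a 128.0 MB worker max heap admits exactly one executor per worker, and genSupervisors(2, 2, ...) provides 2 supervisors with 2 ports each, i.e. 4 worker slots. A back-of-the-envelope sketch under those assumptions:

public class MaxHeapArithmeticSketch {
    public static void main(String[] args) {
        double onHeapPerExecutorMb = 128.0; // assumed per-executor on-heap request
        double workerMaxHeapMb = 128.0;     // TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB in both tests
        int executorsPerWorker = (int) (workerMaxHeapMb / onHeapPerExecutorMb); // 1
        int freeSlots = 2 * 2;              // genSupervisors(2, 2, ...): 2 supervisors x 2 ports
        System.out.println(4 <= freeSlots * executorsPerWorker); // true:  topology1 fully scheduled
        System.out.println(5 <= freeSlots * executorsPerWorker); // false: topology2 gets 0/5 scheduled
    }
}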

Example 4 with SupervisorDetails

Use of org.apache.storm.scheduler.SupervisorDetails in project storm by apache.

From the class TestResourceAwareScheduler, method sanityTestOfScheduling:

@Test
public void sanityTestOfScheduling() {
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 400.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 2000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(1, 2, resourceMap);
    Config config = new Config();
    config.putAll(defaultTopologyConf);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    TopologyDetails topology1 = TestUtilsForResourceAwareScheduler.getTopology("topology1", config, 1, 1, 1, 1, 0, 0);
    Map<String, TopologyDetails> topoMap = new HashMap<>();
    topoMap.put(topology1.getId(), topology1);
    Topologies topologies = new Topologies(topoMap);
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    SchedulerAssignment assignment = cluster.getAssignmentById(topology1.getId());
    Set<WorkerSlot> assignedSlots = assignment.getSlots();
    Set<String> nodesIDs = new HashSet<>();
    for (WorkerSlot slot : assignedSlots) {
        nodesIDs.add(slot.getNodeId());
    }
    Collection<ExecutorDetails> executors = assignment.getExecutors();
    Assert.assertEquals(1, assignedSlots.size());
    Assert.assertEquals(1, nodesIDs.size());
    Assert.assertEquals(2, executors.size());
    Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology1.getId()));
}
Also used: ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails), HashMap (java.util.HashMap), Config (org.apache.storm.Config), Cluster (org.apache.storm.scheduler.Cluster), INimbus (org.apache.storm.scheduler.INimbus), TopologyDetails (org.apache.storm.scheduler.TopologyDetails), SchedulerAssignmentImpl (org.apache.storm.scheduler.SchedulerAssignmentImpl), SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment), WorkerSlot (org.apache.storm.scheduler.WorkerSlot), Topologies (org.apache.storm.scheduler.Topologies), SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails), HashSet (java.util.HashSet), Test (org.junit.Test)
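
The same checks can also be phrased against the executor-to-slot map rather than the slot set. A sketch, assuming cluster and topology1 are set up exactly as in the test and reusing the getExecutorToSlot() accessor that Example 5 below relies on:

// Equivalent assertions over the executor-to-slot view of the assignment.
SchedulerAssignment assignment = cluster.getAssignmentById(topology1.getId());
Map<ExecutorDetails, WorkerSlot> execToSlot = assignment.getExecutorToSlot();
Assert.assertEquals(2, execToSlot.size());                         // the same 2 executors
Assert.assertEquals(1, new HashSet<>(execToSlot.values()).size()); // all packed onto 1 slot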

Example 5 with SupervisorDetails

Use of org.apache.storm.scheduler.SupervisorDetails in project storm by apache.

From the class TestResourceAwareScheduler, method TestMultipleSpoutsAndCyclicTopologies:

/**
 * Test multiple spouts and cyclic topologies.
 */
@Test
public void TestMultipleSpoutsAndCyclicTopologies() {
    TopologyBuilder builder = new TopologyBuilder();
    SpoutDeclarer s1 = builder.setSpout("spout-1", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    SpoutDeclarer s2 = builder.setSpout("spout-2", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    BoltDeclarer b1 = builder.setBolt("bolt-1", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("spout-1").shuffleGrouping("bolt-3");
    BoltDeclarer b2 = builder.setBolt("bolt-2", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-1");
    BoltDeclarer b3 = builder.setBolt("bolt-3", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-2").shuffleGrouping("spout-2");
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<String, Number>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 100.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(25, 1, resourceMap);
    Config config = new Config();
    config.putAll(Utils.readDefaultConfig());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
    config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
    config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 100.0);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    StormTopology stormTopology = builder.createTopology();
    TopologyDetails topo = new TopologyDetails("topo-1", config, stormTopology, 0, genExecsAndComps(stormTopology), 0);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
    config.put(Config.TOPOLOGY_SUBMITTER_USER, "jerry");
    Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
    topoMap.put(topo.getId(), topo);
    Topologies topologies = new Topologies(topoMap);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    Assert.assertTrue("Topo scheduled?", cluster.getAssignmentById(topo.getId()) != null);
    Assert.assertEquals("Topo all executors scheduled?", 25, cluster.getAssignmentById(topo.getId()).getExecutorToSlot().size());
}
Also used: TopologyBuilder (org.apache.storm.topology.TopologyBuilder), HashMap (java.util.HashMap), Config (org.apache.storm.Config), StormTopology (org.apache.storm.generated.StormTopology), SchedulerAssignmentImpl (org.apache.storm.scheduler.SchedulerAssignmentImpl), Topologies (org.apache.storm.scheduler.Topologies), SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails), Cluster (org.apache.storm.scheduler.Cluster), INimbus (org.apache.storm.scheduler.INimbus), TopologyDetails (org.apache.storm.scheduler.TopologyDetails), BoltDeclarer (org.apache.storm.topology.BoltDeclarer), SpoutDeclarer (org.apache.storm.topology.SpoutDeclarer), Test (org.junit.Test)
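
The expected count of 25 follows from the resource math: the topology declares 2 spouts and 3 bolts, each with parallelism 5, and every executor asks for 100% of a core plus 1000 MB (500 on-heap + 500 off-heap), which exactly saturates one of the genSupervisors(25, 1, ...) supervisors. A small sketch of that arithmetic:

public class CyclicTopologyCapacitySketch {
    public static void main(String[] args) {
        int executors = 2 * 5 + 3 * 5;           // 2 spouts + 3 bolts, parallelism 5 -> 25
        double cpuPerExecutor = 100.0;           // TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT
        double memPerExecutorMb = 500.0 + 500.0; // on-heap + off-heap MB
        double supervisorCpu = 100.0;            // per supervisor in genSupervisors(25, 1, ...)
        double supervisorMemMb = 1000.0;
        // Each executor fills a whole supervisor on both CPU and memory.
        int executorsPerSupervisor = (int) Math.min(supervisorCpu / cpuPerExecutor,
                                                    supervisorMemMb / memPerExecutorMb); // 1
        System.out.println(executors / executorsPerSupervisor);  // 25 supervisors needed
    }
}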

Aggregations

HashMap (java.util.HashMap): 32 uses
SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails): 32 uses
TopologyDetails (org.apache.storm.scheduler.TopologyDetails): 27 uses
Cluster (org.apache.storm.scheduler.Cluster): 24 uses
SchedulerAssignmentImpl (org.apache.storm.scheduler.SchedulerAssignmentImpl): 24 uses
Topologies (org.apache.storm.scheduler.Topologies): 24 uses
INimbus (org.apache.storm.scheduler.INimbus): 23 uses
Test (org.junit.Test): 23 uses
Config (org.apache.storm.Config): 22 uses
Map (java.util.Map): 19 uses
WorkerSlot (org.apache.storm.scheduler.WorkerSlot): 16 uses
ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails): 14 uses
SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment): 12 uses
ArrayList (java.util.ArrayList): 10 uses
StormTopology (org.apache.storm.generated.StormTopology): 8 uses
TopologyBuilder (org.apache.storm.topology.TopologyBuilder): 8 uses
HashSet (java.util.HashSet): 7 uses
LinkedList (java.util.LinkedList): 7 uses
List (java.util.List): 7 uses
ResourceAwareScheduler (org.apache.storm.scheduler.resource.ResourceAwareScheduler): 6 uses