Search in sources:

Example 1 with TopologyResources

Use of org.apache.storm.daemon.nimbus.TopologyResources in project storm by apache.

The class User, method getCpuResourceUsedByUser.

/**
 * Sums the CPU assigned to every scheduled topology owned by this user.
 *
 * @param cluster scheduling state used to look up the user's topologies and their assignments
 * @return total assigned CPU across the user's scheduled topologies; 0.0 if none are scheduled
 */
public double getCpuResourceUsedByUser(ISchedulingState cluster) {
    double totalCpu = 0.0;
    for (TopologyDetails topology : cluster.getTopologies().getTopologiesOwnedBy(userId)) {
        SchedulerAssignment topoAssignment = cluster.getAssignmentById(topology.getId());
        // Topologies that have not been scheduled yet contribute nothing.
        if (topoAssignment == null) {
            continue;
        }
        totalCpu += new TopologyResources(topology, topoAssignment).getAssignedCpu();
    }
    return totalCpu;
}
Also used : SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) TopologyResources(org.apache.storm.daemon.nimbus.TopologyResources)

Example 2 with TopologyResources

Use of org.apache.storm.daemon.nimbus.TopologyResources in project storm by apache.

The class TestDefaultResourceAwareStrategy, method testDefaultResourceAwareStrategySharedMemory.

/**
 * Verifies scheduling of a topology that declares all three shared-memory types
 * (on-heap within worker, off-heap within worker, off-heap within node) under each
 * {@code WorkerRestrictionType}: no restriction, one executor per worker, and one
 * component per worker. Asserts slot counts, node counts, and the assigned
 * shared/non-shared memory and CPU totals for each mode.
 */
@ParameterizedTest
@EnumSource(WorkerRestrictionType.class)
public void testDefaultResourceAwareStrategySharedMemory(WorkerRestrictionType schedulingLimitation) {
    // Topology shape: 2 spouts feeding a 3-stage chain of bolts, 2 executors each.
    int spoutParallelism = 2;
    int boltParallelism = 2;
    int numBolts = 3;
    // Per-executor resource requests.
    double cpuPercent = 10;
    double memoryOnHeap = 10;
    double memoryOffHeap = 10;
    // Shared-memory requests; one region of each kind is attached to a different bolt below.
    double sharedOnHeapWithinWorker = 400;
    double sharedOffHeapWithinNode = 700;
    double sharedOffHeapWithinWorker = 600;
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    builder.setBolt("bolt-1", new TestBolt(), boltParallelism).addSharedMemory(new SharedOffHeapWithinWorker(sharedOffHeapWithinWorker, "bolt-1 shared off heap within worker")).shuffleGrouping("spout");
    builder.setBolt("bolt-2", new TestBolt(), boltParallelism).addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapWithinNode, "bolt-2 shared off heap within node")).shuffleGrouping("bolt-1");
    builder.setBolt("bolt-3", new TestBolt(), boltParallelism).addSharedMemory(new SharedOnHeap(sharedOnHeapWithinWorker, "bolt-3 shared on heap within worker")).shuffleGrouping("bolt-2");
    StormTopology stormToplogy = builder.createTopology();
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 2000);
    Config conf = createClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null);
    conf.put(Config.TOPOLOGY_PRIORITY, 0);
    conf.put(Config.TOPOLOGY_NAME, "testTopology");
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
    // Apply the worker-placement restriction under test (none for WORKER_RESTRICTION_NONE).
    switch(schedulingLimitation) {
        case WORKER_RESTRICTION_ONE_EXECUTOR:
            conf.put(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER, true);
            break;
        case WORKER_RESTRICTION_ONE_COMPONENT:
            conf.put(Config.TOPOLOGY_RAS_ONE_COMPONENT_PER_WORKER, true);
            break;
    }
    // Build a single-topology cluster and run the resource-aware scheduler on it.
    TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormToplogy, 0, genExecsAndComps(stormToplogy), CURRENT_TIME, "user");
    Topologies topologies = new Topologies(topo);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(conf, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    // Sanity check: no supervisor may be scheduled beyond its CPU or memory capacity.
    for (Entry<String, SupervisorResources> entry : cluster.getSupervisorsResourcesMap().entrySet()) {
        String supervisorId = entry.getKey();
        SupervisorResources resources = entry.getValue();
        assertTrue(supervisorId, resources.getTotalCpu() >= resources.getUsedCpu());
        assertTrue(supervisorId, resources.getTotalMem() >= resources.getUsedMem());
    }
    // Collect scheduling results used by the per-mode assertions below.
    int totalNumberOfTasks = spoutParallelism + boltParallelism * numBolts;
    SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
    TopologyResources topologyResources = cluster.getTopologyResourcesMap().get(topo.getId());
    long numNodes = assignment.getSlotToExecutors().keySet().stream().map(WorkerSlot::getNodeId).distinct().count();
    // Human-readable placement summary, attached to assertions for easier failure diagnosis.
    String assignmentString = "Assignments:\n\t" + assignment.getSlotToExecutors().entrySet().stream().map(x -> String.format("Node=%s, components=%s", x.getKey().getNodeId(), x.getValue().stream().map(y -> topo.getComponentFromExecutor(y)).collect(Collectors.joining(",")))).collect(Collectors.joining("\n\t"));
    if (schedulingLimitation == WorkerRestrictionType.WORKER_RESTRICTION_NONE) {
        // Everything should fit in a single slot: each shared region is counted exactly once.
        double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
        double totalExpectedOnHeap = (totalNumberOfTasks * memoryOnHeap) + sharedOnHeapWithinWorker;
        double totalExpectedWorkerOffHeap = (totalNumberOfTasks * memoryOffHeap) + sharedOffHeapWithinWorker;
        assertThat(assignment.getSlots().size(), is(1));
        WorkerSlot ws = assignment.getSlots().iterator().next();
        String nodeId = ws.getNodeId();
        // Node-level shared off-heap memory is tracked per node, not per worker.
        assertThat(assignment.getNodeIdToTotalSharedOffHeapNodeMemory().size(), is(1));
        assertThat(assignment.getNodeIdToTotalSharedOffHeapNodeMemory().get(nodeId), closeTo(sharedOffHeapWithinNode, 0.01));
        assertThat(assignment.getScheduledResources().size(), is(1));
        WorkerResources resources = assignment.getScheduledResources().get(ws);
        assertThat(resources.get_cpu(), closeTo(totalExpectedCPU, 0.01));
        assertThat(resources.get_mem_on_heap(), closeTo(totalExpectedOnHeap, 0.01));
        assertThat(resources.get_mem_off_heap(), closeTo(totalExpectedWorkerOffHeap, 0.01));
        assertThat(resources.get_shared_mem_on_heap(), closeTo(sharedOnHeapWithinWorker, 0.01));
        assertThat(resources.get_shared_mem_off_heap(), closeTo(sharedOffHeapWithinWorker, 0.01));
    } else if (schedulingLimitation == WorkerRestrictionType.WORKER_RESTRICTION_ONE_EXECUTOR) {
        // 8 executors -> 8 single-executor workers across 2 nodes; the expectations below
        // imply each worker-shared region is counted twice and the node-shared region twice.
        double expectedMemOnHeap = (totalNumberOfTasks * memoryOnHeap) + 2 * sharedOnHeapWithinWorker;
        double expectedMemOffHeap = (totalNumberOfTasks * memoryOffHeap) + 2 * sharedOffHeapWithinWorker + 2 * sharedOffHeapWithinNode;
        double expectedMemSharedOnHeap = 2 * sharedOnHeapWithinWorker;
        double expectedMemSharedOffHeap = 2 * sharedOffHeapWithinWorker + 2 * sharedOffHeapWithinNode;
        double expectedMemNonSharedOnHeap = totalNumberOfTasks * memoryOnHeap;
        double expectedMemNonSharedOffHeap = totalNumberOfTasks * memoryOffHeap;
        assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(expectedMemOnHeap, 0.01));
        assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(expectedMemOffHeap, 0.01));
        assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(expectedMemSharedOnHeap, 0.01));
        assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(expectedMemSharedOffHeap, 0.01));
        assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(expectedMemNonSharedOnHeap, 0.01));
        assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(expectedMemNonSharedOffHeap, 0.01));
        double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
        assertThat(topologyResources.getAssignedCpu(), closeTo(totalExpectedCPU, 0.01));
        int numAssignedWorkers = cluster.getAssignedNumWorkers(topo);
        assertThat(numAssignedWorkers, is(8));
        assertThat(assignment.getSlots().size(), is(8));
        assertThat(assignmentString, numNodes, is(2L));
    } else if (schedulingLimitation == WorkerRestrictionType.WORKER_RESTRICTION_ONE_COMPONENT) {
        // 4 components -> 4 workers on a single node; each shared region is counted once.
        double expectedMemOnHeap = (totalNumberOfTasks * memoryOnHeap) + sharedOnHeapWithinWorker;
        double expectedMemOffHeap = (totalNumberOfTasks * memoryOffHeap) + sharedOffHeapWithinWorker + sharedOffHeapWithinNode;
        double expectedMemSharedOnHeap = sharedOnHeapWithinWorker;
        double expectedMemSharedOffHeap = sharedOffHeapWithinWorker + sharedOffHeapWithinNode;
        double expectedMemNonSharedOnHeap = totalNumberOfTasks * memoryOnHeap;
        double expectedMemNonSharedOffHeap = totalNumberOfTasks * memoryOffHeap;
        assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(expectedMemOnHeap, 0.01));
        assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(expectedMemOffHeap, 0.01));
        assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(expectedMemSharedOnHeap, 0.01));
        assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(expectedMemSharedOffHeap, 0.01));
        assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(expectedMemNonSharedOnHeap, 0.01));
        assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(expectedMemNonSharedOffHeap, 0.01));
        double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
        assertThat(topologyResources.getAssignedCpu(), closeTo(totalExpectedCPU, 0.01));
        int numAssignedWorkers = cluster.getAssignedNumWorkers(topo);
        assertThat(numAssignedWorkers, is(4));
        assertThat(assignment.getSlots().size(), is(4));
        assertThat(numNodes, is(1L));
    }
}
Also used : Arrays(java.util.Arrays) LoggerFactory(org.slf4j.LoggerFactory) INimbus(org.apache.storm.scheduler.INimbus) SupervisorResources(org.apache.storm.scheduler.SupervisorResources) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) Matchers.closeTo(org.hamcrest.Matchers.closeTo) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) Map(java.util.Map) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) NodeSorterHostProximity(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.NodeSorterHostProximity) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) DNSToSwitchMapping(org.apache.storm.networktopography.DNSToSwitchMapping) Collection(java.util.Collection) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) Collectors(java.util.stream.Collectors) SharedOnHeap(org.apache.storm.topology.SharedOnHeap) Test(org.junit.jupiter.api.Test) WorkerResources(org.apache.storm.generated.WorkerResources) List(java.util.List) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) Entry(java.util.Map.Entry) Config(org.apache.storm.Config) Matchers.is(org.hamcrest.Matchers.is) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) StormCommon(org.apache.storm.daemon.StormCommon) ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) IScheduler(org.apache.storm.scheduler.IScheduler) RasNode(org.apache.storm.scheduler.resource.RasNode) SharedOffHeapWithinNode(org.apache.storm.topology.SharedOffHeapWithinNode) NodeSorter(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.NodeSorter) EnumSource(org.junit.jupiter.params.provider.EnumSource) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Topologies(org.apache.storm.scheduler.Topologies) ServerUtils(org.apache.storm.utils.ServerUtils) 
StormTopology(org.apache.storm.generated.StormTopology) NormalizedResourcesExtension(org.apache.storm.scheduler.resource.normalization.NormalizedResourcesExtension) LinkedList(java.util.LinkedList) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) ValueSource(org.junit.jupiter.params.provider.ValueSource) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) SharedOffHeapWithinWorker(org.apache.storm.topology.SharedOffHeapWithinWorker) INodeSorter(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.INodeSorter) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) TopologyResources(org.apache.storm.daemon.nimbus.TopologyResources) Cluster(org.apache.storm.scheduler.Cluster) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) SchedulingResult(org.apache.storm.scheduler.resource.SchedulingResult) Nimbus(org.apache.storm.daemon.nimbus.Nimbus) AfterEach(org.junit.jupiter.api.AfterEach) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Assert(org.junit.Assert) Collections(java.util.Collections) SharedOnHeap(org.apache.storm.topology.SharedOnHeap) SharedOffHeapWithinWorker(org.apache.storm.topology.SharedOffHeapWithinWorker) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) SharedOffHeapWithinNode(org.apache.storm.topology.SharedOffHeapWithinNode) 
SupervisorResources(org.apache.storm.scheduler.SupervisorResources) WorkerResources(org.apache.storm.generated.WorkerResources) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) TopologyResources(org.apache.storm.daemon.nimbus.TopologyResources) EnumSource(org.junit.jupiter.params.provider.EnumSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 3 with TopologyResources

Use of org.apache.storm.daemon.nimbus.TopologyResources in project storm by apache.

The class TestDefaultResourceAwareStrategy, method testMultipleSharedMemoryWithOneExecutorPerWorker.

/**
 * Tests assigned-memory accounting for each {@code SharedMemoryType} when
 * TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER forces one executor per worker: the
 * expectations below show node-shared off-heap memory counted once per node,
 * while worker-shared memory (on- or off-heap) is counted once per worker.
 */
@ParameterizedTest
@EnumSource(SharedMemoryType.class)
public void testMultipleSharedMemoryWithOneExecutorPerWorker(SharedMemoryType memoryType) {
    // A single 4-executor spout; one shared-memory region of the type under test.
    int spoutParallelism = 4;
    double cpuPercent = 10;
    double memoryOnHeap = 10;
    double memoryOffHeap = 10;
    double sharedOnHeapWithinWorker = 450;
    double sharedOffHeapWithinNode = 600;
    double sharedOffHeapWithinWorker = 400;
    TopologyBuilder builder = new TopologyBuilder();
    switch(memoryType) {
        case SHARED_OFF_HEAP_NODE:
            builder.setSpout("spout", new TestSpout(), spoutParallelism).addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapWithinNode, "spout shared off heap within node"));
            break;
        case SHARED_OFF_HEAP_WORKER:
            builder.setSpout("spout", new TestSpout(), spoutParallelism).addSharedMemory(new SharedOffHeapWithinWorker(sharedOffHeapWithinWorker, "spout shared off heap within worker"));
            break;
        case SHARED_ON_HEAP_WORKER:
            builder.setSpout("spout", new TestSpout(), spoutParallelism).addSharedMemory(new SharedOnHeap(sharedOnHeapWithinWorker, "spout shared on heap within worker"));
            break;
    }
    StormTopology stormToplogy = builder.createTopology();
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 1000);
    Config conf = createClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null);
    conf.put(Config.TOPOLOGY_PRIORITY, 0);
    conf.put(Config.TOPOLOGY_NAME, "testTopology");
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
    // Restriction under test: every executor gets its own worker, so 4 workers total.
    conf.put(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER, true);
    TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormToplogy, 0, genExecsAndComps(stormToplogy), CURRENT_TIME, "user");
    Topologies topologies = new Topologies(topo);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(conf, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    TopologyResources topologyResources = cluster.getTopologyResourcesMap().get(topo.getId());
    SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
    long numNodes = assignment.getSlotToExecutors().keySet().stream().map(ws -> ws.getNodeId()).distinct().count();
    switch(memoryType) {
        case SHARED_OFF_HEAP_NODE:
            // 4 workers on single node. OffHeapNode memory is shared
            assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
            assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap + sharedOffHeapWithinNode, 0.01));
            assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(0, 0.01));
            assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(sharedOffHeapWithinNode, 0.01));
            assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
            assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
            assertThat(numNodes, is(1L));
            assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
            break;
        case SHARED_OFF_HEAP_WORKER:
            // 4 workers on 2 nodes. OffHeapWorker memory not shared -- consumed 4x, once for each worker)
            assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
            assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * (memoryOffHeap + sharedOffHeapWithinWorker), 0.01));
            assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(0, 0.01));
            assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(spoutParallelism * sharedOffHeapWithinWorker, 0.01));
            assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
            assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
            assertThat(numNodes, is(2L));
            assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
            break;
        case SHARED_ON_HEAP_WORKER:
            // 4 workers on 2 nodes. onHeap memory not shared -- consumed 4x, once for each worker
            assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * (memoryOnHeap + sharedOnHeapWithinWorker), 0.01));
            assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
            assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(spoutParallelism * sharedOnHeapWithinWorker, 0.01));
            assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(0, 0.01));
            assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
            assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
            assertThat(numNodes, is(2L));
            assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
            break;
    }
}
Also used : SharedOffHeapWithinNode(org.apache.storm.topology.SharedOffHeapWithinNode) SharedOnHeap(org.apache.storm.topology.SharedOnHeap) SharedOffHeapWithinWorker(org.apache.storm.topology.SharedOffHeapWithinWorker) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) TopologyResources(org.apache.storm.daemon.nimbus.TopologyResources) EnumSource(org.junit.jupiter.params.provider.EnumSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 4 with TopologyResources

Use of org.apache.storm.daemon.nimbus.TopologyResources in project storm by apache.

The class User, method getMemoryResourceUsedByUser.

/**
 * Sums the on-heap plus off-heap memory assigned to every scheduled topology owned by this user.
 *
 * @param cluster scheduling state used to look up the user's topologies and their assignments
 * @return total assigned memory across the user's scheduled topologies; 0.0 if none are scheduled
 */
public double getMemoryResourceUsedByUser(ISchedulingState cluster) {
    double totalMem = 0.0;
    for (TopologyDetails topology : cluster.getTopologies().getTopologiesOwnedBy(userId)) {
        SchedulerAssignment topoAssignment = cluster.getAssignmentById(topology.getId());
        // Skip topologies that have not been scheduled yet.
        if (topoAssignment == null) {
            continue;
        }
        TopologyResources resources = new TopologyResources(topology, topoAssignment);
        totalMem += resources.getAssignedMemOnHeap() + resources.getAssignedMemOffHeap();
    }
    return totalMem;
}
Also used : SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) TopologyResources(org.apache.storm.daemon.nimbus.TopologyResources)

Example 5 with TopologyResources

Use of org.apache.storm.daemon.nimbus.TopologyResources in project storm by apache.

The class Cluster, method getTopologyResourcesMap.

/**
 * Builds a map from topology id to its computed {@code TopologyResources} for every
 * known topology — including unscheduled ones, whose assignment lookup yields null
 * (presumably tolerated by the TopologyResources constructor; confirm against Nimbus).
 *
 * @return map of topology id to its resources
 */
@Override
public Map<String, TopologyResources> getTopologyResourcesMap() {
    // Presize using the assignment count as a capacity hint.
    Map<String, TopologyResources> resourcesByTopology = new HashMap<>(assignments.size());
    for (TopologyDetails topology : topologies.getTopologies()) {
        String topologyId = topology.getId();
        resourcesByTopology.put(topologyId, new TopologyResources(topology, assignments.get(topologyId)));
    }
    return resourcesByTopology;
}
Also used : HashMap(java.util.HashMap) TopologyResources(org.apache.storm.daemon.nimbus.TopologyResources)

Aggregations

TopologyResources (org.apache.storm.daemon.nimbus.TopologyResources)5 SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment)4 TopologyDetails (org.apache.storm.scheduler.TopologyDetails)4 HashMap (java.util.HashMap)2 Config (org.apache.storm.Config)2 StormTopology (org.apache.storm.generated.StormTopology)2 StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry)2 Cluster (org.apache.storm.scheduler.Cluster)2 INimbus (org.apache.storm.scheduler.INimbus)2 SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails)2 Topologies (org.apache.storm.scheduler.Topologies)2 ResourceAwareScheduler (org.apache.storm.scheduler.resource.ResourceAwareScheduler)2 TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler)2 ResourceMetrics (org.apache.storm.scheduler.resource.normalization.ResourceMetrics)2 SharedOffHeapWithinNode (org.apache.storm.topology.SharedOffHeapWithinNode)2 SharedOffHeapWithinWorker (org.apache.storm.topology.SharedOffHeapWithinWorker)2 SharedOnHeap (org.apache.storm.topology.SharedOnHeap)2 TopologyBuilder (org.apache.storm.topology.TopologyBuilder)2 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)2 EnumSource (org.junit.jupiter.params.provider.EnumSource)2