
Example 1 with StormTopology

Use of org.apache.storm.generated.StormTopology in the apache/storm project.

From the class TestResourceAwareScheduler, method TestMultipleSpoutsAndCyclicTopologies:

/**
 * Test multiple spouts and cyclic topologies.
 */
@Test
public void TestMultipleSpoutsAndCyclicTopologies() {
    TopologyBuilder builder = new TopologyBuilder();
    SpoutDeclarer s1 = builder.setSpout("spout-1", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    SpoutDeclarer s2 = builder.setSpout("spout-2", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    BoltDeclarer b1 = builder.setBolt("bolt-1", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("spout-1").shuffleGrouping("bolt-3");
    BoltDeclarer b2 = builder.setBolt("bolt-2", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-1");
    BoltDeclarer b3 = builder.setBolt("bolt-3", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-2").shuffleGrouping("spout-2");
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<String, Number>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 100.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(25, 1, resourceMap);
    Config config = new Config();
    config.putAll(Utils.readDefaultConfig());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
    config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
    config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 100.0);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    StormTopology stormTopology = builder.createTopology();
    TopologyDetails topo = new TopologyDetails("topo-1", config, stormTopology, 0, genExecsAndComps(stormTopology), 0);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
    config.put(Config.TOPOLOGY_SUBMITTER_USER, "jerry");
    Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
    topoMap.put(topo.getId(), topo);
    Topologies topologies = new Topologies(topoMap);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    Assert.assertTrue("Topo scheduled?", cluster.getAssignmentById(topo.getId()) != null);
    Assert.assertEquals("Topo all executors scheduled?", 25, cluster.getAssignmentById(topo.getId()).getExecutorToSlot().size());
}
Also used : TopologyBuilder(org.apache.storm.topology.TopologyBuilder) HashMap(java.util.HashMap) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) SchedulerAssignmentImpl(org.apache.storm.scheduler.SchedulerAssignmentImpl) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) BoltDeclarer(org.apache.storm.topology.BoltDeclarer) SpoutDeclarer(org.apache.storm.topology.SpoutDeclarer) Test(org.junit.Test)
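
For context on the numbers: the topology has 5 components at parallelism 5, i.e. 25 executors, each requesting 100% of a core and 1000 MB of memory (500 MB on-heap plus 500 MB off-heap), while each of the 25 generated supervisors offers exactly 100% CPU and 1000 MB. A standalone back-of-the-envelope sketch (not part of the test above) shows why each supervisor can host exactly one executor and all 25 supervisors end up used:

// Capacity check mirroring the resource settings used in the test above.
int executors = 5 * 5;                      // 5 components, parallelism 5 each
double cpuPerExecutor = 100.0;              // TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT
double memPerExecutor = 500.0 + 500.0;      // on-heap + off-heap MB per executor
double cpuPerSupervisor = 100.0;            // SUPERVISOR_CPU_CAPACITY
double memPerSupervisor = 1000.0;           // SUPERVISOR_MEMORY_CAPACITY_MB
int fitPerSupervisor = (int) Math.min(cpuPerSupervisor / cpuPerExecutor, memPerSupervisor / memPerExecutor);  // = 1
int supervisorsNeeded = executors / fitPerSupervisor;  // = 25, matching the asserted executor count
System.out.println(supervisorsNeeded + " supervisors needed for " + executors + " executors");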

Example 2 with StormTopology

Use of org.apache.storm.generated.StormTopology in the apache/storm project.

From the class TestUtilsForResourceAwareScheduler, method getTopology:

public static TopologyDetails getTopology(String name, Map config, int numSpout, int numBolt, int spoutParallelism, int boltParallelism, int launchTime, int priority) {
    Config conf = new Config();
    conf.putAll(config);
    conf.put(Config.TOPOLOGY_PRIORITY, priority);
    conf.put(Config.TOPOLOGY_NAME, name);
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    StormTopology topology = buildTopology(numSpout, numBolt, spoutParallelism, boltParallelism);
    TopologyDetails topo = new TopologyDetails(name + "-" + launchTime, conf, topology, 0, genExecsAndComps(topology), launchTime);
    return topo;
}
Also used : Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) TopologyDetails(org.apache.storm.scheduler.TopologyDetails)
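
A hedged usage sketch of this helper (the topology name, counts and priority below are illustration values, not taken from the Storm test suite):

Map<String, Object> baseConf = new HashMap<>();
TopologyDetails details = TestUtilsForResourceAwareScheduler.getTopology(
        "topo-test",   // name (illustrative)
        baseConf,      // extra config copied into the topology's Config
        1, 1,          // numSpout, numBolt
        2, 2,          // spoutParallelism, boltParallelism
        0,             // launchTime
        10);           // priority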

Example 3 with StormTopology

Use of org.apache.storm.generated.StormTopology in the apache/storm project.

From the class Nimbus, method computeExecutorToComponent:

private Map<List<Integer>, String> computeExecutorToComponent(String topoId, StormBase base) throws KeyNotFoundException, AuthorizationException, InvalidTopologyException, IOException {
    BlobStore store = blobStore;
    List<List<Integer>> executors = computeExecutors(topoId, base);
    StormTopology topology = readStormTopologyAsNimbus(topoId, store);
    Map<String, Object> topoConf = readTopoConfAsNimbus(topoId, store);
    Map<Integer, String> taskToComponent = StormCommon.stormTaskInfo(topology, topoConf);
    Map<List<Integer>, String> ret = new HashMap<>();
    for (List<Integer> executor : executors) {
        ret.put(executor, taskToComponent.get(executor.get(0)));
    }
    return ret;
}
Also used : HashMap(java.util.HashMap) StormTopology(org.apache.storm.generated.StormTopology) ArrayList(java.util.ArrayList) List(java.util.List) BlobStore(org.apache.storm.blobstore.BlobStore) LocalFsBlobStore(org.apache.storm.blobstore.LocalFsBlobStore)
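
Each executor is a [startTask, endTask] pair, and all tasks of a given executor belong to the same component, so looking up the component of the first task (executor.get(0)) is sufficient. A standalone sketch of the same mapping with hard-coded data (assuming the usual java.util imports; this is not the Nimbus code):

Map<Integer, String> taskToComponent = new HashMap<>();
taskToComponent.put(1, "spout-1");
taskToComponent.put(2, "spout-1");
taskToComponent.put(3, "bolt-1");
taskToComponent.put(4, "bolt-1");
List<List<Integer>> executors = Arrays.asList(
        Arrays.asList(1, 2),    // executor running tasks 1..2
        Arrays.asList(3, 4));   // executor running tasks 3..4
Map<List<Integer>, String> executorToComponent = new HashMap<>();
for (List<Integer> executor : executors) {
    executorToComponent.put(executor, taskToComponent.get(executor.get(0)));
}
System.out.println(executorToComponent);   // e.g. {[1, 2]=spout-1, [3, 4]=bolt-1}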

Example 4 with StormTopology

Use of org.apache.storm.generated.StormTopology in the apache/storm project.

From the class StormCommon, method validateIds:

@SuppressWarnings("unchecked")
private static void validateIds(StormTopology topology) throws InvalidTopologyException {
    List<String> componentIds = new ArrayList<>();
    for (StormTopology._Fields field : Thrift.getTopologyFields()) {
        if (!ThriftTopologyUtils.isWorkerHook(field) && !ThriftTopologyUtils.isDependencies(field)) {
            Object value = topology.getFieldValue(field);
            Map<String, Object> componentMap = (Map<String, Object>) value;
            componentIds.addAll(componentMap.keySet());
            for (String id : componentMap.keySet()) {
                if (Utils.isSystemId(id)) {
                    throw new InvalidTopologyException(id + " is not a valid component id.");
                }
            }
            for (Object componentObj : componentMap.values()) {
                ComponentCommon common = getComponentCommon(componentObj);
                Set<String> streamIds = common.get_streams().keySet();
                for (String id : streamIds) {
                    if (Utils.isSystemId(id)) {
                        throw new InvalidTopologyException(id + " is not a valid stream id.");
                    }
                }
            }
        }
    }
    List<String> offending = Utils.getRepeat(componentIds);
    if (!offending.isEmpty()) {
        throw new InvalidTopologyException("Duplicate component ids: " + offending);
    }
}
Also used : ComponentCommon(org.apache.storm.generated.ComponentCommon) StormTopology(org.apache.storm.generated.StormTopology) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) ArrayList(java.util.ArrayList) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap)
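
The check rejects component and stream ids that collide with Storm-internal system ids (ids starting with a double underscore, e.g. __acker), and rejects component ids that appear more than once across spouts, bolts and state spouts. A minimal sketch of the duplicate check, roughly what Utils.getRepeat provides (this helper is illustrative, not the Storm implementation):

static List<String> findRepeats(List<String> ids) {
    Set<String> seen = new HashSet<>();
    List<String> repeats = new ArrayList<>();
    for (String id : ids) {
        if (!seen.add(id)) {    // add() returns false when the id was already present
            repeats.add(id);
        }
    }
    return repeats;
}
// findRepeats(Arrays.asList("spout-1", "bolt-1", "bolt-1")) yields [bolt-1]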

Example 5 with StormTopology

Use of org.apache.storm.generated.StormTopology in the telstra/open-kilda project.

From the class OpenTSDBTopologyTest, method shouldSendDatapointRequestsTwice:

@Ignore
@Test
public void shouldSendDatapointRequestsTwice() throws Exception {
    Datapoint datapoint1 = new Datapoint("metric", timestamp, Collections.emptyMap(), 123);
    String jsonDatapoint1 = MAPPER.writeValueAsString(datapoint1);
    Datapoint datapoint2 = new Datapoint("metric", timestamp, Collections.emptyMap(), 456);
    String jsonDatapoint2 = MAPPER.writeValueAsString(datapoint2);
    MockedSources sources = new MockedSources();
    sources.addMockData(Topic.OTSDB + "-spout", new Values(jsonDatapoint1), new Values(jsonDatapoint2));
    completeTopologyParam.setMockedSources(sources);
    Testing.withTrackedCluster(clusterParam, (cluster) -> {
        OpenTSDBTopology topology = new TestingTargetTopology(new TestingKafkaBolt());
        StormTopology stormTopology = topology.createTopology();
        Testing.completeTopology(cluster, stormTopology, completeTopologyParam);
    });
    // verify that the request is sent to the OpenTSDB server twice
    mockServer.verify(HttpRequest.request(), VerificationTimes.exactly(2));
}
Also used : Datapoint(org.openkilda.messaging.info.Datapoint) MockedSources(org.apache.storm.testing.MockedSources) StormTopology(org.apache.storm.generated.StormTopology) Values(org.apache.storm.tuple.Values) TestingKafkaBolt(org.openkilda.wfm.topology.TestingKafkaBolt) Ignore(org.junit.Ignore) StableAbstractStormTest(org.openkilda.wfm.StableAbstractStormTest) Test(org.junit.Test)
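
The test leans on Storm's local testing support: MockedSources replaces the real spout's output with fixed tuples, and Testing.completeTopology drives the topology to completion inside the tracked local cluster before the MockServer expectation is checked. A minimal sketch of the mocking part on its own (the spout id and values are illustrative):

MockedSources sources = new MockedSources();
// each Values(...) becomes one tuple emitted by the spout declared with id "my-spout"
sources.addMockData("my-spout", new Values("first"), new Values("second"));
CompleteTopologyParam params = new CompleteTopologyParam();
params.setMockedSources(sources);
// params is then passed to Testing.completeTopology(cluster, topology, params)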

Aggregations

StormTopology (org.apache.storm.generated.StormTopology): 162 usages
Config (org.apache.storm.Config): 72 usages
HashMap (java.util.HashMap): 67 usages
Test (org.junit.Test): 59 usages
TopologyBuilder (org.apache.storm.topology.TopologyBuilder): 44 usages
Map (java.util.Map): 35 usages
ArrayList (java.util.ArrayList): 29 usages
TopologyDetails (org.apache.storm.scheduler.TopologyDetails): 27 usages
Test (org.junit.jupiter.api.Test): 26 usages
List (java.util.List): 24 usages
Bolt (org.apache.storm.generated.Bolt): 23 usages
Values (org.apache.storm.tuple.Values): 23 usages
StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry): 22 usages
Cluster (org.apache.storm.scheduler.Cluster): 22 usages
SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails): 22 usages
Topologies (org.apache.storm.scheduler.Topologies): 22 usages
Fields (org.apache.storm.tuple.Fields): 22 usages
INimbus (org.apache.storm.scheduler.INimbus): 21 usages
TopologyDef (org.apache.storm.flux.model.TopologyDef): 20 usages
TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler): 20 usages