Example 96 with StormTopology

Use of org.apache.storm.generated.StormTopology in project open-kilda by telstra.

The class FlowTopologyTest, method setupOnce:

@BeforeClass
public static void setupOnce() throws Exception {
    AbstractStormTest.setupOnce();
    flowTopology = new FlowTopology(makeLaunchEnvironment(), new PathComputerAuth());
    topologyConfig = flowTopology.getConfig();
    StormTopology stormTopology = flowTopology.createTopology();
    Config config = stormConfig();
    cluster.submitTopology(FlowTopologyTest.class.getSimpleName(), config, stormTopology);
    // One consumer per outbound topic; each group id is derived
    // deterministically from the destination name.
    nbConsumer = new TestKafkaConsumer(
            topologyConfig.getKafkaNorthboundTopic(), Destination.NORTHBOUND,
            kafkaProperties(UUID.nameUUIDFromBytes(Destination.NORTHBOUND.toString().getBytes()).toString()));
    nbConsumer.start();
    ofsConsumer = new TestKafkaConsumer(
            topologyConfig.getKafkaSpeakerTopic(), Destination.CONTROLLER,
            kafkaProperties(UUID.nameUUIDFromBytes(Destination.CONTROLLER.toString().getBytes()).toString()));
    ofsConsumer.start();
    cacheConsumer = new TestKafkaConsumer(
            topologyConfig.getKafkaTopoCacheTopic(), null,
            kafkaProperties(UUID.nameUUIDFromBytes(Destination.TOPOLOGY_ENGINE.toString().getBytes()).toString()));
    cacheConsumer.start();
    // teResponseConsumer = new TestKafkaConsumer(topologyConfig.getKafkaTopoEngTopic(),
    teResponseConsumer = new TestKafkaConsumer(
            topologyConfig.getKafkaFlowTopic(), Destination.WFM,
            kafkaProperties(UUID.nameUUIDFromBytes(Destination.WFM.toString().getBytes()).toString()));
    teResponseConsumer.start();
    ctrlConsumer = new TestKafkaConsumer(
            flowTopology.getConfig().getKafkaCtrlTopic(), Destination.CTRL_CLIENT,
            kafkaProperties(UUID.nameUUIDFromBytes(Destination.CTRL_CLIENT.toString().getBytes()).toString()));
    ctrlConsumer.start();
    // Give the freshly submitted topology time to start before tests produce messages.
    Utils.sleep(10000);
}
Also used: TestKafkaConsumer(org.openkilda.wfm.topology.TestKafkaConsumer), TopologyConfig(org.openkilda.wfm.topology.TopologyConfig), Config(org.apache.storm.Config), StormTopology(org.apache.storm.generated.StormTopology), BeforeClass(org.junit.BeforeClass)
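
TestKafkaConsumer is an open-kilda test helper rather than a Storm or Kafka class. As a rough illustration of what such a wrapper does underneath, here is a minimal sketch using only the plain kafka-clients consumer API; the broker address, group id, and topic name are placeholder assumptions, not values from the open-kilda code.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class SimpleTestConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "flow-topology-test"); // placeholder group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // "kilda.northbound" stands in for topologyConfig.getKafkaNorthboundTopic()
            consumer.subscribe(Collections.singletonList("kilda.northbound"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.value());
            }
        }
    }
}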

Example 97 with StormTopology

Use of org.apache.storm.generated.StormTopology in project open-kilda by telstra.

The class OpenTSDBTopologyTest, method shouldSendDatapointRequestsOnlyOnce:

@Ignore
@Test
public void shouldSendDatapointRequestsOnlyOnce() throws Exception {
    Datapoint datapoint = new Datapoint("metric", timestamp, Collections.emptyMap(), 123);
    String jsonDatapoint = MAPPER.writeValueAsString(datapoint);
    MockedSources sources = new MockedSources();
    // feed the same serialized datapoint twice; the topology should deduplicate it
    sources.addMockData(Topic.OTSDB + "-spout", new Values(jsonDatapoint), new Values(jsonDatapoint));
    completeTopologyParam.setMockedSources(sources);
    Testing.withTrackedCluster(clusterParam, (cluster) -> {
        OpenTSDBTopology topology = new TestingTargetTopology(new TestingKafkaBolt());
        StormTopology stormTopology = topology.createTopology();
        Testing.completeTopology(cluster, stormTopology, completeTopologyParam);
    });
    // verify that request is sent to OpenTSDB server once
    mockServer.verify(HttpRequest.request(), VerificationTimes.exactly(1));
}
Also used: Datapoint(org.openkilda.messaging.info.Datapoint), MockedSources(org.apache.storm.testing.MockedSources), StormTopology(org.apache.storm.generated.StormTopology), Values(org.apache.storm.tuple.Values), TestingKafkaBolt(org.openkilda.wfm.topology.TestingKafkaBolt), Ignore(org.junit.Ignore), StableAbstractStormTest(org.openkilda.wfm.StableAbstractStormTest), Test(org.junit.Test)
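
The mockServer field above is a MockServer (mockserver-netty) instance standing in for the OpenTSDB HTTP endpoint. A minimal sketch of how such an expectation/verification pair is typically wired up; the port and path are assumptions for illustration, not values taken from the test class.

import org.mockserver.integration.ClientAndServer;
import org.mockserver.model.HttpRequest;
import org.mockserver.model.HttpResponse;
import org.mockserver.verify.VerificationTimes;

public class MockServerSketch {
    public static void main(String[] args) {
        // start an in-process HTTP server on an arbitrary test port
        ClientAndServer mockServer = ClientAndServer.startClientAndServer(4242);
        try {
            // answer 200 OK to anything that looks like an OpenTSDB put
            mockServer.when(HttpRequest.request().withPath("/api/put"))
                    .respond(HttpResponse.response().withStatusCode(200));
            // ... run the topology that emits datapoints here ...
            // then fail unless exactly one request reached the server
            mockServer.verify(HttpRequest.request(), VerificationTimes.exactly(1));
        } finally {
            mockServer.stop();
        }
    }
}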

Example 98 with StormTopology

Use of org.apache.storm.generated.StormTopology in project open-kilda by telstra.

The class OpenTSDBTopologyTest, method shouldSuccessfulSendDatapoint:

@Ignore
@Test
public void shouldSuccessfulSendDatapoint() throws Exception {
    Datapoint datapoint = new Datapoint("metric", timestamp, Collections.emptyMap(), 123);
    MockedSources sources = new MockedSources();
    // TODO: rather than use Topic.OTSDB, grab it from the TopologyConfig object (which does
    // not exist at this point in the code).
    sources.addMockData(Topic.OTSDB + "-spout", new Values(MAPPER.writeValueAsString(datapoint)));
    completeTopologyParam.setMockedSources(sources);
    Testing.withTrackedCluster(clusterParam, (cluster) -> {
        OpenTSDBTopology topology = new TestingTargetTopology(new TestingKafkaBolt());
        StormTopology stormTopology = topology.createTopology();
        Map result = Testing.completeTopology(cluster, stormTopology, completeTopologyParam);
    });
    // verify that request is sent to OpenTSDB server
    mockServer.verify(HttpRequest.request(), VerificationTimes.exactly(1));
}
Also used: Datapoint(org.openkilda.messaging.info.Datapoint), MockedSources(org.apache.storm.testing.MockedSources), StormTopology(org.apache.storm.generated.StormTopology), Values(org.apache.storm.tuple.Values), TestingKafkaBolt(org.openkilda.wfm.topology.TestingKafkaBolt), Map(java.util.Map), Ignore(org.junit.Ignore), StableAbstractStormTest(org.openkilda.wfm.StableAbstractStormTest), Test(org.junit.Test)
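
The Map returned by Testing.completeTopology records, per component id, every tuple that component emitted during the run, so the otherwise unused result above could drive assertions directly. A sketch of that using Storm's Testing.readTuples helper; the expected count of 1 is an assumption tied to the single mocked tuple injected above.

import java.util.List;
import java.util.Map;
import org.apache.storm.Testing;
import org.junit.Assert;

// inside the Testing.withTrackedCluster lambda, after completeTopology returns:
Map result = Testing.completeTopology(cluster, stormTopology, completeTopologyParam);
// readTuples extracts everything the named component emitted during the run
List spoutTuples = Testing.readTuples(result, Topic.OTSDB + "-spout");
// exactly one mocked Values was injected, so one spout emission is expected
Assert.assertEquals(1, spoutTuples.size());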

Example 99 with StormTopology

Use of org.apache.storm.generated.StormTopology in project storm by apache.

The class StormSqlImpl, method submit:

@Override
public void submit(String name, Iterable<String> statements, Map<String, Object> topoConf, SubmitOptions opts, StormSubmitter.ProgressListener progressListener, String asUser) throws Exception {
    for (String sql : statements) {
        StormParser parser = new StormParser(sql);
        SqlNode node = parser.impl().parseSqlStmtEof();
        if (node instanceof SqlCreateTable) {
            sqlContext.interpretCreateTable((SqlCreateTable) node);
        } else if (node instanceof SqlCreateFunction) {
            sqlContext.interpretCreateFunction((SqlCreateFunction) node);
        } else {
            AbstractStreamsProcessor processor = sqlContext.compileSql(sql);
            StormTopology topo = processor.build();
            Path jarPath = null;
            try {
                // QueryPlanner in Streams mode configures the topology with compiled classes,
                // so we need to add the new classes to the topology jar.
                // The topology will be serialized and sent to Nimbus, then deserialized and executed in workers.
                jarPath = Files.createTempFile("storm-sql", ".jar");
                System.setProperty("storm.jar", jarPath.toString());
                packageTopology(jarPath, processor);
                StormSubmitter.submitTopologyAs(name, topoConf, topo, opts, progressListener, asUser);
            } finally {
                if (jarPath != null) {
                    Files.delete(jarPath);
                }
            }
        }
    }
}
Also used: Path(java.nio.file.Path), StormTopology(org.apache.storm.generated.StormTopology), SqlCreateFunction(org.apache.storm.sql.parser.SqlCreateFunction), StormParser(org.apache.storm.sql.parser.StormParser), SqlCreateTable(org.apache.storm.sql.parser.SqlCreateTable), SqlNode(org.apache.calcite.sql.SqlNode)
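
packageTopology is internal to storm-sql, but the trick it relies on, writing freshly compiled classes into a temporary jar and pointing the storm.jar system property at it so StormSubmitter uploads that jar, can be sketched with the standard java.util.jar API. The entry name and empty byte array below are hypothetical stand-ins for the classes the query planner generates.

import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;

public class JarPackagingSketch {
    public static void main(String[] args) throws Exception {
        Path jarPath = Files.createTempFile("storm-sql", ".jar");
        try (OutputStream os = Files.newOutputStream(jarPath);
             JarOutputStream jar = new JarOutputStream(os, new Manifest())) {
            // hypothetical entry standing in for a runtime-compiled class
            jar.putNextEntry(new JarEntry("CompiledPlan.class"));
            jar.write(new byte[0]);
            jar.closeEntry();
        }
        // StormSubmitter consults this property to decide which jar to upload
        System.setProperty("storm.jar", jarPath.toString());
    }
}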

Example 100 with StormTopology

Use of org.apache.storm.generated.StormTopology in project storm by apache.

The class TestPlanCompiler, method testNested:

@Test
public void testNested() throws Exception {
    int EXPECTED_VALUE_SIZE = 1;
    String sql = "SELECT ID, MAPFIELD['c'], NESTEDMAPFIELD, ARRAYFIELD " + "FROM FOO " + "WHERE NESTEDMAPFIELD['a']['b'] = 2 AND ARRAYFIELD[2] = 200";
    TestCompilerUtils.CalciteState state = TestCompilerUtils.sqlOverNestedTable(sql);
    final Map<String, ISqlStreamsDataSource> data = new HashMap<>();
    data.put("FOO", new TestUtils.MockSqlStreamsNestedDataSource());
    QueryPlanner planner = new QueryPlanner(state.schema());
    AbstractStreamsProcessor proc = planner.compile(data, sql);
    // inject output bolt
    proc.outputStream().to(new TestUtils.MockBolt());
    final StormTopology topo = proc.build();
    SqlTestUtil.runStormTopology(cluster, TestUtils.MockBolt.getCollectedValues(), EXPECTED_VALUE_SIZE, proc, topo);
    Map<String, Integer> map = ImmutableMap.of("b", 2, "c", 4);
    Map<String, Map<String, Integer>> nestedMap = ImmutableMap.of("a", map);
    Assert.assertArrayEquals(new Values[] { new Values(2, 4, nestedMap, Arrays.asList(100, 200, 300)) }, TestUtils.MockBolt.getCollectedValues().toArray());
}
Also used: HashMap(java.util.HashMap), StormTopology(org.apache.storm.generated.StormTopology), Values(org.apache.storm.tuple.Values), QueryPlanner(org.apache.storm.sql.planner.streams.QueryPlanner), TestUtils(org.apache.storm.sql.TestUtils), AbstractStreamsProcessor(org.apache.storm.sql.AbstractStreamsProcessor), ISqlStreamsDataSource(org.apache.storm.sql.runtime.ISqlStreamsDataSource), ImmutableMap(com.google.common.collect.ImmutableMap), Map(java.util.Map), Test(org.junit.jupiter.api.Test)
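
For the WHERE clause above to match, the mock data source has to emit rows whose fields are real Java collections. A sketch of one such row, mirroring the values the assertion expects; the field order ID, MAPFIELD, NESTEDMAPFIELD, ARRAYFIELD is an assumption about the FOO table schema, not read from TestUtils.

import java.util.Arrays;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.apache.storm.tuple.Values;

public class NestedRowSketch {
    public static void main(String[] args) {
        Map<String, Integer> mapField = ImmutableMap.of("b", 2, "c", 4);
        Map<String, Map<String, Integer>> nestedMapField = ImmutableMap.of("a", mapField);
        // NESTEDMAPFIELD['a']['b'] = 2 holds, and with SQL's 1-based array
        // indexing ARRAYFIELD[2] = 200, so this row passes the filter
        Values row = new Values(2, mapField, nestedMapField, Arrays.asList(100, 200, 300));
        System.out.println(row);
    }
}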

Aggregations (usage counts)

StormTopology (org.apache.storm.generated.StormTopology): 162
Config (org.apache.storm.Config): 72
HashMap (java.util.HashMap): 67
Test (org.junit.Test): 59
TopologyBuilder (org.apache.storm.topology.TopologyBuilder): 44
Map (java.util.Map): 35
ArrayList (java.util.ArrayList): 29
TopologyDetails (org.apache.storm.scheduler.TopologyDetails): 27
Test (org.junit.jupiter.api.Test): 26
List (java.util.List): 24
Bolt (org.apache.storm.generated.Bolt): 23
Values (org.apache.storm.tuple.Values): 23
StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry): 22
Cluster (org.apache.storm.scheduler.Cluster): 22
SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails): 22
Topologies (org.apache.storm.scheduler.Topologies): 22
Fields (org.apache.storm.tuple.Fields): 22
INimbus (org.apache.storm.scheduler.INimbus): 21
TopologyDef (org.apache.storm.flux.model.TopologyDef): 20
TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler): 20