Example usage of org.apache.storm.generated.StormTopology in the open-kilda project (by Telstra):
class FlowTopologyTest, method setupOnce.
@BeforeClass
public static void setupOnce() throws Exception {
    AbstractStormTest.setupOnce();

    // Build and submit the flow topology to the shared test cluster.
    flowTopology = new FlowTopology(makeLaunchEnvironment(), new PathComputerAuth());
    topologyConfig = flowTopology.getConfig();
    StormTopology stormTopology = flowTopology.createTopology();
    Config config = stormConfig();
    cluster.submitTopology(FlowTopologyTest.class.getSimpleName(), config, stormTopology);

    // One consumer per topic the topology writes to; each gets a deterministic
    // consumer-group id derived from the destination name (see groupId below).
    nbConsumer = new TestKafkaConsumer(topologyConfig.getKafkaNorthboundTopic(),
            Destination.NORTHBOUND, kafkaProperties(groupId(Destination.NORTHBOUND.toString())));
    nbConsumer.start();

    ofsConsumer = new TestKafkaConsumer(topologyConfig.getKafkaSpeakerTopic(),
            Destination.CONTROLLER, kafkaProperties(groupId(Destination.CONTROLLER.toString())));
    ofsConsumer.start();

    // Cache topic consumer filters nothing (null destination) but still needs its own group id.
    cacheConsumer = new TestKafkaConsumer(topologyConfig.getKafkaTopoCacheTopic(),
            null, kafkaProperties(groupId(Destination.TOPOLOGY_ENGINE.toString())));
    cacheConsumer.start();

    // teResponseConsumer = new TestKafkaConsumer(topologyConfig.getKafkaTopoEngTopic(),
    teResponseConsumer = new TestKafkaConsumer(topologyConfig.getKafkaFlowTopic(),
            Destination.WFM, kafkaProperties(groupId(Destination.WFM.toString())));
    teResponseConsumer.start();

    // Use the already-cached topologyConfig for consistency with the consumers above.
    ctrlConsumer = new TestKafkaConsumer(topologyConfig.getKafkaCtrlTopic(),
            Destination.CTRL_CLIENT, kafkaProperties(groupId(Destination.CTRL_CLIENT.toString())));
    ctrlConsumer.start();

    // Give the topology and consumers time to spin up before any test runs.
    // NOTE(review): fixed sleep is inherently racy; a readiness poll would be more robust.
    Utils.sleep(10000);
}

/**
 * Derives a deterministic Kafka consumer-group id from a destination name.
 * Uses an explicit UTF-8 charset: the no-arg {@code String.getBytes()} relies on the
 * platform-default encoding and could produce different UUIDs on differently
 * configured hosts.
 */
private static String groupId(String seed) {
    return UUID.nameUUIDFromBytes(seed.getBytes(java.nio.charset.StandardCharsets.UTF_8)).toString();
}
Example usage of org.apache.storm.generated.StormTopology in the open-kilda project (by Telstra):
class OpenTSDBTopologyTest, method shouldSendDatapointRequestsOnlyOnce.
@Ignore
@Test
public void shouldSendDatapointRequestsOnlyOnce() throws Exception {
    // Feed the very same serialized datapoint into the topology twice.
    Datapoint datapoint = new Datapoint("metric", timestamp, Collections.emptyMap(), 123);
    String payload = MAPPER.writeValueAsString(datapoint);

    MockedSources mockedSources = new MockedSources();
    mockedSources.addMockData(Topic.OTSDB + "-spout", new Values(payload), new Values(payload));
    completeTopologyParam.setMockedSources(mockedSources);

    // Run the topology to completion against the tracked in-process cluster.
    Testing.withTrackedCluster(clusterParam, (cluster) -> {
        OpenTSDBTopology topology = new TestingTargetTopology(new TestingKafkaBolt());
        Testing.completeTopology(cluster, topology.createTopology(), completeTopologyParam);
    });

    // The duplicate must be filtered out: exactly one HTTP request reaches OpenTSDB.
    mockServer.verify(HttpRequest.request(), VerificationTimes.exactly(1));
}
Example usage of org.apache.storm.generated.StormTopology in the open-kilda project (by Telstra):
class OpenTSDBTopologyTest, method shouldSuccessfulSendDatapoint.
@Ignore
@Test
public void shouldSuccessfulSendDatapoint() throws Exception {
    // Single datapoint pushed through the mocked spout.
    Datapoint datapoint = new Datapoint("metric", timestamp, Collections.emptyMap(), 123);

    MockedSources sources = new MockedSources();
    // TODO: rather than use Topic.OTSDB, grab it from the TopologyConfig object (which does
    // not exist at this point in the code.
    sources.addMockData(Topic.OTSDB + "-spout", new Values(MAPPER.writeValueAsString(datapoint)));
    completeTopologyParam.setMockedSources(sources);

    Testing.withTrackedCluster(clusterParam, (cluster) -> {
        OpenTSDBTopology topology = new TestingTargetTopology(new TestingKafkaBolt());
        StormTopology stormTopology = topology.createTopology();
        // The completion result is intentionally ignored (the former raw-type, unused
        // "Map result" local was removed); verification happens via the mock server below.
        Testing.completeTopology(cluster, stormTopology, completeTopologyParam);
    });

    // verify that request is sent to OpenTSDB server
    mockServer.verify(HttpRequest.request(), VerificationTimes.exactly(1));
}
Example usage of org.apache.storm.generated.StormTopology in the storm project (by Apache):
class StormSqlImpl, method submit.
// Parses and executes a sequence of SQL statements: DDL (CREATE TABLE / CREATE FUNCTION)
// is interpreted in-process, while any other statement is compiled into a Storm topology
// and submitted to the cluster under the given name/credentials.
@Override
public void submit(String name, Iterable<String> statements, Map<String, Object> topoConf, SubmitOptions opts, StormSubmitter.ProgressListener progressListener, String asUser) throws Exception {
for (String sql : statements) {
StormParser parser = new StormParser(sql);
SqlNode node = parser.impl().parseSqlStmtEof();
if (node instanceof SqlCreateTable) {
// DDL: register the table definition in the local SQL context.
sqlContext.interpretCreateTable((SqlCreateTable) node);
} else if (node instanceof SqlCreateFunction) {
// DDL: register the user-defined function in the local SQL context.
sqlContext.interpretCreateFunction((SqlCreateFunction) node);
} else {
// Anything else is a query: compile it to a streams processor and submit it.
AbstractStreamsProcessor processor = sqlContext.compileSql(sql);
StormTopology topo = processor.build();
Path jarPath = null;
try {
// QueryPlanner on Streams mode configures the topology with compiled classes,
// so we need to add new classes into topology jar
// Topology will be serialized and sent to Nimbus, and deserialized and executed in workers.
jarPath = Files.createTempFile("storm-sql", ".jar");
// "storm.jar" must point at the packaged jar BEFORE submission; StormSubmitter
// reads this system property to decide what to upload.
System.setProperty("storm.jar", jarPath.toString());
packageTopology(jarPath, processor);
StormSubmitter.submitTopologyAs(name, topoConf, topo, opts, progressListener, asUser);
} finally {
// Temp jar is only needed for upload; always clean it up, even on failure.
if (jarPath != null) {
Files.delete(jarPath);
}
}
}
}
}
Example usage of org.apache.storm.generated.StormTopology in the storm project (by Apache):
class TestPlanCompiler, method testNested.
@Test
public void testNested() throws Exception {
    // Query exercising nested map and array field access in both SELECT and WHERE.
    int EXPECTED_VALUE_SIZE = 1;
    String sql = "SELECT ID, MAPFIELD['c'], NESTEDMAPFIELD, ARRAYFIELD FROM FOO WHERE NESTEDMAPFIELD['a']['b'] = 2 AND ARRAYFIELD[2] = 200";
    TestCompilerUtils.CalciteState state = TestCompilerUtils.sqlOverNestedTable(sql);

    // Wire the FOO table to the mock nested-data source.
    final Map<String, ISqlStreamsDataSource> data = new HashMap<>();
    data.put("FOO", new TestUtils.MockSqlStreamsNestedDataSource());

    AbstractStreamsProcessor proc = new QueryPlanner(state.schema()).compile(data, sql);
    // inject output bolt
    proc.outputStream().to(new TestUtils.MockBolt());
    final StormTopology topo = proc.build();
    SqlTestUtil.runStormTopology(cluster, TestUtils.MockBolt.getCollectedValues(), EXPECTED_VALUE_SIZE, proc, topo);

    // Expected single row: ID=2, MAPFIELD['c']=4, the nested map itself, and the full array.
    Map<String, Integer> innerMap = ImmutableMap.of("b", 2, "c", 4);
    Map<String, Map<String, Integer>> nestedMap = ImmutableMap.of("a", innerMap);
    Assert.assertArrayEquals(new Values[] { new Values(2, 4, nestedMap, Arrays.asList(100, 200, 300)) }, TestUtils.MockBolt.getCollectedValues().toArray());
}
Aggregations