use of org.apache.storm.generated.StormTopology in project streamline by hortonworks.
Example from the class NormalizationTopologyTest, method testNormalizationTopology.
/**
 * Smoke-tests the normalization topology: builds it from the given processor,
 * submits it to an in-process local cluster, lets it run briefly, and shuts down.
 *
 * @param normalizationProcessor processor used to build the topology under test
 * @throws Exception if topology creation or submission fails
 */
public void testNormalizationTopology(NormalizationProcessor normalizationProcessor) throws Exception {
    final Config config = new Config();
    config.setDebug(true);
    // NOTE(review): name says "SplitJoinTopologyTest" but this is the normalization
    // test — looks like a copy-paste leftover; confirm intended.
    final String topologyName = "SplitJoinTopologyTest";
    final StormTopology topology = createTopology(normalizationProcessor);
    log.info("Created topology with name: [{}] and topology: [{}]", topologyName, topology);

    ILocalCluster localCluster = new LocalCluster();
    log.info("Submitting topology: [{}]", topologyName);
    try {
        localCluster.submitTopology(topologyName, config, topology);
        // Give the topology a short window to process before tearing down.
        Thread.sleep(2000);
    } finally {
        // Always shut the in-process cluster down, even when submit/sleep throws,
        // so a failed run does not leak the cluster into subsequent tests.
        localCluster.shutdown();
    }
}
use of org.apache.storm.generated.StormTopology in project incubator-heron by apache.
Example from the class GeneralTopologyContext, method getRawTopology.
/**
 * Builds the Thrift {@link StormTopology} view of the underlying Heron
 * topology by converting each spout and bolt obtained from the delegate.
 *
 * @return a freshly constructed Thrift topology definition
 */
@SuppressWarnings("deprecation")
public StormTopology getRawTopology() {
    Map<String, SpoutSpec> spoutSpecs = new HashMap<>();
    for (TopologyAPI.Spout heronSpout : this.delegate.getRawTopology().getSpoutsList()) {
        spoutSpecs.put(heronSpout.getComp().getName(), new SpoutSpec(heronSpout));
    }

    Map<String, Bolt> boltSpecs = new HashMap<>();
    for (TopologyAPI.Bolt heronBolt : this.delegate.getRawTopology().getBoltsList()) {
        boltSpecs.put(heronBolt.getComp().getName(), new Bolt(heronBolt));
    }

    StormTopology converted = new StormTopology();
    converted.set_spouts(spoutSpecs);
    converted.set_bolts(boltSpecs);
    return converted;
}
use of org.apache.storm.generated.StormTopology in project incubator-heron by apache.
Example from the class EcoSubmitterTest, method submitTopology_AllGood_BehavesAsExpected.
// Verifies that EcoSubmitter delegates topology submission to the static
// StormSubmitter.submitTopology(...) exactly once, without contacting a real cluster.
@Test
public void submitTopology_AllGood_BehavesAsExpected() throws Exception {
Config config = new Config();
StormTopology topology = new StormTopology();
// Spy the static class and stub out the real submission so no network call happens.
PowerMockito.spy(StormSubmitter.class);
PowerMockito.doNothing().when(StormSubmitter.class, "submitTopology", any(String.class), any(Config.class), any(StormTopology.class));
// Exercise the subject under test (field presumably initialized in a setup method — not visible here).
subject.submitTopology("name", config, topology);
// PowerMockito protocol: verifyStatic(times(1)) arms the verification, and the
// following static call is the invocation being verified — order matters.
PowerMockito.verifyStatic(times(1));
StormSubmitter.submitTopology(anyString(), any(Config.class), any(StormTopology.class));
}
use of org.apache.storm.generated.StormTopology in project open-kilda by telstra.
Example from the class CacheTopologyTest, method setupOnce.
/**
 * One-time fixture setup: boots the shared Storm test cluster, submits the
 * CacheTopology, and starts Kafka consumers for the topology-engine, flow,
 * and control topics so tests can observe emitted messages.
 *
 * @throws Exception if cluster boot, topology submission, or consumer start fails
 */
@BeforeClass
public static void setupOnce() throws Exception {
    AbstractStormTest.setupOnce();

    flows.add(firstFlow);
    flows.add(secondFlow);

    topology = new CacheTopology(makeLaunchEnvironment());
    StormTopology stormTopology = topology.createTopology();
    Config config = stormConfig();
    cluster.submitTopology(CacheTopologyTest.class.getSimpleName(), config, stormTopology);

    teConsumer = new TestKafkaConsumer(topology.getConfig().getKafkaTopoEngTopic(), Destination.TOPOLOGY_ENGINE,
            kafkaProperties(consumerGroupId(Destination.TOPOLOGY_ENGINE)));
    teConsumer.start();

    flowConsumer = new TestKafkaConsumer(topology.getConfig().getKafkaFlowTopic(), Destination.WFM,
            kafkaProperties(consumerGroupId(Destination.WFM)));
    flowConsumer.start();

    ctrlConsumer = new TestKafkaConsumer(topology.getConfig().getKafkaCtrlTopic(), Destination.CTRL_CLIENT,
            kafkaProperties(consumerGroupId(Destination.CTRL_CLIENT)));
    ctrlConsumer.start();
}

/**
 * Derives a deterministic consumer-group id from a destination name
 * (previously this expression was duplicated for each consumer).
 * NOTE(review): getBytes() uses the platform default charset; destination
 * names here are ASCII so the UUID is stable, but StandardCharsets.UTF_8
 * would make that explicit — confirm before changing, as it alters no ASCII ids.
 */
private static String consumerGroupId(Destination destination) {
    return UUID.nameUUIDFromBytes(destination.toString().getBytes()).toString();
}
use of org.apache.storm.generated.StormTopology in project open-kilda by telstra.
Example from the class OFELinkBoltFloodTest, method warmBoltOnHighLoadedTopic.
// Stress test: pre-loads the discovery topic with floodSize messages BEFORE the
// bolt's topology starts, then verifies the bolt drains the backlog. The JUnit
// timeout (5 minutes) is the only guard against the unbounded poll loop below.
@Test(timeout = 5000 * 60)
public void warmBoltOnHighLoadedTopic() throws Exception {
topology = new OFEventWFMTopology(makeLaunchEnvironment());
// Consumer on the topology-engine topic, where the bolt forwards processed events.
teConsumer = new TestKafkaConsumer(topology.getConfig().getKafkaTopoEngTopic(), kafkaProperties(UUID.nameUUIDFromBytes(Destination.TOPOLOGY_ENGINE.toString().getBytes()).toString()));
teConsumer.start();
// Size of messages in topic before bolt start
final int floodSize = 100000;
SwitchInfoData data = new SwitchInfoData("switchId", SwitchState.ADDED, "address", "hostname", "description", "controller");
InfoMessage message = new InfoMessage(data, System.currentTimeMillis(), UUID.randomUUID().toString());
// Flood the topic with floodSize copies of the switch event before the topology is up.
sendMessages(message, topology.getConfig().getKafkaTopoDiscoTopic(), floodSize);
// Only now start the topology, so the bolt wakes up facing the full backlog.
StormTopology stormTopology = topology.createTopology();
Config config = stormConfig();
cluster.submitTopology(OFELinkBoltFloodTest.class.getSimpleName(), config, stormTopology);
// Empty network dump — presumably unblocks the bolt's initial state sync; confirm against OFELinkBolt.
NetworkInfoData dump = new NetworkInfoData("test", Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), Collections.emptySet());
InfoMessage info = new InfoMessage(dump, 0, DEFAULT_CORRELATION_ID, Destination.WFM);
String request = objectMapper.writeValueAsString(info);
// Send DumpMessage to topic with offset floodSize+1.
kProducer.pushMessage(topology.getConfig().getKafkaTopoDiscoTopic(), request);
// Wait all messages
// Busy-poll until floodSize messages are seen; no exit on null, so a stalled
// bolt is only caught by the @Test timeout above.
int pooled = 0;
while (pooled < floodSize) {
if (teConsumer.pollMessage() != null)
++pooled;
}
// NOTE(review): tautological — the loop can only exit with pooled == floodSize;
// kept as a readability/documentation assert.
assertEquals(floodSize, pooled);
}
Aggregations