use of org.openkilda.wfm.topology.TopologyConfig in project open-kilda by telstra.
the class OFEventWfmTest method basicLinkDiscoveryTest.
/**
* basicLinkDiscoveryTest exercises the basics of link discovery.
* The key results should show up in a Kafka topic, which is dumped to a file.
*/
@Test
@Ignore
public void basicLinkDiscoveryTest() throws IOException, ConfigurationException, CmdLineException {
System.out.println("==> Starting BasicLinkDiscoveryTest");
OFEventWFMTopology manager = new OFEventWFMTopology(makeLaunchEnvironment());
TopologyConfig config = manager.getConfig();
String topo_input_topic = config.getKafkaTopoDiscoTopic();
Tuple tuple;
KeyValueState<String, Object> state = new InMemoryKeyValueState<>();
initMocks(topo_input_topic);
List<PathNode> nodes = Arrays.asList(new PathNode("sw1", 1, 0, 10L), new PathNode("sw2", 2, 1, 10L));
InfoData data = new IslInfoData(10L, nodes, 10000L, IslChangeType.DISCOVERED, 9000L);
String isl_discovered = MAPPER.writeValueAsString(data);
OFELinkBolt linkBolt = new OFELinkBolt(config);
linkBolt.prepare(stormConfig(), topologyContext, outputCollector);
linkBolt.initState(state);
ArrayList<DiscoveryFilterEntity> skipNodes = new ArrayList<>(1);
skipNodes.add(new DiscoveryFilterEntity("sw1", "1"));
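// Wrap the skip-list in a command for the discovery bolt: once applied, sw1/port1 is
// excluded from discovery polling, so only one ISL shows up per cycle (see the assertion comments below).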
CommandMessage islFilterSetup = new CommandMessage(new DiscoveryFilterPopulateData(skipNodes), 1, "discovery-test", Destination.WFM_OF_DISCOVERY);
String json = MAPPER.writeValueAsString(islFilterSetup);
tuple = new TupleImpl(topologyContext, Collections.singletonList(json), 4, "message");
linkBolt.execute(tuple);
tuple = new TupleImpl(topologyContext, Arrays.asList("sw1", OFEMessageUtils.SWITCH_UP), 0, topo_input_topic);
linkBolt.execute(tuple);
tuple = new TupleImpl(topologyContext, Arrays.asList("sw2", OFEMessageUtils.SWITCH_UP), 0, topo_input_topic);
linkBolt.execute(tuple);
tuple = new TupleImpl(topologyContext, Arrays.asList("sw1", "1", OFEMessageUtils.PORT_UP), 1, topo_input_topic);
linkBolt.execute(tuple);
tuple = new TupleImpl(topologyContext, Arrays.asList("sw1", "2", OFEMessageUtils.PORT_UP), 1, topo_input_topic);
linkBolt.execute(tuple);
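// Hand-built tick tuple on Storm's system tick stream (Constants.SYSTEM_TICK_STREAM_ID):
// it drives the bolt's periodic discovery logic without waiting in real time.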
Tuple tickTuple = new TupleImpl(topologyContext, Collections.emptyList(), 2, Constants.SYSTEM_TICK_STREAM_ID);
linkBolt.execute(tickTuple);
tuple = new TupleImpl(topologyContext, Collections.singletonList(isl_discovered), 3, topo_input_topic);
linkBolt.execute(tuple);
linkBolt.execute(tickTuple);
linkBolt.execute(tickTuple);
// 1 isl, 3-second interval, 9-second test duration == 3 discovery commands
// there is only 1 isl each cycle because of the isl filter
// messagesExpected = 3;
// TODO: (crimi) validate that 7 is correct due to merged topics
messagesExpected = 7;
messagesReceived = outputCollectorMock.getMessagesCount(config.getKafkaTopoDiscoTopic());
Assert.assertEquals(messagesExpected, messagesReceived);
// "isl discovered" x1
// messagesExpected = 1;
// TODO: (crimi) validate that 7 is correct due to merged topics
messagesExpected = 7;
messagesReceived = outputCollectorMock.getMessagesCount(config.getKafkaTopoDiscoTopic());
Assert.assertEquals(messagesExpected, messagesReceived);
linkBolt.execute(tickTuple);
// no new discovery commands
// messagesExpected = 3;
// TODO: confirm the increase from 3 to 7 due to topic changes
messagesExpected = 7;
messagesReceived = outputCollectorMock.getMessagesCount(config.getKafkaTopoDiscoTopic());
Assert.assertEquals(messagesExpected, messagesReceived);
// +1 discovery fails
// messagesExpected = 2;
// TODO: there should be more, or we aren't looking in the right place
messagesExpected = 7;
messagesReceived = outputCollectorMock.getMessagesCount(config.getKafkaTopoDiscoTopic());
Assert.assertEquals(messagesExpected, messagesReceived);
}
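The assertions above depend on outputCollectorMock counting what the bolt emits per stream. That mock is not shown on this page; below is a minimal counting collector sketched against Storm 1.x's IOutputCollector (the class name and getMessagesCount signature are assumptions, not open-kilda's actual mock).

import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.storm.task.IOutputCollector;
import org.apache.storm.tuple.Tuple;

// Hypothetical stand-in for outputCollectorMock: counts emitted tuples per stream id.
public class CountingOutputCollector implements IOutputCollector {
    private final Map<String, Integer> counts = new HashMap<>();

    @Override
    public List<Integer> emit(String streamId, Collection<Tuple> anchors, List<Object> tuple) {
        counts.merge(streamId, 1, Integer::sum);
        return null; // no downstream tasks exist in a unit test
    }

    @Override
    public void emitDirect(int taskId, String streamId, Collection<Tuple> anchors, List<Object> tuple) {
        counts.merge(streamId, 1, Integer::sum);
    }

    @Override
    public void ack(Tuple input) { /* no-op: we only count emits */ }

    @Override
    public void fail(Tuple input) { /* no-op: we only count emits */ }

    @Override
    public void resetTimeout(Tuple input) { /* no-op: we only count emits */ }

    @Override
    public void reportError(Throwable error) { /* no-op: we only count emits */ }

    public int getMessagesCount(String streamId) {
        return counts.getOrDefault(streamId, 0);
    }
}

The bolt would receive it in prepare(...) wrapped as new OutputCollector(new CountingOutputCollector()); the stream ids in this topology appear to mirror the Kafka topic names, which is why the test looks counts up by config.getKafkaTopoDiscoTopic().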
use of org.openkilda.wfm.topology.TopologyConfig in project open-kilda by telstra.
the class SimpleKafkaTest method allocateConfig.
@BeforeClass
public static void allocateConfig() throws ConfigurationException, CmdLineException {
String[] args = {};
config = new TopologyConfig((new LaunchEnvironment(args)).makePropertiesReader());
}
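Tests in the class can then read shared settings off the static config; a trivial hypothetical example using a getter that appears elsewhere on this page:

// Hypothetical follow-on test: topic names resolve from the properties loaded once in allocateConfig().
@Test
public void shouldResolveDiscoTopic() {
    Assert.assertNotNull(config.getKafkaTopoDiscoTopic());
}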
use of org.openkilda.wfm.topology.TopologyConfig in project open-kilda by telstra.
the class OFEventWfmTest method BasicSwitchPortEventsTest.
@Test
@Ignore
public void BasicSwitchPortEventsTest() throws Exception {
System.out.println("==> Starting BasicSwitchEventTest");
// TODO: Is this test still valid without the deprecated Switch/Port bolts?
OFEventWFMTopology manager = new OFEventWFMTopology(makeLaunchEnvironment());
TopologyConfig config = manager.getConfig();
String sw1_up = OFEMessageUtils.createSwitchDataMessage(OFEMessageUtils.SWITCH_UP, "sw1");
String sw2_up = OFEMessageUtils.createSwitchDataMessage(OFEMessageUtils.SWITCH_UP, "sw2");
String sw1p1_up = OFEMessageUtils.createPortDataMessage(OFEMessageUtils.PORT_UP, "sw1", "1");
String sw2p2_up = OFEMessageUtils.createPortDataMessage(OFEMessageUtils.PORT_UP, "sw2", "2");
String sw2p2_down = OFEMessageUtils.createPortDataMessage(OFEMessageUtils.PORT_DOWN, "sw2", "2");
String switch_topic = config.getKafkaTopoDiscoTopic();
String port_topic = config.getKafkaTopoDiscoTopic();
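// NB: switch and port events now share a single merged topo.disco topic;
// the two variables are kept only for readability.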
// send sw1 and sw2 up
kProducer.pushMessage(switch_topic, sw1_up);
kProducer.pushMessage(switch_topic, sw2_up);
// send sw1/port1 up ... sw2/port2 up
kProducer.pushMessage(port_topic, sw1p1_up);
kProducer.pushMessage(port_topic, sw2p2_up);
// send duplicates ... NB: at present, dupes aren't detected until we do FieldGrouping
// probably should send duplicates in another test
kProducer.pushMessage(switch_topic, sw1_up);
kProducer.pushMessage(switch_topic, sw2_up);
kProducer.pushMessage(port_topic, sw1p1_up);
kProducer.pushMessage(port_topic, sw2p2_up);
Utils.sleep(4 * 1000);
// at present, everything is passed through, no filter.
messagesExpected = 8;
messagesReceived = safeLinesCount(discoFiler.getFiler().getFile());
Assert.assertEquals(messagesExpected, messagesReceived);
Utils.sleep(1 * 1000);
// sending this now just for fun .. we'll more formally test that the ISL state is correct.
kProducer.pushMessage(port_topic, sw2p2_down);
Utils.sleep(2 * 1000);
// TODO: how can we programmatically determine how many ISL messages should be generated?
messagesReceived = safeLinesCount(discoFiler.getFiler().getFile());
if (messagesReceived == 0) {
System.out.println("Message count failure; NO MESSAGES RECEIVED!");
for (String s : Files.readLines(discoFiler.getFiler().getFile(), Charsets.UTF_8)) {
System.out.println("\t\t > " + s);
}
}
// NB: ISL discovery messages will be generated .. multiple .. at present 9-11.
Assert.assertTrue(messagesReceived > 0);
cluster.killTopology(manager.makeTopologyName());
cluster.killTopology("utils-1");
Utils.sleep(4 * 1000);
}
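safeLinesCount is defined outside this snippet; it evidently counts the lines of the dump file while tolerating a file that does not exist yet. A plausible reconstruction, assuming the Guava Files/Charsets imports the test already uses plus java.io.File, java.io.IOException, and java.util.List:

// Plausible reconstruction of the helper used above; the real one lives in the test's base class.
private static int safeLinesCount(File filename) {
    List<String> lines = null;
    try {
        lines = Files.readLines(filename, Charsets.UTF_8);
    } catch (IOException e) {
        // the dump file may not have been created yet if nothing was received
    }
    return (lines != null) ? lines.size() : 0;
}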
use of org.openkilda.wfm.topology.TopologyConfig in project open-kilda by telstra.
the class FlowTopologyTest method setupOnce.
@BeforeClass
public static void setupOnce() throws Exception {
AbstractStormTest.setupOnce();
flowTopology = new FlowTopology(makeLaunchEnvironment(), new PathComputerAuth());
topologyConfig = flowTopology.getConfig();
StormTopology stormTopology = flowTopology.createTopology();
Config config = stormConfig();
cluster.submitTopology(FlowTopologyTest.class.getSimpleName(), config, stormTopology);
nbConsumer = new TestKafkaConsumer(topologyConfig.getKafkaNorthboundTopic(), Destination.NORTHBOUND, kafkaProperties(UUID.nameUUIDFromBytes(Destination.NORTHBOUND.toString().getBytes()).toString()));
nbConsumer.start();
ofsConsumer = new TestKafkaConsumer(topologyConfig.getKafkaSpeakerTopic(), Destination.CONTROLLER, kafkaProperties(UUID.nameUUIDFromBytes(Destination.CONTROLLER.toString().getBytes()).toString()));
ofsConsumer.start();
cacheConsumer = new TestKafkaConsumer(topologyConfig.getKafkaTopoCacheTopic(), null, kafkaProperties(UUID.nameUUIDFromBytes(Destination.TOPOLOGY_ENGINE.toString().getBytes()).toString()));
cacheConsumer.start();
// teResponseConsumer = new TestKafkaConsumer(topologyConfig.getKafkaTopoEngTopic(),
teResponseConsumer = new TestKafkaConsumer(topologyConfig.getKafkaFlowTopic(), Destination.WFM, kafkaProperties(UUID.nameUUIDFromBytes(Destination.WFM.toString().getBytes()).toString()));
teResponseConsumer.start();
ctrlConsumer = new TestKafkaConsumer(flowTopology.getConfig().getKafkaCtrlTopic(), Destination.CTRL_CLIENT, kafkaProperties(UUID.nameUUIDFromBytes(Destination.CTRL_CLIENT.toString().getBytes()).toString()));
ctrlConsumer.start();
Utils.sleep(10000);
}
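The UUID.nameUUIDFromBytes(...) calls above derive each consumer's Kafka group id deterministically from its destination name: a name-based (type 3, MD5) UUID is stable across runs and unique per destination, so restarted tests rejoin the same group instead of accumulating new ones. A minimal illustration (class and method names are hypothetical):

import java.util.UUID;

public final class GroupIds {
    // Same destination name always yields the same group id; different names never collide in practice.
    static String groupIdFor(String destination) {
        return UUID.nameUUIDFromBytes(destination.getBytes()).toString();
    }

    public static void main(String[] args) {
        System.out.println(groupIdFor("NORTHBOUND")); // stable across runs
        System.out.println(groupIdFor("CONTROLLER")); // distinct per destination
    }
}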